Source file src/cmd/compile/internal/ssa/debug.go

     1  // Copyright 2017 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ssa
     6  
     7  import (
     8  	"cmd/compile/internal/abi"
     9  	"cmd/compile/internal/abt"
    10  	"cmd/compile/internal/ir"
    11  	"cmd/compile/internal/types"
    12  	"cmd/internal/dwarf"
    13  	"cmd/internal/obj"
    14  	"cmd/internal/src"
    15  	"cmp"
    16  	"encoding/hex"
    17  	"fmt"
    18  	"internal/buildcfg"
    19  	"math/bits"
    20  	"slices"
    21  	"strings"
    22  )
    23  
    24  type SlotID int32
    25  type VarID int32
    26  
    27  // A FuncDebug contains all the debug information for the variables in a
    28  // function. Variables are identified by their LocalSlot, which may be
    29  // the result of decomposing a larger variable.
    30  type FuncDebug struct {
    31  	// Slots is all the slots used in the debug info, indexed by their SlotID.
    32  	Slots []LocalSlot
    33  	// The user variables, indexed by VarID.
    34  	Vars []*ir.Name
    35  	// The slots that make up each variable, indexed by VarID.
    36  	VarSlots [][]SlotID
    37  	// The location list data, indexed by VarID. Must be processed by PutLocationList.
    38  	LocationLists [][]byte
    39  	// Register-resident output parameters for the function. This is filled in at
    40  	// SSA generation time.
    41  	RegOutputParams []*ir.Name
    42  	// Variable declarations that were removed during optimization
    43  	OptDcl []*ir.Name
    44  	// The ssa.Func.EntryID value, used to build location lists for
    45  	// return values promoted to heap in later DWARF generation.
    46  	EntryID ID
    47  
    48  	// Filled in by the user. Translates Block and Value ID to PC.
    49  	//
    50  	// NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID.
    51  	// Otherwise, it is ignored.
    52  	GetPC func(block, value ID) int64
    53  }
    54  
    55  type BlockDebug struct {
    56  	// State at the start and end of the block. These are initialized,
    57  	// and updated from new information that flows on back edges.
    58  	startState, endState abt.T
    59  	// Use these to avoid excess work in the merge. If none of the
    60  	// predecessors has changed since the last check, the old answer is
    61  	// still good.
    62  	lastCheckedTime, lastChangedTime int32
    63  	// Whether the block had any changes to user variables at all.
    64  	relevant bool
    65  	// false until the block has been processed at least once. This
    66  	// affects how the merge is done; the goal is to maximize sharing
    67  	// and avoid allocation.
    68  	everProcessed bool
    69  }
    70  
    71  // A liveSlot is a slot that's live in loc at entry/exit of a block.
    72  type liveSlot struct {
    73  	VarLoc
    74  }
    75  
    76  func (ls *liveSlot) String() string {
    77  	return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1)
    78  }
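
        // For example, a liveSlot whose Registers mask is 0x1 (register
        // number 0) and whose StackOffset encodes offset 8 (8<<1|1) prints
        // as "0x1.8.1": register mask, stack offset, and the on-stack flag.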
    79  
    80  // StackOffset encodes whether a value is on the stack and if so, where.
    81  // It is a 31-bit integer followed by a presence flag at the low-order
    82  // bit.
    83  type StackOffset int32
    84  
    85  func (s StackOffset) onStack() bool {
    86  	return s != 0
    87  }
    88  
    89  func (s StackOffset) stackOffsetValue() int32 {
    90  	return int32(s) >> 1
    91  }
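
        // For example, a slot at frame offset -16 is encoded as
        // StackOffset(-16<<1|1) == -31; stackOffsetValue() recovers -16 and
        // onStack() reports true, while the zero value means the slot has no
        // stack location.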
    92  
    93  // stateAtPC is the current state of all variables at some point.
    94  type stateAtPC struct {
    95  	// The location of each known slot, indexed by SlotID.
    96  	slots []VarLoc
    97  	// The slots present in each register, indexed by register number.
    98  	registers [][]SlotID
    99  }
   100  
   101  // reset fills state with the live variables from live.
   102  func (state *stateAtPC) reset(live abt.T) {
   103  	slots, registers := state.slots, state.registers
   104  	clear(slots)
   105  	for i := range registers {
   106  		registers[i] = registers[i][:0]
   107  	}
   108  	for it := live.Iterator(); !it.Done(); {
   109  		k, d := it.Next()
   110  		live := d.(*liveSlot)
   111  		slots[k] = live.VarLoc
   112  		if live.VarLoc.Registers == 0 {
   113  			continue
   114  		}
   115  
   116  		mask := uint64(live.VarLoc.Registers)
   117  		for {
   118  			if mask == 0 {
   119  				break
   120  			}
   121  			reg := uint8(bits.TrailingZeros64(mask))
   122  			mask &^= 1 << reg
   123  
   124  			registers[reg] = append(registers[reg], SlotID(k))
   125  		}
   126  	}
   127  	state.slots, state.registers = slots, registers
   128  }
   129  
   130  func (s *debugState) LocString(loc VarLoc) string {
   131  	if loc.absent() {
   132  		return "<nil>"
   133  	}
   134  
   135  	var storage []string
   136  	if loc.onStack() {
   137  		storage = append(storage, fmt.Sprintf("@%+d", loc.stackOffsetValue()))
   138  	}
   139  
   140  	mask := uint64(loc.Registers)
   141  	for {
   142  		if mask == 0 {
   143  			break
   144  		}
   145  		reg := uint8(bits.TrailingZeros64(mask))
   146  		mask &^= 1 << reg
   147  
   148  		storage = append(storage, s.registers[reg].String())
   149  	}
   150  	return strings.Join(storage, ",")
   151  }
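
        // For example, a VarLoc that is on the stack at offset 8 and also in
        // a register named "AX" in s.registers renders as "@+8,AX"; a
        // register-only location renders as just "AX".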
   152  
   153  // A VarLoc describes the storage for part of a user variable.
   154  type VarLoc struct {
   155  	// The registers this variable is available in. There can be more than
   156  	// one in various situations, e.g. it's being moved between registers.
   157  	Registers RegisterSet
   158  
   159  	StackOffset
   160  }
   161  
   162  func (loc VarLoc) absent() bool {
   163  	return loc.Registers == 0 && !loc.onStack()
   164  }
   165  
   166  func (loc VarLoc) intersect(other VarLoc) VarLoc {
   167  	if !loc.onStack() || !other.onStack() || loc.StackOffset != other.StackOffset {
   168  		loc.StackOffset = 0
   169  	}
   170  	loc.Registers &= other.Registers
   171  	return loc
   172  }
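
        // For example, intersecting a location that is on the stack at offset
        // 8 and in registers 0 and 1 with a location that is only in register
        // 0 (not on stack) yields register 0 with no stack offset; if both
        // were on the stack at offset 8, the offset would be kept.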
   173  
   174  var BlockStart = &Value{
   175  	ID:  -10000,
   176  	Op:  OpInvalid,
   177  	Aux: StringToAux("BlockStart"),
   178  }
   179  
   180  var BlockEnd = &Value{
   181  	ID:  -20000,
   182  	Op:  OpInvalid,
   183  	Aux: StringToAux("BlockEnd"),
   184  }
   185  
   186  var FuncEnd = &Value{
   187  	ID:  -30000,
   188  	Op:  OpInvalid,
   189  	Aux: StringToAux("FuncEnd"),
   190  }
   191  
   192  // RegisterSet is a bitmap of registers, indexed by Register.num.
   193  type RegisterSet uint64
   194  
   195  // logf prints debug-specific logging to stdout (always stdout) if the
   196  // current function is tagged by GOSSAFUNC (for ssa output directed
   197  // either to stdout or html).
   198  func (s *debugState) logf(msg string, args ...interface{}) {
   199  	if s.f.PrintOrHtmlSSA {
   200  		fmt.Printf(msg, args...)
   201  	}
   202  }
   203  
   204  type debugState struct {
   205  	// See FuncDebug.
   206  	slots    []LocalSlot
   207  	vars     []*ir.Name
   208  	varSlots [][]SlotID
   209  	lists    [][]byte
   210  
   211  	// The user variable that each slot rolls up to, indexed by SlotID.
   212  	slotVars []VarID
   213  
   214  	f             *Func
   215  	loggingLevel  int
   216  	convergeCount int // testing; iterate over block debug state this many times
   217  	registers     []Register
   218  	stackOffset   func(LocalSlot) int32
   219  	ctxt          *obj.Link
   220  
   221  	// The names (slots) associated with each value, indexed by Value ID.
   222  	valueNames [][]SlotID
   223  
   224  	// The current state of whatever analysis is running.
   225  	currentState stateAtPC
   226  	changedVars  *sparseSet
   227  	changedSlots *sparseSet
   228  
   229  	// The pending location list entry for each user variable, indexed by VarID.
   230  	pendingEntries []pendingEntry
   231  
   232  	varParts        map[*ir.Name][]SlotID
   233  	blockDebug      []BlockDebug
   234  	pendingSlotLocs []VarLoc
   235  }
   236  
   237  func (state *debugState) initializeCache(f *Func, numVars, numSlots int) {
   238  	// One blockDebug per block. Initialized in allocBlock.
   239  	if cap(state.blockDebug) < f.NumBlocks() {
   240  		state.blockDebug = make([]BlockDebug, f.NumBlocks())
   241  	} else {
   242  		clear(state.blockDebug[:f.NumBlocks()])
   243  	}
   244  
   245  	// A list of slots per Value. Reuse the previous child slices.
   246  	if cap(state.valueNames) < f.NumValues() {
   247  		old := state.valueNames
   248  		state.valueNames = make([][]SlotID, f.NumValues())
   249  		copy(state.valueNames, old)
   250  	}
   251  	vn := state.valueNames[:f.NumValues()]
   252  	for i := range vn {
   253  		vn[i] = vn[i][:0]
   254  	}
   255  
   256  	// Slot and register contents for currentState. Cleared by reset().
   257  	if cap(state.currentState.slots) < numSlots {
   258  		state.currentState.slots = make([]VarLoc, numSlots)
   259  	} else {
   260  		state.currentState.slots = state.currentState.slots[:numSlots]
   261  	}
   262  	if cap(state.currentState.registers) < len(state.registers) {
   263  		state.currentState.registers = make([][]SlotID, len(state.registers))
   264  	} else {
   265  		state.currentState.registers = state.currentState.registers[:len(state.registers)]
   266  	}
   267  
   268  	// A relatively small slice, but used many times as the return from processValue.
   269  	state.changedVars = newSparseSet(numVars)
   270  	state.changedSlots = newSparseSet(numSlots)
   271  
   272  	// A pending entry per user variable, with space to track each of its pieces.
   273  	numPieces := 0
   274  	for i := range state.varSlots {
   275  		numPieces += len(state.varSlots[i])
   276  	}
   277  	if cap(state.pendingSlotLocs) < numPieces {
   278  		state.pendingSlotLocs = make([]VarLoc, numPieces)
   279  	} else {
   280  		clear(state.pendingSlotLocs[:numPieces])
   281  	}
   282  	if cap(state.pendingEntries) < numVars {
   283  		state.pendingEntries = make([]pendingEntry, numVars)
   284  	}
   285  	pe := state.pendingEntries[:numVars]
   286  	freePieceIdx := 0
   287  	for varID, slots := range state.varSlots {
   288  		pe[varID] = pendingEntry{
   289  			pieces: state.pendingSlotLocs[freePieceIdx : freePieceIdx+len(slots)],
   290  		}
   291  		freePieceIdx += len(slots)
   292  	}
   293  	state.pendingEntries = pe
   294  
   295  	if cap(state.lists) < numVars {
   296  		state.lists = make([][]byte, numVars)
   297  	} else {
   298  		state.lists = state.lists[:numVars]
   299  		clear(state.lists)
   300  	}
   301  }
   302  
   303  func (state *debugState) allocBlock(b *Block) *BlockDebug {
   304  	return &state.blockDebug[b.ID]
   305  }
   306  
   307  func (s *debugState) blockEndStateString(b *BlockDebug) string {
   308  	endState := stateAtPC{slots: make([]VarLoc, len(s.slots)), registers: make([][]SlotID, len(s.registers))}
   309  	endState.reset(b.endState)
   310  	return s.stateString(endState)
   311  }
   312  
   313  func (s *debugState) stateString(state stateAtPC) string {
   314  	var strs []string
   315  	for slotID, loc := range state.slots {
   316  		if !loc.absent() {
   317  			strs = append(strs, fmt.Sprintf("\t%v = %v\n", s.slots[slotID], s.LocString(loc)))
   318  		}
   319  	}
   320  
   321  	strs = append(strs, "\n")
   322  	for reg, slots := range state.registers {
   323  		if len(slots) != 0 {
   324  			var slotStrs []string
   325  			for _, slot := range slots {
   326  				slotStrs = append(slotStrs, s.slots[slot].String())
   327  			}
   328  			strs = append(strs, fmt.Sprintf("\t%v = %v\n", &s.registers[reg], slotStrs))
   329  		}
   330  	}
   331  
   332  	if len(strs) == 1 {
   333  		return "(no vars)\n"
   334  	}
   335  	return strings.Join(strs, "")
   336  }
   337  
   338  // slotCanonicalizer is a table used to look up and canonicalize
   339  // LocalSlots in a type-insensitive way (e.g. taking into account the
   340  // base name, offset, and width of the slot, but ignoring the slot
   341  // type).
   342  type slotCanonicalizer struct {
   343  	slmap  map[slotKey]SlKeyIdx
   344  	slkeys []LocalSlot
   345  }
   346  
   347  func newSlotCanonicalizer() *slotCanonicalizer {
   348  	return &slotCanonicalizer{
   349  		slmap:  make(map[slotKey]SlKeyIdx),
   350  		slkeys: []LocalSlot{LocalSlot{N: nil}},
   351  	}
   352  }
   353  
   354  type SlKeyIdx uint32
   355  
   356  const noSlot = SlKeyIdx(0)
   357  
   358  // slotKey is a type-insensitive encapsulation of a LocalSlot; it
   359  // is used to key a map within slotCanonicalizer.
   360  type slotKey struct {
   361  	name        *ir.Name
   362  	offset      int64
   363  	width       int64
   364  	splitOf     SlKeyIdx // idx in slkeys slice in slotCanonicalizer
   365  	splitOffset int64
   366  }
   367  
   368  // lookup looks up a LocalSlot in the slot canonicalizer "sc", returning
   369  // a canonical index for the slot, and adding it to the table if need
   370  // be. Return value is the canonical slot index, and a boolean indicating
   371  // whether the slot was found in the table already (TRUE => found).
   372  func (sc *slotCanonicalizer) lookup(ls LocalSlot) (SlKeyIdx, bool) {
   373  	split := noSlot
   374  	if ls.SplitOf != nil {
   375  		split, _ = sc.lookup(*ls.SplitOf)
   376  	}
   377  	k := slotKey{
   378  		name: ls.N, offset: ls.Off, width: ls.Type.Size(),
   379  		splitOf: split, splitOffset: ls.SplitOffset,
   380  	}
   381  	if idx, ok := sc.slmap[k]; ok {
   382  		return idx, true
   383  	}
   384  	rv := SlKeyIdx(len(sc.slkeys))
   385  	sc.slkeys = append(sc.slkeys, ls)
   386  	sc.slmap[k] = rv
   387  	return rv, false
   388  }
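
        // For example, two LocalSlots that differ only in type, where the
        // types have the same size (say *uint8 and unsafe.Pointer), produce
        // the same slotKey, so the second lookup returns the index assigned
        // to the first along with found == true.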
   389  
   390  func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
   391  	return sc.slkeys[idx]
   392  }
   393  
   394  // PopulateABIInRegArgOps examines the entry block of the function
   395  // and looks for incoming parameters that have missing or partial
   396  // OpArg{Int,Float}Reg values, inserting additional values in
   397  // cases where they are missing. Example:
   398  //
   399  //	func foo(s string, used int, notused int) int {
   400  //	  return len(s) + used
   401  //	}
   402  //
   403  // In the function above, the incoming parameter "used" is fully live,
   404  // "notused" is not live, and "s" is partially live (only the length
   405  // field of the string is used). At the point where debug value
   406  // analysis runs, we might expect to see an entry block with:
   407  //
   408  //	b1:
   409  //	  v4 = ArgIntReg <uintptr> {s+8} [0] : BX
   410  //	  v5 = ArgIntReg <int> {used} [0] : CX
   411  //
   412  // While this is an accurate picture of the live incoming params,
   413  // we also want to have debug locations for non-live params (or
   414  // their non-live pieces), e.g. something like
   415  //
   416  //	b1:
   417  //	  v9 = ArgIntReg <*uint8> {s+0} [0] : AX
   418  //	  v4 = ArgIntReg <uintptr> {s+8} [0] : BX
   419  //	  v5 = ArgIntReg <int> {used} [0] : CX
   420  //	  v10 = ArgIntReg <int> {notused} [0] : DI
   421  //
   422  // This function examines the live OpArg{Int,Float}Reg values and
   423  // synthesizes new (dead) values for the non-live params or the
   424  // non-live pieces of partially live params.
   425  func PopulateABIInRegArgOps(f *Func) {
   426  	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
   427  
   428  	// When manufacturing new slots that correspond to splits of
   429  	// composite parameters, we want to avoid creating a new sub-slot
   430  	// that differs from some existing sub-slot only by type, since
   431  	// the debug location analysis will treat that slot as a separate
   432  	// entity. To achieve this, create a lookup table of existing
   433  // slots that is type-insensitive.
   434  	sc := newSlotCanonicalizer()
   435  	for _, sl := range f.Names {
   436  		sc.lookup(*sl)
   437  	}
   438  
   439  	// Add slot -> value entry to f.NamedValues if not already present.
   440  	addToNV := func(v *Value, sl LocalSlot) {
   441  		values, ok := f.NamedValues[sl]
   442  		if !ok {
   443  			// Haven't seen this slot yet.
   444  			sla := f.localSlotAddr(sl)
   445  			f.Names = append(f.Names, sla)
   446  		} else {
   447  			for _, ev := range values {
   448  				if v == ev {
   449  					return
   450  				}
   451  			}
   452  		}
   453  		values = append(values, v)
   454  		f.NamedValues[sl] = values
   455  	}
   456  
   457  	newValues := []*Value{}
   458  
   459  	abiRegIndexToRegister := func(reg abi.RegIndex) int8 {
   460  		i := f.ABISelf.FloatIndexFor(reg)
   461  		if i >= 0 { // float parameter register
   462  			return f.Config.floatParamRegs[i]
   463  		} else {
   464  			return f.Config.intParamRegs[reg]
   465  		}
   466  	}
   467  
   468  	// Helper to construct a new OpArg{Float,Int}Reg op value.
   469  	var pos src.XPos
   470  	if len(f.Entry.Values) != 0 {
   471  		pos = f.Entry.Values[0].Pos
   472  	}
   473  	synthesizeOpIntFloatArg := func(n *ir.Name, t *types.Type, reg abi.RegIndex, sl LocalSlot) *Value {
   474  		aux := &AuxNameOffset{n, sl.Off}
   475  		op, auxInt := ArgOpAndRegisterFor(reg, f.ABISelf)
   476  		v := f.newValueNoBlock(op, t, pos)
   477  		v.AuxInt = auxInt
   478  		v.Aux = aux
   479  		v.Args = nil
   480  		v.Block = f.Entry
   481  		newValues = append(newValues, v)
   482  		addToNV(v, sl)
   483  		f.setHome(v, &f.Config.registers[abiRegIndexToRegister(reg)])
   484  		return v
   485  	}
   486  
   487  	// Make a pass through the entry block looking for
   488  	// OpArg{Int,Float}Reg ops. Record the slots they use in a table
   489  	// ("sc"). We use a type-insensitive lookup for the slot table,
   490  	// since the type we get from the ABI analyzer won't always match
   491  	// what the compiler uses when creating OpArg{Int,Float}Reg ops.
   492  	for _, v := range f.Entry.Values {
   493  		if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
   494  			aux := v.Aux.(*AuxNameOffset)
   495  			sl := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
   496  			// install slot in lookup table
   497  			idx, _ := sc.lookup(sl)
   498  			// add to f.NamedValues if not already present
   499  			addToNV(v, sc.canonSlot(idx))
   500  		} else if v.Op.IsCall() {
   501  			// if we hit a call, we've gone too far.
   502  			break
   503  		}
   504  	}
   505  
   506  	// Now make a pass through the ABI in-params, looking for params
   507  	// or pieces of params that we didn't encounter in the loop above.
   508  	for _, inp := range pri.InParams() {
   509  		if !isNamedRegParam(inp) {
   510  			continue
   511  		}
   512  		n := inp.Name
   513  
   514  		// Param is spread across one or more registers. Walk through
   515  		// each piece to see whether we've seen an arg reg op for it.
   516  		types, offsets := inp.RegisterTypesAndOffsets()
   517  		for k, t := range types {
   518  			// Note: this recipe for creating a LocalSlot is designed
   519  			// to be compatible with the one used in expand_calls.go
   520  			// as opposed to decompose.go. The expand calls code just
   521  			// takes the base name and creates an offset into it,
   522  			// without using the SplitOf/SplitOffset fields. The code
   523  			// in decompose.go does the opposite -- it creates a
   524  			// LocalSlot object with "Off" set to zero, but with
   525  			// SplitOf pointing to a parent slot, and SplitOffset
   526  			// holding the offset into the parent object.
   527  			pieceSlot := LocalSlot{N: n, Type: t, Off: offsets[k]}
   528  
   529  			// Look up this piece to see if we've seen a reg op
   530  			// for it. If not, create one.
   531  			_, found := sc.lookup(pieceSlot)
   532  			if !found {
   533  				// This slot doesn't appear in the map, meaning it
   534  				// corresponds to an in-param that is not live, or
   535  				// a portion of an in-param that is not live/used.
   536  				// Add a new dummy OpArg{Int,Float}Reg for it.
   537  				synthesizeOpIntFloatArg(n, t, inp.Registers[k],
   538  					pieceSlot)
   539  			}
   540  		}
   541  	}
   542  
   543  	// Insert the new values into the head of the block.
   544  	f.Entry.Values = append(newValues, f.Entry.Values...)
   545  }
   546  
   547  // BuildFuncDebug builds debug information for f, placing the results
   548  // in "rval". f must be fully processed, so that each Value is where it
   549  // will be when machine code is emitted.
   550  func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingLevel int, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
   551  	if f.RegAlloc == nil {
   552  		f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f)
   553  	}
   554  	state := &f.Cache.debugState
   555  	state.loggingLevel = loggingLevel % 1000
   556  
   557  	// A specific number demands exactly that many iterations. Under
   558  	// particular circumstances it may require more than the total of
   559  	// 2 passes implied by a single run through liveness and a single
   560  	// run through location list generation.
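        	// For example, a value of 2003 selects logging level 3
        	// (2003 % 1000) and exactly 2 convergence iterations (2003 / 1000).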
   561  	state.convergeCount = loggingLevel / 1000
   562  	state.f = f
   563  	state.registers = f.Config.registers
   564  	state.stackOffset = stackOffset
   565  	state.ctxt = ctxt
   566  
   567  	if buildcfg.Experiment.RegabiArgs {
   568  		PopulateABIInRegArgOps(f)
   569  	}
   570  
   571  	if state.loggingLevel > 0 {
   572  		state.logf("Generating location lists for function %q\n", f.Name)
   573  	}
   574  
   575  	if state.varParts == nil {
   576  		state.varParts = make(map[*ir.Name][]SlotID)
   577  	} else {
   578  		clear(state.varParts)
   579  	}
   580  
   581  	// Recompose any decomposed variables, and establish the canonical
   582  	// IDs for each var and slot by filling out state.vars and state.slots.
   583  
   584  	state.slots = state.slots[:0]
   585  	state.vars = state.vars[:0]
   586  	for i, slot := range f.Names {
   587  		state.slots = append(state.slots, *slot)
   588  		if ir.IsSynthetic(slot.N) || !IsVarWantedForDebug(slot.N) {
   589  			continue
   590  		}
   591  
   592  		topSlot := slot
   593  		for topSlot.SplitOf != nil {
   594  			topSlot = topSlot.SplitOf
   595  		}
   596  		if _, ok := state.varParts[topSlot.N]; !ok {
   597  			state.vars = append(state.vars, topSlot.N)
   598  		}
   599  		state.varParts[topSlot.N] = append(state.varParts[topSlot.N], SlotID(i))
   600  	}
   601  
   602  	// Recreate the LocalSlot for each stack-only variable.
   603  	// This would probably be better as an output from stackframe.
   604  	for _, b := range f.Blocks {
   605  		for _, v := range b.Values {
   606  			if v.Op == OpVarDef {
   607  				n := v.Aux.(*ir.Name)
   608  				if ir.IsSynthetic(n) || !IsVarWantedForDebug(n) {
   609  					continue
   610  				}
   611  
   612  				if _, ok := state.varParts[n]; !ok {
   613  					slot := LocalSlot{N: n, Type: v.Type, Off: 0}
   614  					state.slots = append(state.slots, slot)
   615  					state.varParts[n] = []SlotID{SlotID(len(state.slots) - 1)}
   616  					state.vars = append(state.vars, n)
   617  				}
   618  			}
   619  		}
   620  	}
   621  
   622  	// Fill in the var<->slot mappings.
   623  	if cap(state.varSlots) < len(state.vars) {
   624  		state.varSlots = make([][]SlotID, len(state.vars))
   625  	} else {
   626  		state.varSlots = state.varSlots[:len(state.vars)]
   627  		for i := range state.varSlots {
   628  			state.varSlots[i] = state.varSlots[i][:0]
   629  		}
   630  	}
   631  	if cap(state.slotVars) < len(state.slots) {
   632  		state.slotVars = make([]VarID, len(state.slots))
   633  	} else {
   634  		state.slotVars = state.slotVars[:len(state.slots)]
   635  	}
   636  
   637  	for varID, n := range state.vars {
   638  		parts := state.varParts[n]
   639  		slices.SortFunc(parts, func(a, b SlotID) int {
   640  			return cmp.Compare(varOffset(state.slots[a]), varOffset(state.slots[b]))
   641  		})
   642  
   643  		state.varSlots[varID] = parts
   644  		for _, slotID := range parts {
   645  			state.slotVars[slotID] = VarID(varID)
   646  		}
   647  	}
   648  
   649  	state.initializeCache(f, len(state.varParts), len(state.slots))
   650  
   651  	for i, slot := range f.Names {
   652  		if ir.IsSynthetic(slot.N) || !IsVarWantedForDebug(slot.N) {
   653  			continue
   654  		}
   655  		for _, value := range f.NamedValues[*slot] {
   656  			state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i))
   657  		}
   658  	}
   659  
   660  	blockLocs := state.liveness()
   661  	state.buildLocationLists(blockLocs)
   662  
   663  	// Populate "rval" with what we've computed.
   664  	rval.Slots = state.slots
   665  	rval.VarSlots = state.varSlots
   666  	rval.Vars = state.vars
   667  	rval.LocationLists = state.lists
   668  }
   669  
   670  // liveness walks the function in control flow order, calculating the start
   671  // and end state of each block.
   672  func (state *debugState) liveness() []*BlockDebug {
   673  	blockLocs := make([]*BlockDebug, state.f.NumBlocks())
   674  	counterTime := int32(1)
   675  
   676  	// Reverse postorder: visit a block after as many as possible of its
   677  	// predecessors have been visited.
   678  	po := state.f.Postorder()
   679  	converged := false
   680  
   681  	// The iteration rule: by default, run until converged, but if a
   682  	// particular iteration count is specified, run exactly that many
   683  	// iterations, no more, no less.  A count is specified as the
   684  	// thousands digit of the location lists debug flag,
   685  	// e.g. -d=locationlists=4000.
   686  	keepGoing := func(k int) bool {
   687  		if state.convergeCount == 0 {
   688  			return !converged
   689  		}
   690  		return k < state.convergeCount
   691  	}
   692  	for k := 0; keepGoing(k); k++ {
   693  		if state.loggingLevel > 0 {
   694  			state.logf("Liveness pass %d\n", k)
   695  		}
   696  		converged = true
   697  		for i := len(po) - 1; i >= 0; i-- {
   698  			b := po[i]
   699  			locs := blockLocs[b.ID]
   700  			if locs == nil {
   701  				locs = state.allocBlock(b)
   702  				blockLocs[b.ID] = locs
   703  			}
   704  
   705  			// Build the starting state for the block from the final
   706  			// state of its predecessors.
   707  			startState, blockChanged := state.mergePredecessors(b, blockLocs, nil, false)
   708  			locs.lastCheckedTime = counterTime
   709  			counterTime++
   710  			if state.loggingLevel > 1 {
   711  				state.logf("Processing %v, block changed %v, initial state:\n%v", b, blockChanged, state.stateString(state.currentState))
   712  			}
   713  
   714  			if blockChanged {
   715  				// The start state changed, so the old endState cannot be reused; recompute it.
   716  				converged = false
   717  				changed := false
   718  				state.changedSlots.clear()
   719  
   720  				// Update locs/registers with the effects of each Value.
   721  				for _, v := range b.Values {
   722  					slots := state.valueNames[v.ID]
   723  
   724  					// Loads and stores inherit the names of their sources.
   725  					var source *Value
   726  					switch v.Op {
   727  					case OpStoreReg:
   728  						source = v.Args[0]
   729  					case OpLoadReg:
   730  						switch a := v.Args[0]; a.Op {
   731  						case OpArg, OpPhi:
   732  							source = a
   733  						case OpStoreReg:
   734  							source = a.Args[0]
   735  						default:
   736  							if state.loggingLevel > 1 {
   737  								state.logf("at %v: load with unexpected source op: %v (%v)\n", v, a.Op, a)
   738  							}
   739  						}
   740  					}
   741  					// Update valueNames with the source so that later steps
   742  					// don't need special handling.
   743  					if source != nil && k == 0 {
   744  						// limit to k == 0 otherwise there are duplicates.
   745  						slots = append(slots, state.valueNames[source.ID]...)
   746  						state.valueNames[v.ID] = slots
   747  					}
   748  
   749  					reg, _ := state.f.getHome(v.ID).(*Register)
   750  					c := state.processValue(v, slots, reg)
   751  					changed = changed || c
   752  				}
   753  
   754  				if state.loggingLevel > 1 {
   755  					state.logf("Block %v done, locs:\n%v", b, state.stateString(state.currentState))
   756  				}
   757  
   758  				locs.relevant = locs.relevant || changed
   759  				if !changed {
   760  					locs.endState = startState
   761  				} else {
   762  					for _, id := range state.changedSlots.contents() {
   763  						slotID := SlotID(id)
   764  						slotLoc := state.currentState.slots[slotID]
   765  						if slotLoc.absent() {
   766  							startState.Delete(int32(slotID))
   767  							continue
   768  						}
   769  						old := startState.Find(int32(slotID)) // do NOT replace existing values
   770  						if oldLS, ok := old.(*liveSlot); !ok || oldLS.VarLoc != slotLoc {
   771  							startState.Insert(int32(slotID),
   772  								&liveSlot{VarLoc: slotLoc})
   773  						}
   774  					}
   775  					locs.endState = startState
   776  				}
   777  				locs.lastChangedTime = counterTime
   778  			}
   779  			counterTime++
   780  		}
   781  	}
   782  	return blockLocs
   783  }
   784  
   785  // mergePredecessors takes the end state of each of b's predecessors and
   786  // intersects them to form the starting state for b. It puts that state
   787  // in blockLocs[b.ID].startState, and fills state.currentState with it.
   788  // It returns the start state and whether it has changed from the
   789  // previously approximated value of startState for this block.  After
   790  // the first call, subsequent calls can only shrink startState.
   791  //
   792  // Passing forLocationLists=true enables additional side-effects that
   793  // are necessary for building location lists but superfluous while still
   794  // iterating to an answer.
   795  //
   796  // If previousBlock is non-nil, it registers changes vs. that block's
   797  // end state in state.changedVars. Note that previousBlock will often
   798  // not be a predecessor.
   799  //
   800  // Note that mergePredecessors behaves slightly differently between
   801  // first and subsequent calls for a block.  For the first call, the
   802  // starting state is approximated by taking the state from the
   803  // predecessor whose state is smallest, and removing any elements not
   804  // in all the other predecessors; this makes the smallest number of
   805  // changes and shares the most state.  On subsequent calls the old
   806  // value of startState is adjusted with new information; this is judged
   807  // to do the least amount of extra work.
   808  //
   809  // To improve performance, each block's state information is marked with
   810  // lastChanged and lastChecked "times" so unchanged predecessors can be
   811  // skipped on iterations after the first.  Doing this allows extra
   812  // iterations by the caller to be almost free.
   813  //
   814  // It is important to know that the set representation used for
   815  // startState, endState, and merges can share data for two sets where
   816  // one is a small delta from the other.  Doing this does require a
   817  // little care in how sets are updated, both in mergePredecessors, and
   818  // using its result.
   819  func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug, previousBlock *Block, forLocationLists bool) (abt.T, bool) {
   820  	// Filter out back branches.
   821  	var predsBuf [10]*Block
   822  
   823  	preds := predsBuf[:0]
   824  	locs := blockLocs[b.ID]
   825  
   826  	blockChanged := !locs.everProcessed // the first time it always changes.
   827  	updating := locs.everProcessed
   828  
   829  	// For the first merge, exclude predecessors that have not been seen yet.
   830  	// I.e., backedges.
   831  	for _, pred := range b.Preds {
   832  		if bl := blockLocs[pred.b.ID]; bl != nil && bl.everProcessed {
   833  			// crucially, a self-edge has bl != nil, but bl.everProcessed is false the first time.
   834  			preds = append(preds, pred.b)
   835  		}
   836  	}
   837  
   838  	locs.everProcessed = true
   839  
   840  	if state.loggingLevel > 1 {
   841  		// The logf below would cause preds to be heap-allocated if
   842  		// it were passed directly.
   843  		preds2 := make([]*Block, len(preds))
   844  		copy(preds2, preds)
   845  		state.logf("Merging %v into %v (changed=%d, checked=%d)\n", preds2, b, locs.lastChangedTime, locs.lastCheckedTime)
   846  	}
   847  
   848  	state.changedVars.clear()
   849  
   850  	markChangedVars := func(slots, merged abt.T) {
   851  		if !forLocationLists {
   852  			return
   853  		}
   854  		// Fill changedVars with those that differ between the previous
   855  		// block (in the emit order, not necessarily a flow predecessor)
   856  		// and the start state for this block.
   857  		for it := slots.Iterator(); !it.Done(); {
   858  			k, v := it.Next()
   859  			m := merged.Find(k)
   860  			if m == nil || v.(*liveSlot).VarLoc != m.(*liveSlot).VarLoc {
   861  				state.changedVars.add(ID(state.slotVars[k]))
   862  			}
   863  		}
   864  	}
   865  
   866  	reset := func(ourStartState abt.T) {
   867  		if !(forLocationLists || blockChanged) {
   868  			// There is no change and this is not for location lists, so do
   869  			// not bother to reset currentState because it will not be
   870  			// examined.
   871  			return
   872  		}
   873  		state.currentState.reset(ourStartState)
   874  	}
   875  
   876  	// Zero predecessors
   877  	if len(preds) == 0 {
   878  		if previousBlock != nil {
   879  			state.f.Fatalf("Function %v, block %s with no predecessors is not first block, has previous %s", state.f, b.String(), previousBlock.String())
   880  		}
   881  		// startState is empty
   882  		reset(abt.T{})
   883  		return abt.T{}, blockChanged
   884  	}
   885  
   886  	// One predecessor
   887  	l0 := blockLocs[preds[0].ID]
   888  	p0 := l0.endState
   889  	if len(preds) == 1 {
   890  		if previousBlock != nil && preds[0].ID != previousBlock.ID {
   891  			// Change from previous block is its endState minus the predecessor's endState
   892  			markChangedVars(blockLocs[previousBlock.ID].endState, p0)
   893  		}
   894  		locs.startState = p0
   895  		blockChanged = blockChanged || l0.lastChangedTime > locs.lastCheckedTime
   896  		reset(p0)
   897  		return p0, blockChanged
   898  	}
   899  
   900  	// More than one predecessor
   901  
   902  	if updating {
   903  		// After the first approximation, i.e., when updating, results
   904  		// can only get smaller, because initially backedge
   905  		// predecessors do not participate in the intersection.  This
   906  		// means that for the update, given the prior approximation of
   907  		// startState, there is no need to re-intersect with unchanged
   908  		// blocks.  Therefore remove unchanged blocks from the
   909  		// predecessor list.
   910  		for i := len(preds) - 1; i >= 0; i-- {
   911  			pred := preds[i]
   912  			if blockLocs[pred.ID].lastChangedTime > locs.lastCheckedTime {
   913  				continue // keep this predecessor
   914  			}
   915  			preds[i] = preds[len(preds)-1]
   916  			preds = preds[:len(preds)-1]
   917  			if state.loggingLevel > 2 {
   918  				state.logf("Pruned b%d, lastChanged was %d but b%d lastChecked is %d\n", pred.ID, blockLocs[pred.ID].lastChangedTime, b.ID, locs.lastCheckedTime)
   919  			}
   920  		}
   921  		// Check for an early out; this should always hit for the update
   922  		// if there are no cycles.
   923  		if len(preds) == 0 {
   924  			blockChanged = false
   925  
   926  			reset(locs.startState)
   927  			if state.loggingLevel > 2 {
   928  				state.logf("Early out, no predecessors changed since last check\n")
   929  			}
   930  			if previousBlock != nil {
   931  				markChangedVars(blockLocs[previousBlock.ID].endState, locs.startState)
   932  			}
   933  			return locs.startState, blockChanged
   934  		}
   935  	}
   936  
   937  	baseID := preds[0].ID
   938  	baseState := p0
   939  
   940  	// Choose the predecessor with the smallest endState for intersection work
   941  	for _, pred := range preds[1:] {
   942  		if blockLocs[pred.ID].endState.Size() < baseState.Size() {
   943  			baseState = blockLocs[pred.ID].endState
   944  			baseID = pred.ID
   945  		}
   946  	}
   947  
   948  	if state.loggingLevel > 2 {
   949  		state.logf("Starting %v with state from b%v:\n%v", b, baseID, state.blockEndStateString(blockLocs[baseID]))
   950  		for _, pred := range preds {
   951  			if pred.ID == baseID {
   952  				continue
   953  			}
   954  			state.logf("Merging in state from %v:\n%v", pred, state.blockEndStateString(blockLocs[pred.ID]))
   955  		}
   956  	}
   957  
   958  	state.currentState.reset(abt.T{})
   959  	// The normal logic of "reset" is included in the intersection loop below.
   960  
   961  	slotLocs := state.currentState.slots
   962  
   963  	// If this is the first call, do updates on the "baseState"; if this
   964  	// is a subsequent call, tweak the startState instead. Note that
   965  	// these "set" values are values; there are no side effects to
   966  	// other values as these are modified.
   967  	newState := baseState
   968  	if updating {
   969  		newState = blockLocs[b.ID].startState
   970  	}
   971  
   972  	for it := newState.Iterator(); !it.Done(); {
   973  		k, d := it.Next()
   974  		thisSlot := d.(*liveSlot)
   975  		x := thisSlot.VarLoc
   976  		x0 := x // initial value in newState
   977  
   978  		// Intersect this slot with the slot in all the predecessors
   979  		for _, other := range preds {
   980  			if !updating && other.ID == baseID {
   981  				continue
   982  			}
   983  			otherSlot := blockLocs[other.ID].endState.Find(k)
   984  			if otherSlot == nil {
   985  				x = VarLoc{}
   986  				break
   987  			}
   988  			y := otherSlot.(*liveSlot).VarLoc
   989  			x = x.intersect(y)
   990  			if x.absent() {
   991  				x = VarLoc{}
   992  				break
   993  			}
   994  		}
   995  
   996  		// Delete if necessary, but not otherwise (in order to maximize sharing).
   997  		if x.absent() {
   998  			if !x0.absent() {
   999  				blockChanged = true
  1000  				newState.Delete(k)
  1001  			}
  1002  			slotLocs[k] = VarLoc{}
  1003  			continue
  1004  		}
  1005  		if x != x0 {
  1006  			blockChanged = true
  1007  			newState.Insert(k, &liveSlot{VarLoc: x})
  1008  		}
  1009  
  1010  		slotLocs[k] = x
  1011  		mask := uint64(x.Registers)
  1012  		for {
  1013  			if mask == 0 {
  1014  				break
  1015  			}
  1016  			reg := uint8(bits.TrailingZeros64(mask))
  1017  			mask &^= 1 << reg
  1018  			state.currentState.registers[reg] = append(state.currentState.registers[reg], SlotID(k))
  1019  		}
  1020  	}
  1021  
  1022  	if previousBlock != nil {
  1023  		markChangedVars(blockLocs[previousBlock.ID].endState, newState)
  1024  	}
  1025  	locs.startState = newState
  1026  	return newState, blockChanged
  1027  }
  1028  
  1029  // processValue updates state.currentState to reflect v, a value with
  1030  // the names in vSlots and homed in vReg.  "v" becomes visible after
  1031  // execution of the instructions evaluating it. It reports whether any
  1032  // location changed; the changed VarIDs are recorded in state.changedVars.
  1033  func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) bool {
  1034  	locs := state.currentState
  1035  	changed := false
  1036  	setSlot := func(slot SlotID, loc VarLoc) {
  1037  		changed = true
  1038  		state.changedVars.add(ID(state.slotVars[slot]))
  1039  		state.changedSlots.add(ID(slot))
  1040  		state.currentState.slots[slot] = loc
  1041  	}
  1042  
  1043  	// Handle any register clobbering. Call operations, for example,
  1044  	// clobber all registers even though they don't explicitly write to
  1045  	// them.
  1046  	clobbers := uint64(opcodeTable[v.Op].reg.clobbers)
  1047  	for {
  1048  		if clobbers == 0 {
  1049  			break
  1050  		}
  1051  		reg := uint8(bits.TrailingZeros64(clobbers))
  1052  		clobbers &^= 1 << reg
  1053  
  1054  		for _, slot := range locs.registers[reg] {
  1055  			if state.loggingLevel > 1 {
  1056  				state.logf("at %v: %v clobbered out of %v\n", v, state.slots[slot], &state.registers[reg])
  1057  			}
  1058  
  1059  			last := locs.slots[slot]
  1060  			if last.absent() {
  1061  				state.f.Fatalf("at %v: slot %v in register %v with no location entry", v, state.slots[slot], &state.registers[reg])
  1062  				continue
  1063  			}
  1064  			regs := last.Registers &^ (1 << reg)
  1065  			setSlot(slot, VarLoc{regs, last.StackOffset})
  1066  		}
  1067  
  1068  		locs.registers[reg] = locs.registers[reg][:0]
  1069  	}
  1070  
  1071  	switch {
  1072  	case v.Op == OpVarDef:
  1073  		n := v.Aux.(*ir.Name)
  1074  		if ir.IsSynthetic(n) || !IsVarWantedForDebug(n) {
  1075  			break
  1076  		}
  1077  
  1078  		slotID := state.varParts[n][0]
  1079  		var stackOffset StackOffset
  1080  		if v.Op == OpVarDef {
  1081  			stackOffset = StackOffset(state.stackOffset(state.slots[slotID])<<1 | 1)
  1082  		}
  1083  		setSlot(slotID, VarLoc{0, stackOffset})
  1084  		if state.loggingLevel > 1 {
  1085  			if v.Op == OpVarDef {
  1086  				state.logf("at %v: stack-only var %v now live\n", v, state.slots[slotID])
  1087  			} else {
  1088  				state.logf("at %v: stack-only var %v now dead\n", v, state.slots[slotID])
  1089  			}
  1090  		}
  1091  
  1092  	case v.Op == OpArg:
  1093  		home := state.f.getHome(v.ID).(LocalSlot)
  1094  		stackOffset := state.stackOffset(home)<<1 | 1
  1095  		for _, slot := range vSlots {
  1096  			if state.loggingLevel > 1 {
  1097  				state.logf("at %v: arg %v now on stack in location %v\n", v, state.slots[slot], home)
  1098  				if last := locs.slots[slot]; !last.absent() {
  1099  					state.logf("at %v: unexpected arg op on already-live slot %v\n", v, state.slots[slot])
  1100  				}
  1101  			}
  1102  
  1103  			setSlot(slot, VarLoc{0, StackOffset(stackOffset)})
  1104  		}
  1105  
  1106  	case v.Op == OpStoreReg:
  1107  		home := state.f.getHome(v.ID).(LocalSlot)
  1108  		stackOffset := state.stackOffset(home)<<1 | 1
  1109  		for _, slot := range vSlots {
  1110  			last := locs.slots[slot]
  1111  			if last.absent() {
  1112  				if state.loggingLevel > 1 {
  1113  					state.logf("at %v: unexpected spill of unnamed register %s\n", v, vReg)
  1114  				}
  1115  				break
  1116  			}
  1117  
  1118  			setSlot(slot, VarLoc{last.Registers, StackOffset(stackOffset)})
  1119  			if state.loggingLevel > 1 {
  1120  				state.logf("at %v: %v spilled to stack location %v@%d\n", v, state.slots[slot], home, state.stackOffset(home))
  1121  			}
  1122  		}
  1123  
  1124  	case vReg != nil:
  1125  		if state.loggingLevel > 1 {
  1126  			newSlots := make([]bool, len(state.slots))
  1127  			for _, slot := range vSlots {
  1128  				newSlots[slot] = true
  1129  			}
  1130  
  1131  			for _, slot := range locs.registers[vReg.num] {
  1132  				if !newSlots[slot] {
  1133  					state.logf("at %v: overwrote %v in register %v\n", v, state.slots[slot], vReg)
  1134  				}
  1135  			}
  1136  		}
  1137  
  1138  		for _, slot := range locs.registers[vReg.num] {
  1139  			last := locs.slots[slot]
  1140  			setSlot(slot, VarLoc{last.Registers &^ (1 << uint8(vReg.num)), last.StackOffset})
  1141  		}
  1142  		locs.registers[vReg.num] = locs.registers[vReg.num][:0]
  1143  		locs.registers[vReg.num] = append(locs.registers[vReg.num], vSlots...)
  1144  		for _, slot := range vSlots {
  1145  			if state.loggingLevel > 1 {
  1146  				state.logf("at %v: %v now in %s\n", v, state.slots[slot], vReg)
  1147  			}
  1148  
  1149  			last := locs.slots[slot]
  1150  			setSlot(slot, VarLoc{1<<uint8(vReg.num) | last.Registers, last.StackOffset})
  1151  		}
  1152  	}
  1153  	return changed
  1154  }
  1155  
  1156  // varOffset returns the offset of slot within the user variable it was
  1157  // decomposed from. This has nothing to do with its stack offset.
  1158  func varOffset(slot LocalSlot) int64 {
  1159  	offset := slot.Off
  1160  	s := &slot
  1161  	for ; s.SplitOf != nil; s = s.SplitOf {
  1162  		offset += s.SplitOffset
  1163  	}
  1164  	return offset
  1165  }
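
        // For example, an expand_calls-style sub-slot with Off == 8 and no
        // SplitOf, and a decompose-style sub-slot with Off == 0, SplitOffset
        // == 8, and SplitOf pointing at the parent slot, both yield a
        // varOffset of 8.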
  1166  
  1167  // A pendingEntry represents the beginning of a location list entry, missing
  1168  // only its end coordinate.
  1169  type pendingEntry struct {
  1170  	present                bool
  1171  	startBlock, startValue ID
  1172  	// The location of each piece of the variable, in the same order as the
  1173  	// SlotIDs in varParts.
  1174  	pieces []VarLoc
  1175  }
  1176  
  1177  func (e *pendingEntry) clear() {
  1178  	e.present = false
  1179  	e.startBlock = 0
  1180  	e.startValue = 0
  1181  	clear(e.pieces)
  1182  }
  1183  
  1184  // canMerge reports whether a new location description is a superset
  1185  // of the (non-empty) pending location description; if so, the two
  1186  // can be merged (i.e., pending is still a valid and useful location
  1187  // description).
  1188  func canMerge(pending, new VarLoc) bool {
  1189  	if pending.absent() && new.absent() {
  1190  		return true
  1191  	}
  1192  	if pending.absent() || new.absent() {
  1193  		return false
  1194  	}
  1195  	// pending is not absent, therefore it has either a stack mapping,
  1196  	// or registers, or both.
  1197  	if pending.onStack() && pending.StackOffset != new.StackOffset {
  1198  		// if pending has a stack offset, then new must also, and it
  1199  		// must be the same (StackOffset encodes onStack).
  1200  		return false
  1201  	}
  1202  	if pending.Registers&new.Registers != pending.Registers {
  1203  		// There is at least one register in pending not mentioned in new.
  1204  		return false
  1205  	}
  1206  	return true
  1207  }
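
        // For example, if pending is on the stack at some offset and in
        // register 0, a new location with the same stack offset and registers
        // 0 and 1 can merge (it is a superset), while a new location holding
        // only register 1 cannot, because register 0 is no longer mentioned.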
  1208  
  1209  // firstReg returns the first register in set that is present.
  1210  func firstReg(set RegisterSet) uint8 {
  1211  	if set == 0 {
  1212  		// This is wrong, but there seem to be some situations where we
  1213  		// produce locations with no storage.
  1214  		return 0
  1215  	}
  1216  	return uint8(bits.TrailingZeros64(uint64(set)))
  1217  }
  1218  
  1219  // buildLocationLists builds location lists for all the user variables
  1220  // in state.f, using the information about block state in blockLocs.
  1221  // The resulting location lists are not yet complete. They are in
  1222  // terms of SSA values rather than PCs, and have no base address/end
  1223  // entries. They will be finished by PutLocationList.
  1224  func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
  1225  	// Run through the function in program text order, building up location
  1226  	// lists as we go. The heavy lifting has mostly already been done.
  1227  
  1228  	var prevBlock *Block
  1229  	for _, b := range state.f.Blocks {
  1230  		state.mergePredecessors(b, blockLocs, prevBlock, true)
  1231  
  1232  		// Handle any differences between the predecessor blocks and the previous block (which may not be a predecessor).
  1233  		for _, varID := range state.changedVars.contents() {
  1234  			state.updateVar(VarID(varID), b, BlockStart)
  1235  		}
  1236  		state.changedVars.clear()
  1237  
  1238  		if !blockLocs[b.ID].relevant {
  1239  			continue
  1240  		}
  1241  
  1242  		mustBeFirst := func(v *Value) bool {
  1243  			return v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() ||
  1244  				v.Op == OpArgIntReg || v.Op == OpArgFloatReg
  1245  		}
  1246  
  1247  		blockPrologComplete := func(v *Value) bool {
  1248  			if b.ID != state.f.Entry.ID {
  1249  				return !opcodeTable[v.Op].zeroWidth
  1250  			} else {
  1251  				return v.Op == OpInitMem
  1252  			}
  1253  		}
  1254  
  1255  		// Examine the prolog portion of the block to process special
  1256  		// zero-width ops such as Arg, Phi, LoweredGetClosurePtr (etc)
  1257  		// whose lifetimes begin at the block starting point. In an
  1258  		// entry block, allow for the possibility that we may see Arg
  1259  		// ops that appear _after_ other non-zero-width operations.
  1260  		// Example:
  1261  		//
  1262  		//   v33 = ArgIntReg <uintptr> {foo+0} [0] : AX (foo)
  1263  		//   v34 = ArgIntReg <uintptr> {bar+0} [0] : BX (bar)
  1264  		//   ...
  1265  		//   v77 = StoreReg <unsafe.Pointer> v67 : ctx+8[unsafe.Pointer]
  1266  		//   v78 = StoreReg <unsafe.Pointer> v68 : ctx[unsafe.Pointer]
  1267  		//   v79 = Arg <*uint8> {args} : args[*uint8] (args[*uint8])
  1268  		//   v80 = Arg <int> {args} [8] : args+8[int] (args+8[int])
  1269  		//   ...
  1270  		//   v1 = InitMem <mem>
  1271  		//
  1272  		// We can stop scanning the initial portion of the block when
  1273  		// we either see the InitMem op (for entry blocks) or the
  1274  		// first non-zero-width op (for other blocks).
  1275  		for idx := 0; idx < len(b.Values); idx++ {
  1276  			v := b.Values[idx]
  1277  			if blockPrologComplete(v) {
  1278  				break
  1279  			}
  1280  			// Consider only "lifetime begins at block start" ops.
  1281  			if !mustBeFirst(v) && v.Op != OpArg {
  1282  				continue
  1283  			}
  1284  			slots := state.valueNames[v.ID]
  1285  			reg, _ := state.f.getHome(v.ID).(*Register)
  1286  			changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
  1287  			if changed {
  1288  				for _, varID := range state.changedVars.contents() {
  1289  					state.updateVar(VarID(varID), v.Block, BlockStart)
  1290  				}
  1291  				state.changedVars.clear()
  1292  			}
  1293  		}
  1294  
  1295  		// Now examine the block again, handling things other than the
  1296  		// "begins at block start" lifetimes.
  1297  		zeroWidthPending := false
  1298  		prologComplete := false
  1299  		// Expect to see values in the pattern (arg|phi|closureptr)* (zerowidth|real)*.
  1300  		for _, v := range b.Values {
  1301  			if blockPrologComplete(v) {
  1302  				prologComplete = true
  1303  			}
  1304  			slots := state.valueNames[v.ID]
  1305  			reg, _ := state.f.getHome(v.ID).(*Register)
  1306  			changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
  1307  
  1308  			if opcodeTable[v.Op].zeroWidth {
  1309  				if prologComplete && mustBeFirst(v) {
  1310  					panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
  1311  				}
  1312  				if changed {
  1313  					if mustBeFirst(v) || v.Op == OpArg {
  1314  						// already taken care of above
  1315  						continue
  1316  					}
  1317  					zeroWidthPending = true
  1318  				}
  1319  				continue
  1320  			}
  1321  			if !changed && !zeroWidthPending {
  1322  				continue
  1323  			}
  1324  
  1325  			// Not zero-width; i.e., a "real" instruction.
  1326  			zeroWidthPending = false
  1327  			for _, varID := range state.changedVars.contents() {
  1328  				state.updateVar(VarID(varID), v.Block, v)
  1329  			}
  1330  			state.changedVars.clear()
  1331  		}
  1332  		for _, varID := range state.changedVars.contents() {
  1333  			state.updateVar(VarID(varID), b, BlockEnd)
  1334  		}
  1335  
  1336  		prevBlock = b
  1337  	}
  1338  
  1339  	if state.loggingLevel > 0 {
  1340  		state.logf("location lists:\n")
  1341  	}
  1342  
  1343  	// Flush any leftover entries live at the end of the last block.
  1344  	for varID := range state.lists {
  1345  		state.writePendingEntry(VarID(varID), -1, FuncEnd.ID)
  1346  		list := state.lists[varID]
  1347  		if state.loggingLevel > 0 {
  1348  			if len(list) == 0 {
  1349  				state.logf("\t%v : empty list\n", state.vars[varID])
  1350  			} else {
  1351  				state.logf("\t%v : %q\n", state.vars[varID], hex.EncodeToString(state.lists[varID]))
  1352  			}
  1353  		}
  1354  	}
  1355  }
  1356  
  1357  // updateVar updates the pending location list entry for varID to
  1358  // reflect the new locations in curLoc, beginning at v in block b.
  1359  // v may be one of the special values indicating block start or end.
  1360  func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
  1361  	curLoc := state.currentState.slots
  1362  	// Assemble the location list entry with whatever's live.
  1363  	empty := true
  1364  	for _, slotID := range state.varSlots[varID] {
  1365  		if !curLoc[slotID].absent() {
  1366  			empty = false
  1367  			break
  1368  		}
  1369  	}
  1370  	pending := &state.pendingEntries[varID]
  1371  	if empty {
  1372  		state.writePendingEntry(varID, b.ID, v.ID)
  1373  		pending.clear()
  1374  		return
  1375  	}
  1376  
  1377  	// Extend the previous entry if possible.
  1378  	if pending.present {
  1379  		merge := true
  1380  		for i, slotID := range state.varSlots[varID] {
  1381  			if !canMerge(pending.pieces[i], curLoc[slotID]) {
  1382  				merge = false
  1383  				break
  1384  			}
  1385  		}
  1386  		if merge {
  1387  			return
  1388  		}
  1389  	}
  1390  
  1391  	state.writePendingEntry(varID, b.ID, v.ID)
  1392  	pending.present = true
  1393  	pending.startBlock = b.ID
  1394  	pending.startValue = v.ID
  1395  	for i, slot := range state.varSlots[varID] {
  1396  		pending.pieces[i] = curLoc[slot]
  1397  	}
  1398  }
  1399  
  1400  // writePendingEntry writes out the pending entry for varID, if any,
  1401  // terminated at endBlock/Value.
  1402  func (state *debugState) writePendingEntry(varID VarID, endBlock, endValue ID) {
  1403  	pending := state.pendingEntries[varID]
  1404  	if !pending.present {
  1405  		return
  1406  	}
  1407  
  1408  	// Pack the start/end coordinates into the start/end addresses
  1409  	// of the entry, for decoding by PutLocationList.
  1410  	start, startOK := encodeValue(state.ctxt, pending.startBlock, pending.startValue)
  1411  	end, endOK := encodeValue(state.ctxt, endBlock, endValue)
  1412  	if !startOK || !endOK {
  1413  		// If someone writes a function that uses >65K values,
  1414  		// they get incomplete debug info on 32-bit platforms.
  1415  		return
  1416  	}
  1417  	if start == end {
  1418  		if state.loggingLevel > 1 {
  1419  			// Logged via logf, so this is gated by GOSSAFUNC; it should fire very rarely.
  1420  			// TODO this fires a lot, need to figure out why.
  1421  			state.logf("Skipping empty location list for %v in %s\n", state.vars[varID], state.f.Name)
  1422  		}
  1423  		return
  1424  	}
  1425  
  1426  	list := state.lists[varID]
  1427  	list = appendPtr(state.ctxt, list, start)
  1428  	list = appendPtr(state.ctxt, list, end)
  1429  	// Where to write the length of the location description once
  1430  	// we know how big it is.
  1431  	sizeIdx := len(list)
  1432  	list = list[:len(list)+2]
  1433  
  1434  	if state.loggingLevel > 1 {
  1435  		var partStrs []string
  1436  		for i, slot := range state.varSlots[varID] {
  1437  			partStrs = append(partStrs, fmt.Sprintf("%v@%v", state.slots[slot], state.LocString(pending.pieces[i])))
  1438  		}
  1439  		state.logf("Add entry for %v: \tb%vv%v-b%vv%v = \t%v\n", state.vars[varID], pending.startBlock, pending.startValue, endBlock, endValue, strings.Join(partStrs, " "))
  1440  	}
  1441  
  1442  	for i, slotID := range state.varSlots[varID] {
  1443  		loc := pending.pieces[i]
  1444  		slot := state.slots[slotID]
  1445  
  1446  		if !loc.absent() {
  1447  			if loc.onStack() {
  1448  				if loc.stackOffsetValue() == 0 {
  1449  					list = append(list, dwarf.DW_OP_call_frame_cfa)
  1450  				} else {
  1451  					list = append(list, dwarf.DW_OP_fbreg)
  1452  					list = dwarf.AppendSleb128(list, int64(loc.stackOffsetValue()))
  1453  				}
  1454  			} else {
  1455  				regnum := state.ctxt.Arch.DWARFRegisters[state.registers[firstReg(loc.Registers)].ObjNum()]
  1456  				if regnum < 32 {
  1457  					list = append(list, dwarf.DW_OP_reg0+byte(regnum))
  1458  				} else {
  1459  					list = append(list, dwarf.DW_OP_regx)
  1460  					list = dwarf.AppendUleb128(list, uint64(regnum))
  1461  				}
  1462  			}
  1463  		}
  1464  
  1465  		if len(state.varSlots[varID]) > 1 {
  1466  			list = append(list, dwarf.DW_OP_piece)
  1467  			list = dwarf.AppendUleb128(list, uint64(slot.Type.Size()))
  1468  		}
  1469  	}
  1470  	state.ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
  1471  	state.lists[varID] = list
  1472  }
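
        // For reference, each intermediate entry appended above has the layout
        // (ptr denotes ctxt.Arch.PtrSize bytes):
        //
        //	[start: ptr] [end: ptr] [exprlen: uint16] [exprlen bytes of DWARF location expression]
        //
        // where start and end are block/value coordinates packed by encodeValue and
        // only later translated to PCs. A minimal reading sketch (readEntry is a
        // hypothetical helper, not part of this package; the real consumers are the
        // PutLocationList variants below):
        //
        //	func readEntry(ctxt *obj.Link, list []byte) (start, end uint64, expr, rest []byte) {
        //		ps := ctxt.Arch.PtrSize
        //		start = readPtr(ctxt, list)
        //		end = readPtr(ctxt, list[ps:])
        //		n := int(ctxt.Arch.ByteOrder.Uint16(list[2*ps:]))
        //		expr = list[2*ps+2 : 2*ps+2+n]
        //		return start, end, expr, list[2*ps+2+n:]
        //	}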
  1473  
  1474  // PutLocationList adds list (a location list in its intermediate
  1475  // representation) to listSym.
  1476  func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
  1477  	if buildcfg.Experiment.Dwarf5 {
  1478  		debugInfo.PutLocationListDwarf5(list, ctxt, listSym, startPC)
  1479  	} else {
  1480  		debugInfo.PutLocationListDwarf4(list, ctxt, listSym, startPC)
  1481  	}
  1482  }
  1483  
  1484  // PutLocationListDwarf5 adds list (a location list in its intermediate
  1485  // representation) to listSym in DWARF 5 format. NB: this is a somewhat
  1486  // hacky implementation in that it actually reads DWARF4-encoded
  1487  // info from list (with all its DWARF4-specific quirks) and then re-encodes
  1488  // it in DWARF5. It would probably be better at some point to have
  1489  // ssa/debug encode the list in a version-independent form and then
  1490  // have this func (and PutLocationListDwarf4) introduce the quirks.
  1491  func (debugInfo *FuncDebug) PutLocationListDwarf5(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
  1492  	getPC := debugInfo.GetPC
  1493  
  1494  	// base address entry
  1495  	listSym.WriteInt(ctxt, listSym.Size, 1, dwarf.DW_LLE_base_addressx)
  1496  	listSym.WriteDwTxtAddrx(ctxt, listSym.Size, startPC, ctxt.DwTextCount*2)
  1497  
  1498  	var stbuf, enbuf [10]byte
  1499  	stb, enb := stbuf[:], enbuf[:]
  1500  	// Re-read list, translating its address from block/value ID to PC.
  1501  	for i := 0; i < len(list); {
  1502  		begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:])))
  1503  		end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:])))
  1504  
  1505  		// Write LLE_offset_pair tag followed by payload (ULEB for start
  1506  		// and then end).
  1507  		listSym.WriteInt(ctxt, listSym.Size, 1, dwarf.DW_LLE_offset_pair)
  1508  		stb, enb = stb[:0], enb[:0]
  1509  		stb = dwarf.AppendUleb128(stb, uint64(begin))
  1510  		enb = dwarf.AppendUleb128(enb, uint64(end))
  1511  		listSym.WriteBytes(ctxt, listSym.Size, stb)
  1512  		listSym.WriteBytes(ctxt, listSym.Size, enb)
  1513  
  1514  		// The encoded data in "list" is in DWARF4 format, which uses
  1515  		// a 2-byte length; DWARF5 uses an LEB-encoded value for this
  1516  		// length. Read the length and then re-encode it.
  1517  		i += 2 * ctxt.Arch.PtrSize
  1518  		datalen := int(ctxt.Arch.ByteOrder.Uint16(list[i:]))
  1519  		i += 2
  1520  		stb = stb[:0]
  1521  		stb = dwarf.AppendUleb128(stb, uint64(datalen))
  1522  		listSym.WriteBytes(ctxt, listSym.Size, stb)               // copy length
  1523  		listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // loc desc
  1524  
  1525  		i += datalen
  1526  	}
  1527  
  1528  	// Terminator
  1529  	listSym.WriteInt(ctxt, listSym.Size, 1, dwarf.DW_LLE_end_of_list)
  1530  }
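
        // The DWARF5 stream written by this function therefore has the shape below
        // (a layout sketch of the code above, not a separate encoder):
        //
        //	DW_LLE_base_addressx <address-table index for startPC>                    (once)
        //	DW_LLE_offset_pair   <uleb begin> <uleb end> <uleb exprlen> <expr bytes>  (per entry)
        //	DW_LLE_end_of_list                                                        (terminator)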
  1531  
  1532  // PutLocationListDwarf4 adds list (a location list in its intermediate
  1533  // representation) to listSym in DWARF 4 format.
  1534  func (debugInfo *FuncDebug) PutLocationListDwarf4(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
  1535  	getPC := debugInfo.GetPC
  1536  
  1537  	if ctxt.UseBASEntries {
  1538  		listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, ^0)
  1539  		listSym.WriteAddr(ctxt, listSym.Size, ctxt.Arch.PtrSize, startPC, 0)
  1540  	}
  1541  
  1542  	// Re-read list, translating its address from block/value ID to PC.
  1543  	for i := 0; i < len(list); {
  1544  		begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:])))
  1545  		end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:])))
  1546  
  1547  		// Horrible hack. If a range contains only zero-width
  1548  		// instructions, e.g. an Arg, and it's at the beginning of the
  1549  		// function, this would be indistinguishable from an
  1550  		// end entry. Fudge it.
  1551  		if begin == 0 && end == 0 {
  1552  			end = 1
  1553  		}
  1554  
  1555  		if ctxt.UseBASEntries {
  1556  			listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
  1557  			listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
  1558  		} else {
  1559  			listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
  1560  			listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
  1561  		}
  1562  
  1563  		i += 2 * ctxt.Arch.PtrSize
  1564  		datalen := 2 + int(ctxt.Arch.ByteOrder.Uint16(list[i:]))
  1565  		listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // copy datalen and location encoding
  1566  		i += datalen
  1567  	}
  1568  
  1569  	// Location list contents, now with real PCs.
  1570  	// End entry.
  1571  	listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
  1572  	listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
  1573  }
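
        // The DWARF4 stream written by this function therefore has the shape below
        // (a layout sketch; all bracketed fields except exprlen are pointer-sized):
        //
        //	[^0] [address of startPC]                                     (base-address-selection
        //	                                                               entry, only with UseBASEntries)
        //	[begin] [end] [uint16 exprlen] [exprlen bytes of expression]  (per entry; begin/end are
        //	                                                               offsets from the base address,
        //	                                                               or CU-relative addresses when
        //	                                                               UseBASEntries is off)
        //	[0] [0]                                                       (end-of-list entry)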
  1574  
  1575  // Pack a value and block ID into an address-sized uint, returning the
  1576  // encoded value and a boolean indicating whether the encoding succeeded.
  1577  // For 32-bit architectures the process may fail for very large
  1578  // procedures (the theory being that it's ok to have degraded debug
  1579  // quality in this case).
  1580  func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) {
  1581  	if ctxt.Arch.PtrSize == 8 {
  1582  		result := uint64(b)<<32 | uint64(uint32(v))
  1583  		//ctxt.Logf("b %#x (%d) v %#x (%d) -> %#x\n", b, b, v, v, result)
  1584  		return result, true
  1585  	}
  1586  	if ctxt.Arch.PtrSize != 4 {
  1587  		panic("unexpected pointer size")
  1588  	}
  1589  	if ID(int16(b)) != b || ID(int16(v)) != v {
  1590  		return 0, false
  1591  	}
  1592  	return uint64(b)<<16 | uint64(uint16(v)), true
  1593  }
  1594  
  1595  // Unpack a value and block ID encoded by encodeValue.
  1596  func decodeValue(ctxt *obj.Link, word uint64) (ID, ID) {
  1597  	if ctxt.Arch.PtrSize == 8 {
  1598  		b, v := ID(word>>32), ID(word)
  1599  		//ctxt.Logf("%#x -> b %#x (%d) v %#x (%d)\n", word, b, b, v, v)
  1600  		return b, v
  1601  	}
  1602  	if ctxt.Arch.PtrSize != 4 {
  1603  		panic("unexpected pointer size")
  1604  	}
  1605  	return ID(word >> 16), ID(int16(word))
  1606  }
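
        // For example, block 3 / value 17 packs into 0x0000000300000011 on a 64-bit
        // target (block in the high half, value in the low half) and into the 32-bit
        // word 0x00030011 on a 32-bit target, where IDs outside the int16 range make
        // encodeValue report failure:
        //
        //	word, ok := encodeValue(ctxt, 3, 17) // 0x300000011 on 64-bit targets, ok == true
        //	b, v := decodeValue(ctxt, word)      // b == 3, v == 17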
  1607  
  1608  // Append a pointer-sized uint to buf.
  1609  func appendPtr(ctxt *obj.Link, buf []byte, word uint64) []byte {
  1610  	if cap(buf) < len(buf)+20 {
  1611  		b := make([]byte, len(buf), 20+cap(buf)*2)
  1612  		copy(b, buf)
  1613  		buf = b
  1614  	}
  1615  	writeAt := len(buf)
  1616  	buf = buf[0 : len(buf)+ctxt.Arch.PtrSize]
  1617  	writePtr(ctxt, buf[writeAt:], word)
  1618  	return buf
  1619  }
  1620  
  1621  // Write a pointer-sized uint to the beginning of buf.
  1622  func writePtr(ctxt *obj.Link, buf []byte, word uint64) {
  1623  	switch ctxt.Arch.PtrSize {
  1624  	case 4:
  1625  		ctxt.Arch.ByteOrder.PutUint32(buf, uint32(word))
  1626  	case 8:
  1627  		ctxt.Arch.ByteOrder.PutUint64(buf, word)
  1628  	default:
  1629  		panic("unexpected pointer size")
  1630  	}
  1631  
  1632  }
  1633  
  1634  // Read a pointer-sized uint from the beginning of buf.
  1635  func readPtr(ctxt *obj.Link, buf []byte) uint64 {
  1636  	switch ctxt.Arch.PtrSize {
  1637  	case 4:
  1638  		return uint64(ctxt.Arch.ByteOrder.Uint32(buf))
  1639  	case 8:
  1640  		return ctxt.Arch.ByteOrder.Uint64(buf)
  1641  	default:
  1642  		panic("unexpected pointer size")
  1643  	}
  1644  
  1645  }
  1646  
  1647  // SetupLocList creates the initial portion of a location list for a
  1648  // user variable. It emits the encoded start/end of the range and a
  1649  // placeholder for the size. The return values are the new list and the
  1650  // index within the list at which the size is to be written later.
  1651  func SetupLocList(ctxt *obj.Link, entryID ID, list []byte, st, en ID) ([]byte, int) {
  1652  	start, startOK := encodeValue(ctxt, entryID, st)
  1653  	end, endOK := encodeValue(ctxt, entryID, en)
  1654  	if !startOK || !endOK {
  1655  		// This could happen if someone writes a function that uses
  1656  		// >65K values on a 32-bit platform. Hopefully a degraded debugging
  1657  		// experience is ok in that case.
  1658  		return nil, 0
  1659  	}
  1660  	list = appendPtr(ctxt, list, start)
  1661  	list = appendPtr(ctxt, list, end)
  1662  
  1663  	// Where to write the length of the location description once
  1664  	// we know how big it is.
  1665  	sizeIdx := len(list)
  1666  	list = list[:len(list)+2]
  1667  	return list, sizeIdx
  1668  }
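
        // A typical use, mirroring BuildFuncDebugNoOptimized below: set up the entry,
        // append a DWARF location expression, then patch in its size. (afterProlog is
        // a stand-in for whatever value ID ends the range.)
        //
        //	list, sizeIdx := SetupLocList(ctxt, f.Entry.ID, nil, BlockStart.ID, afterProlog)
        //	if list != nil {
        //		list = append(list, dwarf.DW_OP_call_frame_cfa) // or any other location expression
        //		ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
        //	}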
  1669  
  1670  // locatePrologEnd walks the entry block of a function with incoming
  1671  // register arguments and locates the last instruction in the prolog
  1672  // that spills a register arg. It returns the ID of that instruction,
  1673  // and (where appropriate) the prolog's lowered closure ptr store inst.
  1674  //
  1675  // Example:
  1676  //
  1677  //	b1:
  1678  //	    v3 = ArgIntReg <int> {p1+0} [0] : AX
  1679  //	    ... more arg regs ..
  1680  //	    v4 = ArgFloatReg <float32> {f1+0} [0] : X0
  1681  //	    v52 = MOVQstore <mem> {p1} v2 v3 v1
  1682  //	    ... more stores ...
  1683  //	    v68 = MOVSSstore <mem> {f4} v2 v67 v66
  1684  //	    v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
  1685  //
  1686  // Important: locatePrologEnd is expected to work properly only with
  1687  // optimization turned off (e.g. "-N"). If optimization is enabled
  1688  // we can't be assured of finding all input arguments spilled in the
  1689  // entry block prolog.
  1690  func locatePrologEnd(f *Func, needCloCtx bool) (ID, *Value) {
  1691  
  1692  	// returns true if this instruction looks like it moves an ABI
  1693  	// register (or context register for rangefunc bodies) to the
  1694  	// stack, along with the value being stored.
  1695  	isRegMoveLike := func(v *Value) (bool, ID) {
  1696  		n, ok := v.Aux.(*ir.Name)
  1697  		var r ID
  1698  		if (!ok || n.Class != ir.PPARAM) && !needCloCtx {
  1699  			return false, r
  1700  		}
  1701  		regInputs, memInputs, spInputs := 0, 0, 0
  1702  		for _, a := range v.Args {
  1703  			if a.Op == OpArgIntReg || a.Op == OpArgFloatReg ||
  1704  				(needCloCtx && a.Op.isLoweredGetClosurePtr()) {
  1705  				regInputs++
  1706  				r = a.ID
  1707  			} else if a.Type.IsMemory() {
  1708  				memInputs++
  1709  			} else if a.Op == OpSP {
  1710  				spInputs++
  1711  			} else {
  1712  				return false, r
  1713  			}
  1714  		}
  1715  		return v.Type.IsMemory() && memInputs == 1 &&
  1716  			regInputs == 1 && spInputs == 1, r
  1717  	}
  1718  
  1719  	// OpArg*Reg values we've seen so far on our forward walk,
  1720  	// for which we have not yet seen a corresponding spill.
  1721  	regArgs := make([]ID, 0, 32)
  1722  
  1723  	// removeReg tries to remove a value from regArgs, returning true
  1724  	// if found and removed, or false otherwise.
  1725  	removeReg := func(r ID) bool {
  1726  		for i := 0; i < len(regArgs); i++ {
  1727  			if regArgs[i] == r {
  1728  				regArgs = slices.Delete(regArgs, i, i+1)
  1729  				return true
  1730  			}
  1731  		}
  1732  		return false
  1733  	}
  1734  
  1735  	// Walk forwards through the block. When we see OpArg*Reg, record
  1736  	// the value it produces in the regArgs list. When we see a store that uses
  1737  	// the value, remove the entry. When we hit the last store (use)
  1738  	// then we've arrived at the end of the prolog.
  1739  	var cloRegStore *Value
  1740  	for k, v := range f.Entry.Values {
  1741  		if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
  1742  			regArgs = append(regArgs, v.ID)
  1743  			continue
  1744  		}
  1745  		if needCloCtx && v.Op.isLoweredGetClosurePtr() {
  1746  			regArgs = append(regArgs, v.ID)
  1747  			cloRegStore = v
  1748  			continue
  1749  		}
  1750  		if ok, r := isRegMoveLike(v); ok {
  1751  			if removed := removeReg(r); removed {
  1752  				if len(regArgs) == 0 {
  1753  					// Found our last spill; return the value after
  1754  					// it. Note that it is possible that this spill is
  1755  					// the last instruction in the block. If so, then
  1756  					// return the "end of block" sentinel.
  1757  					if k < len(f.Entry.Values)-1 {
  1758  						return f.Entry.Values[k+1].ID, cloRegStore
  1759  					}
  1760  					return BlockEnd.ID, cloRegStore
  1761  				}
  1762  			}
  1763  		}
  1764  		if v.Op.IsCall() {
  1765  			// if we hit a call, we've gone too far.
  1766  			return v.ID, cloRegStore
  1767  		}
  1768  	}
  1769  	// nothing found
  1770  	return ID(-1), cloRegStore
  1771  }
  1772  
  1773  // isNamedRegParam returns true if the param corresponding to "p"
  1774  // is a named, non-blank input parameter assigned to one or more
  1775  // registers.
  1776  func isNamedRegParam(p abi.ABIParamAssignment) bool {
  1777  	if p.Name == nil {
  1778  		return false
  1779  	}
  1780  	n := p.Name
  1781  	if n.Sym() == nil || n.Sym().IsBlank() {
  1782  		return false
  1783  	}
  1784  	if len(p.Registers) == 0 {
  1785  		return false
  1786  	}
  1787  	return true
  1788  }
  1789  
  1790  // BuildFuncDebugNoOptimized populates a FuncDebug object "rval" with
  1791  // entries corresponding to the register-resident input parameters for
  1792  // the function "f"; it is used when we are compiling without
  1793  // optimization but the register ABI is enabled. For each reg param,
  1794  // it constructs a 2-element location list: the first element holds
  1795  // the input register, and the second element holds the stack location
  1796  // of the param (the assumption being that when optimization is off,
  1797  // each input param reg will be spilled in the prolog). In addition
  1798  // to the register params, here we also build location lists (where
  1799  // appropriate) for the ".closureptr" compiler-synthesized variable
  1800  // needed by the debugger for range func bodies.
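        //
        // For instance, for a single parameter arriving in AX on amd64, the list built
        // here is conceptually (a sketch; the range endpoints are block/value
        // coordinates that are translated to PCs later):
        //
        //	[<entry>, end of prolog): DW_OP_reg0            (the parameter's register, AX)
        //	[end of prolog, <end>):   DW_OP_fbreg <offset>  (its spill slot, or
        //	                                                 DW_OP_call_frame_cfa if the offset is 0)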
  1801  func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
  1802  	needCloCtx := f.CloSlot != nil
  1803  	pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
  1804  
  1805  	// Look to see if we have any named register-promoted parameters,
  1806  	// and/or whether we need location info for the ".closureptr"
  1807  	// synthetic variable; if not bail early and let the caller sort
  1808  	// things out for the remainder of the params/locals.
  1809  	numRegParams := 0
  1810  	for _, inp := range pri.InParams() {
  1811  		if isNamedRegParam(inp) {
  1812  			numRegParams++
  1813  		}
  1814  	}
  1815  	if numRegParams == 0 && !needCloCtx {
  1816  		return
  1817  	}
  1818  
  1819  	state := debugState{f: f}
  1820  
  1821  	if loggingEnabled {
  1822  		state.logf("generating -N reg param loc lists for func %q\n", f.Name)
  1823  	}
  1824  
  1825  	// cloReg stores the obj register number in which the context register
  1826  	// appears within the function prolog, where appropriate.
  1827  	var cloReg int16
  1828  
  1829  	extraForCloCtx := 0
  1830  	if needCloCtx {
  1831  		extraForCloCtx = 1
  1832  	}
  1833  
  1834  	// Allocate location lists.
  1835  	rval.LocationLists = make([][]byte, numRegParams+extraForCloCtx)
  1836  
  1837  	// Locate the value corresponding to the last spill of
  1838  	// an input register.
  1839  	afterPrologVal, cloRegStore := locatePrologEnd(f, needCloCtx)
  1840  
  1841  	if needCloCtx {
  1842  		reg, _ := state.f.getHome(cloRegStore.ID).(*Register)
  1843  		cloReg = reg.ObjNum()
  1844  		if loggingEnabled {
  1845  			state.logf("needCloCtx is true for func %q, cloreg=%v\n",
  1846  				f.Name, reg)
  1847  		}
  1848  	}
  1849  
  1850  	addVarSlot := func(name *ir.Name, typ *types.Type) {
  1851  		sl := LocalSlot{N: name, Type: typ, Off: 0}
  1852  		rval.Vars = append(rval.Vars, name)
  1853  		rval.Slots = append(rval.Slots, sl)
  1854  		slid := len(rval.VarSlots)
  1855  		rval.VarSlots = append(rval.VarSlots, []SlotID{SlotID(slid)})
  1856  	}
  1857  
  1858  	// Make an initial pass to populate the vars/slots for our return
  1859  	// value, covering first the input parameters and then (if needed)
  1860  	// the special ".closureptr" var for rangefunc bodies.
  1861  	params := []abi.ABIParamAssignment{}
  1862  	for _, inp := range pri.InParams() {
  1863  		if !isNamedRegParam(inp) {
  1864  			// will be sorted out elsewhere
  1865  			continue
  1866  		}
  1867  		if !IsVarWantedForDebug(inp.Name) {
  1868  			continue
  1869  		}
  1870  		addVarSlot(inp.Name, inp.Type)
  1871  		params = append(params, inp)
  1872  	}
  1873  	if needCloCtx {
  1874  		addVarSlot(f.CloSlot, f.CloSlot.Type())
  1875  		cloAssign := abi.ABIParamAssignment{
  1876  			Type:      f.CloSlot.Type(),
  1877  			Name:      f.CloSlot,
  1878  			Registers: []abi.RegIndex{0}, // dummy
  1879  		}
  1880  		params = append(params, cloAssign)
  1881  	}
  1882  
  1883  	// Walk the input params again and process the register-resident elements.
  1884  	pidx := 0
  1885  	for _, inp := range params {
  1886  		if !isNamedRegParam(inp) {
  1887  			// will be sorted out elsewhere
  1888  			continue
  1889  		}
  1890  		if !IsVarWantedForDebug(inp.Name) {
  1891  			continue
  1892  		}
  1893  
  1894  		sl := rval.Slots[pidx]
  1895  		n := rval.Vars[pidx]
  1896  
  1897  		if afterPrologVal == ID(-1) {
  1898  			// This can happen for degenerate functions with infinite
  1899  			// loops such as that in issue 45948. In such cases, leave
  1900  			// the var/slot set up for the param, but don't try to
  1901  			// emit a location list.
  1902  			if loggingEnabled {
  1903  				state.logf("locatePrologEnd failed, skipping %v\n", n)
  1904  			}
  1905  			pidx++
  1906  			continue
  1907  		}
  1908  
  1909  		// Param is arriving in one or more registers. We need a 2-element
  1910  		// location expression for it. First entry in location list
  1911  		// will correspond to lifetime in input registers.
  1912  		list, sizeIdx := SetupLocList(ctxt, f.Entry.ID, rval.LocationLists[pidx],
  1913  			BlockStart.ID, afterPrologVal)
  1914  		if list == nil {
  1915  			pidx++
  1916  			continue
  1917  		}
  1918  		if loggingEnabled {
  1919  			state.logf("param %v:\n  [<entry>, %d]:\n", n, afterPrologVal)
  1920  		}
  1921  		rtypes, _ := inp.RegisterTypesAndOffsets()
  1922  		padding := make([]uint64, 0, 32)
  1923  		padding = inp.ComputePadding(padding)
  1924  		for k, r := range inp.Registers {
  1925  			var reg int16
  1926  			if n == f.CloSlot {
  1927  				reg = cloReg
  1928  			} else {
  1929  				reg = ObjRegForAbiReg(r, f.Config)
  1930  			}
  1931  			dwreg := ctxt.Arch.DWARFRegisters[reg]
  1932  			if dwreg < 32 {
  1933  				list = append(list, dwarf.DW_OP_reg0+byte(dwreg))
  1934  			} else {
  1935  				list = append(list, dwarf.DW_OP_regx)
  1936  				list = dwarf.AppendUleb128(list, uint64(dwreg))
  1937  			}
  1938  			if loggingEnabled {
  1939  				state.logf("    piece %d -> dwreg %d", k, dwreg)
  1940  			}
  1941  			if len(inp.Registers) > 1 {
  1942  				list = append(list, dwarf.DW_OP_piece)
  1943  				ts := rtypes[k].Size()
  1944  				list = dwarf.AppendUleb128(list, uint64(ts))
  1945  				if padding[k] > 0 {
  1946  					if loggingEnabled {
  1947  						state.logf(" [pad %d bytes]", padding[k])
  1948  					}
  1949  					list = append(list, dwarf.DW_OP_piece)
  1950  					list = dwarf.AppendUleb128(list, padding[k])
  1951  				}
  1952  			}
  1953  			if loggingEnabled {
  1954  				state.logf("\n")
  1955  			}
  1956  		}
  1957  		// fill in length of location expression element
  1958  		ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
  1959  
  1960  		// Second entry in the location list will be the stack home
  1961  		// of the param, once it has been spilled.  Emit that now.
  1962  		list, sizeIdx = SetupLocList(ctxt, f.Entry.ID, list,
  1963  			afterPrologVal, FuncEnd.ID)
  1964  		if list == nil {
  1965  			pidx++
  1966  			continue
  1967  		}
  1968  		soff := stackOffset(sl)
  1969  		if soff == 0 {
  1970  			list = append(list, dwarf.DW_OP_call_frame_cfa)
  1971  		} else {
  1972  			list = append(list, dwarf.DW_OP_fbreg)
  1973  			list = dwarf.AppendSleb128(list, int64(soff))
  1974  		}
  1975  		if loggingEnabled {
  1976  			state.logf("  [%d, <end>): stackOffset=%d\n", afterPrologVal, soff)
  1977  		}
  1978  
  1979  		// fill in size
  1980  		ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
  1981  
  1982  		rval.LocationLists[pidx] = list
  1983  		pidx++
  1984  	}
  1985  }
  1986  
  1987  // IsVarWantedForDebug returns true if the debug info for the node should
  1988  // be generated.
  1989  // For example, internal variables for range-over-func loops have little
  1990  // value to users, so we don't generate debug info for them.
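        //
        // For example (illustrative names only): "x", and "&x" once its "&" prefix is
        // stripped, are wanted; "#yield"-prefixed names are wanted because delve relies
        // on them; any other "#"-prefixed compiler-internal name is not.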
  1991  func IsVarWantedForDebug(n ir.Node) bool {
  1992  	name := n.Sym().Name
  1993  	if len(name) > 0 && name[0] == '&' {
  1994  		name = name[1:]
  1995  	}
  1996  	if len(name) > 0 && name[0] == '#' {
  1997  		// #yield is used by delve.
  1998  		return strings.HasPrefix(name, "#yield")
  1999  	}
  2000  	return true
  2001  }
  2002  

View as plain text