Source file src/cmd/compile/internal/walk/walk.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"fmt"
	"internal/abi"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/rttype"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/src"
)

// The constant is known to runtime.
const tmpstringbufsize = 32

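// Walk performs the walk pass on fn: it runs the order pass over the
// function body and then walks every statement, lowering it into the
// form expected by the SSA backend, before eagerly computing the sizes
// of all declared variables.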
func Walk(fn *ir.Func) {
	ir.CurFunc = fn

	// Set and then clear a package-level cache of static values for this fn.
	// (At some point, it might be worthwhile to have a walkState structure
	// that gets passed everywhere where things like this can go.)
	staticValues = findStaticValues(fn)
	defer func() { staticValues = nil }()

	errorsBefore := base.Errors()
	order(fn)
	if base.Errors() > errorsBefore {
		return
	}

	if base.Flag.W != 0 {
		s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
		ir.DumpList(s, ir.CurFunc.Body)
	}

	walkStmtList(ir.CurFunc.Body)
	if base.Flag.W != 0 {
		s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
		ir.DumpList(s, ir.CurFunc.Body)
	}

	// Eagerly compute sizes of all variables for SSA.
	for _, n := range fn.Dcl {
		types.CalcSize(n.Type())
	}
}

// walkRecv walks an ORECV node.
func walkRecv(n *ir.UnaryExpr) ir.Node {
	if n.Typecheck() == 0 {
		base.Fatalf("missing typecheck: %+v", n)
	}
	init := ir.TakeInit(n)

	n.X = walkExpr(n.X, &init)
	call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
	return ir.InitExpr(init, call)
}

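// convas ensures that the right-hand side of the assignment n can be
// assigned to the type of the left-hand side, inserting and walking a
// conversion if the two types are not identical.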
func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
	if n.Op() != ir.OAS {
		base.Fatalf("convas: not OAS %v", n.Op())
	}
	n.SetTypecheck(1)

	if n.X == nil || n.Y == nil {
		return n
	}

	lt := n.X.Type()
	rt := n.Y.Type()
	if lt == nil || rt == nil {
		return n
	}

	if ir.IsBlank(n.X) {
		n.Y = typecheck.DefaultLit(n.Y, nil)
		return n
	}

	if !types.Identical(lt, rt) {
		n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
		n.Y = walkExpr(n.Y, init)
	}
	types.CalcSize(n.Y.Type())

	return n
}

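// vmkcall builds, typechecks, and walks a call to the function node fn
// with arguments va, records t as the call's result type, and appends
// any generated statements to init.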
func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
	if init == nil {
		base.Fatalf("mkcall with nil init: %v", fn)
	}
	if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
		base.Fatalf("mkcall %v %v", fn, fn.Type())
	}

	n := fn.Type().NumParams()
	if n != len(va) {
		base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
	}

	call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr)
	call.SetType(t)
	return walkExpr(call, init).(*ir.CallExpr)
}

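// mkcall is like vmkcall, but it looks the callee up in the runtime
// package by name.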
func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
	return vmkcall(typecheck.LookupRuntime(name), t, init, args)
}

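// mkcallstmt builds a statement that calls the named runtime function
// with the given arguments.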
func mkcallstmt(name string, args ...ir.Node) ir.Node {
	return mkcallstmt1(typecheck.LookupRuntime(name), args...)
}

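// mkcall1 is like mkcall, but it takes an already resolved function
// node instead of a runtime function name (see walkRecv above for an
// example use).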
func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
	return vmkcall(fn, t, init, args)
}

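// mkcallstmt1 builds a statement that calls fn with the given
// arguments. If the call generates init statements, the call and those
// statements are wrapped together in a block.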
func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
	var init ir.Nodes
	n := vmkcall(fn, nil, &init, args)
	if len(init) == 0 {
		return n
	}
	init.Append(n)
	return ir.NewBlockStmt(n.Pos(), init)
}

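// chanfn looks up the named runtime channel function instantiated for
// channel type t. n is the number of type arguments the runtime
// declaration expects (one or two copies of the element type).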
func chanfn(name string, n int, t *types.Type) ir.Node {
	if !t.IsChan() {
		base.Fatalf("chanfn %v", t)
	}
	switch n {
	case 1:
		return typecheck.LookupRuntime(name, t.Elem())
	case 2:
		return typecheck.LookupRuntime(name, t.Elem(), t.Elem())
	}
	base.Fatalf("chanfn %d", n)
	return nil
}

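// mapfn looks up the named runtime map function instantiated for map
// type t. The slow-path and fat variants need the key and element
// types substituted twice in their signatures; the key-specialized
// fast variants need one fewer type argument.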
func mapfn(name string, t *types.Type, isfat bool) ir.Node {
	if !t.IsMap() {
		base.Fatalf("mapfn %v", t)
	}
	if mapfast(t) == mapslow || isfat {
		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem())
	}
	return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem())
}

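// mapfndel is like mapfn but matches the runtime map delete functions,
// which take no element argument.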
func mapfndel(name string, t *types.Type) ir.Node {
	if !t.IsMap() {
		base.Fatalf("mapfn %v", t)
	}
	if mapfast(t) == mapslow {
		return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key())
	}
	return typecheck.LookupRuntime(name, t.Key(), t.Elem())
}

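// The fast-path specializations of the runtime map functions, as
// selected by mapfast.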
const (
	mapslow = iota
	mapfast32
	mapfast32ptr
	mapfast64
	mapfast64ptr
	mapfaststr
	nmapfast
)

type mapnames [nmapfast]string

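// mkmapnames builds the table of runtime function names derived from
// base, one entry per fast-path variant. ptr is the suffix used by the
// variants that are further specialized for pointer-containing keys.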
func mkmapnames(base string, ptr string) mapnames {
	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
}

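// Tables of runtime map function names, indexed by the result of
// mapfast.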
var mapaccess1 = mkmapnames("mapaccess1", "")
var mapaccess2 = mkmapnames("mapaccess2", "")
var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")

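// mapfast returns which fast-path variant of the runtime map functions
// (or mapslow) to use for map type t, based on the element size and
// the key's algorithm kind.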
func mapfast(t *types.Type) int {
	if t.Elem().Size() > abi.MapMaxElemBytes {
		return mapslow
	}
	switch reflectdata.AlgType(t.Key()) {
	case types.AMEM32:
		if !t.Key().HasPointers() {
			return mapfast32
		}
		if types.PtrSize == 4 {
			return mapfast32ptr
		}
		base.Fatalf("small pointer %v", t.Key())
	case types.AMEM64:
		if !t.Key().HasPointers() {
			return mapfast64
		}
		if types.PtrSize == 8 {
			return mapfast64ptr
		}
		// Two-word object, at least one of which is a pointer.
		// Use the slow path.
	case types.ASTRING:
		return mapfaststr
	}
	return mapslow
}

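// walkAppendArgs walks the arguments of an append call and rewrites
// each of them into a cheap expression, so that they remain safe to
// evaluate after the slice being appended to has been modified.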
func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
	walkExprListSafe(n.Args, init)

	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	ls := n.Args
	for i1, n1 := range ls {
		ls[i1] = cheapExpr(n1, init)
	}
}

// appendWalkStmt typechecks and walks stmt and then appends it to init.
func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
	op := stmt.Op()
	n := typecheck.Stmt(stmt)
	if op == ir.OAS || op == ir.OAS2 {
		// If the assignment has side effects, walkExpr will append them
		// directly to init for us, while walkStmt will wrap it in an OBLOCK.
		// We need to append them directly.
		// TODO(rsc): Clean this up.
		n = walkExpr(n, init)
	} else {
		n = walkStmt(n)
	}
	init.Append(n)
}

// maxOpenDefers is the maximum number of defers allowed in a function that
// uses open-coded defers. We enforce this limit because the deferBits bitmask
// is currently a single byte (to minimize code size).
const maxOpenDefers = 8

// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapExpr.
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
	var init ir.Nodes
	c := cheapExpr(n, &init)
	if c != n || len(init) != 0 {
		base.Fatalf("backingArrayPtrLen not cheap: %v", n)
	}
	ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
	if n.Type().IsString() {
		ptr.SetType(types.Types[types.TUINT8].PtrTo())
	} else {
		ptr.SetType(n.Type().Elem().PtrTo())
	}
	ptr.SetTypecheck(1)
	length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
	length.SetType(types.Types[types.TINT])
	length.SetTypecheck(1)
	return ptr, length
}

// mayCall reports whether evaluating expression n may require
// function calls, which could clobber function call arguments/results
// currently on the stack.
func mayCall(n ir.Node) bool {
	// When instrumenting, any expression might require function calls.
	if base.Flag.Cfg.Instrumenting {
		return true
	}

	isSoftFloat := func(typ *types.Type) bool {
		return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
	}

	return ir.Any(n, func(n ir.Node) bool {
		// walk should have already moved any Init blocks off of
		// expressions.
		if len(n.Init()) != 0 {
			base.FatalfAt(n.Pos(), "mayCall %+v", n)
		}

		switch n.Op() {
		default:
			base.FatalfAt(n.Pos(), "mayCall %+v", n)

		case ir.OCALLFUNC, ir.OCALLINTER,
			ir.OUNSAFEADD, ir.OUNSAFESLICE:
			return true

		case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
			ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODYNAMICDOTTYPE, ir.ODIV, ir.OMOD,
			ir.OSLICE2ARR, ir.OSLICE2ARRPTR:
			// These ops might panic, make sure they are done
			// before we start marshaling args for a call. See issue 16760.
			return true

		case ir.OANDAND, ir.OOROR:
			n := n.(*ir.LogicalExpr)
			// The RHS expression may have init statements that
			// should only execute conditionally, and so cannot be
			// pulled out to the top-level init list. We could try
			// to be more precise here.
			return len(n.Y.Init()) != 0

		// When using soft-float, these ops might be rewritten to function calls
		// so we ensure they are evaluated first.
		case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
			return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
		case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
			n := n.(*ir.BinaryExpr)
			return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
		case ir.OCONV:
			n := n.(*ir.ConvExpr)
			return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))

		case ir.OMIN, ir.OMAX:
			// string or float requires runtime call, see (*ssagen.state).minmax method.
			return n.Type().IsString() || n.Type().IsFloat()

		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE,
			ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
			ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
			ir.OCONVNOP, ir.ODOT,
			ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
			ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERSP, ir.OSLICEHEADER, ir.OSTRINGHEADER:
			// ok: operations that don't require function calls.
			// Expand as needed.
		}

		return false
	})
}

// itabType loads the _type field from a runtime.itab struct.
func itabType(itab ir.Node) ir.Node {
	if itabTypeField == nil {
		// internal/abi.ITab's Type field
		itabTypeField = runtimeField("Type", rttype.ITab.OffsetOf("Type"), types.NewPtr(types.Types[types.TUINT8]))
	}
	return boundedDotPtr(base.Pos, itab, itabTypeField)
}

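// itabTypeField is the lazily initialized field descriptor for
// internal/abi.ITab's Type field, used by itabType.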
var itabTypeField *types.Field

// boundedDotPtr returns a selector expression representing ptr.field
// and omits nil-pointer checks for ptr.
func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
	sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
	sel.Selection = field
	sel.SetType(field.Type)
	sel.SetTypecheck(1)
	sel.SetBounded(true) // guaranteed not to fault
	return sel
}

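// runtimeField returns a synthesized struct field named name in the
// runtime package, with the given offset and type, for use in selector
// expressions on runtime-internal structures.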
func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
	f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
	f.Offset = offset
	return f
}

// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !IsDirectIface(t).
func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
	if t.IsInterface() {
		base.Fatalf("ifaceData interface: %v", t)
	}
	ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
	if types.IsDirectIface(t) {
		ptr.SetType(t)
		ptr.SetTypecheck(1)
		return ptr
	}
	ptr.SetType(types.NewPtr(t))
	ptr.SetTypecheck(1)
	ind := ir.NewStarExpr(pos, ptr)
	ind.SetType(t)
	ind.SetTypecheck(1)
	ind.SetBounded(true)
	return ind
}

// staticValue returns the earliest expression it can find that always
// evaluates to n, with similar semantics to [ir.StaticValue].
//
// It only returns results for the ir.CurFunc being processed in [Walk],
// including its closures, and uses a cache to reduce duplicative work.
// It can return n or nil if it does not find an earlier expression.
//
// The current use case is reducing OCONVIFACE allocations, and hence
// staticValue is currently only useful when given an *ir.ConvExpr.X as n.
func staticValue(n ir.Node) ir.Node {
	if staticValues == nil {
		base.Fatalf("staticValues is nil. staticValue called outside of walk.Walk?")
	}
	return staticValues[n]
}

// staticValues is a cache of static values for use by staticValue.
var staticValues map[ir.Node]ir.Node

// findStaticValues returns a map of static values for fn.
func findStaticValues(fn *ir.Func) map[ir.Node]ir.Node {
	// We can't use an ir.ReassignOracle or ir.StaticValue in the
	// middle of walk because they don't currently handle
	// transformed assignments (e.g., will complain about 'RHS == nil').
	// So we instead build this map to use in walk.
	ro := &ir.ReassignOracle{}
	ro.Init(fn)
	m := make(map[ir.Node]ir.Node)
	ir.Visit(fn, func(n ir.Node) {
		if n.Op() == ir.OCONVIFACE {
			x := n.(*ir.ConvExpr).X
			v := ro.StaticValue(x)
			if v != nil && v != x {
				m[x] = v
			}
		}
	})
	return m
}
