Source file src/cmd/compile/internal/walk/range.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package walk
     6  
     7  import (
     8  	"go/constant"
     9  	"unicode/utf8"
    10  
    11  	"cmd/compile/internal/base"
    12  	"cmd/compile/internal/ir"
    13  	"cmd/compile/internal/reflectdata"
    14  	"cmd/compile/internal/ssagen"
    15  	"cmd/compile/internal/typecheck"
    16  	"cmd/compile/internal/types"
    17  	"cmd/internal/src"
    18  	"cmd/internal/sys"
    19  )
    20  
    21  func cheapComputableIndex(width int64) bool {
    22  	switch ssagen.Arch.LinkArch.Family {
    23  	// MIPS does not have R+R addressing
    24  	// Arm64 may lack ability to generate this code in our assembler,
    25  	// but the architecture supports it.
    26  	case sys.PPC64, sys.S390X:
    27  		return width == 1
    28  	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
    29  		switch width {
    30  		case 1, 2, 4, 8:
    31  			return true
    32  		}
    33  	}
    34  	return false
    35  }
    36  
// walkRange transforms various forms of ORANGE into
// simpler forms.  The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
//
// Each range kind (int, array/slice/pointer-to-array, map, chan,
// string) is lowered to an ordinary OFOR loop plus init statements.
func walkRange(nrange *ir.RangeStmt) ir.Node {
	base.Assert(!nrange.DistinctVars) // Should all be rewritten before escape analysis
	// "for k := range m { delete(m, k) }" becomes a single mapclear call.
	if isMapClear(nrange) {
		return mapRangeClear(nrange)
	}

	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil, nrange.DistinctVars)
	nfor.SetInit(nrange.Init())
	nfor.Label = nrange.Label

	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2

	a := nrange.X
	t := a.Type()
	lno := ir.SetPos(a)

	v1, v2 := nrange.Key, nrange.Value

	// Treat blank value (and blank key with no value) as absent.
	if ir.IsBlank(v2) {
		v2 = nil
	}

	if ir.IsBlank(v1) && v2 == nil {
		v1 = nil
	}

	if v1 == nil && v2 != nil {
		base.Fatalf("walkRange: v2 != nil while v1 == nil")
	}

	var body []ir.Node
	var init []ir.Node
	switch k := t.Kind(); {
	default:
		base.Fatalf("walkRange")

	case types.IsInt[k]:
		// Range over an integer: "for v1 := range a".
		// First try to lower a clearing loop to memclr.
		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
			base.Pos = lno
			return nn
		}
		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
		hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)

		// hv1 := 0; hn := a
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, a))

		// for ; hv1 < hn; hv1++
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

		if v1 != nil {
			body = []ir.Node{rangeAssign(nrange, hv1)}
		}

	case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array
		// First try to lower "for i := range a { a[i] = zero }" to memclr.
		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
			base.Pos = lno
			return nn
		}

		// Element type of the iteration
		var elem *types.Type
		switch t.Kind() {
		case types.TSLICE, types.TARRAY:
			elem = t.Elem()
		case types.TPTR:
			elem = t.Elem().Elem()
		}

		// order.stmt arranged for a copy of the array/slice variable if needed.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])

		// hv1 := 0; hn := len(ha)
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))

		// for ; hv1 < hn; hv1++
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

		// for range ha { body }
		if v1 == nil {
			break
		}

		// for v1 := range ha { body }
		if v2 == nil {
			body = []ir.Node{rangeAssign(nrange, hv1)}
			break
		}

		// for v1, v2 := range ha { body }
		if cheapComputableIndex(elem.Size()) {
			// v1, v2 = hv1, ha[hv1]
			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
			tmp.SetBounded(true)
			body = []ir.Node{rangeAssign2(nrange, hv1, tmp)}
			break
		}

		// Slice to iterate over
		var hs ir.Node
		if t.IsSlice() {
			hs = ha
		} else {
			var arr ir.Node
			if t.IsPtr() {
				arr = ha
			} else {
				arr = typecheck.NodAddr(ha)
				arr.SetType(t.PtrTo())
				arr.SetTypecheck(1)
			}
			hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil)
			// old typechecker doesn't know OSLICEARR, so we set types explicitly
			hs.SetType(types.NewSlice(elem))
			hs.SetTypecheck(1)
		}

		// We use a "pointer" to keep track of where we are in the backing array
		// of the slice hs. This pointer starts at hs.ptr and gets incremented
		// by the element size each time through the loop.
		//
		// It's tricky, though, as on the last iteration this pointer gets
		// incremented to point past the end of the backing array. We can't
		// let the garbage collector see that final out-of-bounds pointer.
		//
		// To avoid this, we keep the "pointer" alternately in 2 variables, one
		// pointer typed and one uintptr typed. Most of the time it lives in the
		// regular pointer variable, but when it might be out of bounds (after it
		// has been incremented, but before the loop condition has been checked)
		// it lives briefly in the uintptr variable.
		//
		// hp contains the pointer version (of type *T, where T is the element type).
		// It is guaranteed to always be in range, keeps the backing store alive,
		// and is updated on stack copies. If a GC occurs when this function is
		// suspended at any safepoint, this variable ensures correct operation.
		//
		// hu contains the equivalent uintptr version. It may point past the
		// end, but doesn't keep the backing store alive and doesn't get updated
		// on a stack copy. If a GC occurs while this function is on the top of
		// the stack, then the last frame is scanned conservatively and hu will
		// act as a reference to the backing array to ensure it is not collected.
		//
		// The "pointer" we're moving across the backing array lives in one
		// or the other of hp and hu as the loop proceeds.
		//
		// hp is live during most of the body of the loop. But it isn't live
		// at the very top of the loop, when we haven't checked i<n yet, and
		// it could point off the end of the backing store.
		// hu is live only at the very top and very bottom of the loop.
		// In particular, only when it cannot possibly be live across a call.
		//
		// So we do
		//   hu = uintptr(unsafe.Pointer(hs.ptr))
		//   for i := 0; i < hs.len; i++ {
		//     hp = (*T)(unsafe.Pointer(hu))
		//     v1, v2 = i, *hp
		//     ... body of loop ...
		//     hu = uintptr(unsafe.Pointer(hp)) + elemsize
		//   }
		//
		// Between the assignments to hu and the assignment back to hp, there
		// must not be any calls.

		// Pointer to current iteration position. Start on entry to the loop
		// with the pointer in hu.
		ptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, hs)
		ptr.SetBounded(true)
		huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
		hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
		init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))

		// Convert hu to hp at the top of the loop (after the condition has been checked).
		hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
		hpVal.SetCheckPtr(true) // disable checkptr on this conversion
		hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)
		hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())
		body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))

		// Assign variables on the LHS of the range statement. Use *hp to get the element.
		e := ir.NewStarExpr(base.Pos, hp)
		e.SetBounded(true)
		a := rangeAssign2(nrange, hv1, e)
		body = append(body, a)

		// Advance pointer for next iteration of the loop.
		// This reads from hp and writes to hu.
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hp)
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
		as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))
		nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})

	case k == types.TMAP:
		// order.stmt allocated the iterator for us.
		// we only use a once, so no copy needed.
		ha := a

		hit := nrange.Prealloc
		th := hit.Type()
		// depends on layout of iterator struct.
		// See cmd/compile/internal/reflectdata/map.go:MapIterType
		keysym := th.Field(0).Sym
		elemsym := th.Field(1).Sym // ditto
		iterInit := "mapIterStart"
		iterNext := "mapIterNext"

		// mapIterStart(rtype, ha, &hit)
		fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)
		init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
		// Loop while the iterator's key pointer is non-nil.
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())

		// Post: mapIterNext(&hit)
		fn = typecheck.LookupRuntime(iterNext, th)
		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))

		// key is *(*KeyType)(hit.key); elem (below) is the analogous deref.
		key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = []ir.Node{rangeAssign(nrange, key)}
		} else {
			elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))
			body = []ir.Node{rangeAssign2(nrange, key, elem)}
		}

	case k == types.TCHAN:
		// order.stmt arranged for a copy of the channel variable.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())
		hv1.SetTypecheck(1)
		// Pre-zero hv1 if the element contains pointers, so the GC
		// never observes uninitialized pointer words in it.
		if t.Elem().HasPointers() {
			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		}
		hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])

		// Condition: hv1, hb = <-ha; hb != false
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))
		lhs := []ir.Node{hv1, hb}
		rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs)
		a.SetTypecheck(1)
		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
		if v1 == nil {
			body = nil
		} else {
			body = []ir.Node{rangeAssign(nrange, hv1)}
		}
		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		// See issue 15281.
		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))

	case k == types.TSTRING:
		// Transform string range statements like "for v1, v2 = range a" into
		//
		// ha := a
		// for hv1 := 0; hv1 < len(ha); {
		//   hv1t := hv1
		//   hv2 := rune(ha[hv1])
		//   if hv2 < utf8.RuneSelf {
		//      hv1++
		//   } else {
		//      hv2, hv1 = decoderune(ha, hv1)
		//   }
		//   v1, v2 = hv1t, hv2
		//   // original body
		// }

		// order.stmt arranged for a copy of the string variable.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)

		// hv1 := 0
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

		// hv1 < len(ha)
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))

		if v1 != nil {
			// hv1t = hv1
			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
		}

		// hv2 := rune(ha[hv1])
		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
		nind.SetBounded(true)
		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))

		// if hv2 < utf8.RuneSelf
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

		// On x86, hv2 <= 127 is shorter to encode than hv2 < 128
		// Doesn't hurt other archs.
		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, hv2, ir.NewInt(base.Pos, utf8.RuneSelf-1))

		// hv1++
		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))}

		// } else {
		// hv2, hv1 = decoderune(ha, hv1)
		fn := typecheck.LookupRuntime("decoderune")
		call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
		nif.Else.Append(a)

		body = append(body, nif)

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2
				body = append(body, rangeAssign2(nrange, hv1t, hv2))
			} else {
				// v1 = hv1t
				body = append(body, rangeAssign(nrange, hv1t))
			}
		}
	}

	// Typecheck the generated pieces and assemble the final loop:
	// init statements, condition (with its receive init, if any),
	// post statement, generated body, then the original range body.
	typecheck.Stmts(init)

	nfor.PtrInit().Append(init...)

	typecheck.Stmts(nfor.Cond.Init())

	nfor.Cond = typecheck.Expr(nfor.Cond)
	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
	nfor.Post = typecheck.Stmt(nfor.Post)
	typecheck.Stmts(body)
	nfor.Body.Append(body...)
	nfor.Body.Append(nrange.Body...)

	var n ir.Node = nfor

	n = walkStmt(n)

	// Restore the position saved at entry.
	base.Pos = lno
	return n
}
   388  
   389  // rangeAssign returns "n.Key = key".
   390  func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node {
   391  	key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
   392  	return ir.NewAssignStmt(n.Pos(), n.Key, key)
   393  }
   394  
   395  // rangeAssign2 returns "n.Key, n.Value = key, value".
   396  func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node {
   397  	// Use OAS2 to correctly handle assignments
   398  	// of the form "v1, a[v1] = range".
   399  	key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
   400  	value = rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType)
   401  	return ir.NewAssignListStmt(n.Pos(), ir.OAS2, []ir.Node{n.Key, n.Value}, []ir.Node{key, value})
   402  }
   403  
   404  // rangeConvert returns src, converted to dst if necessary. If a
   405  // conversion is necessary, then typeWord and srcRType are copied to
   406  // their respective ConvExpr fields.
   407  func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node {
   408  	src = typecheck.Expr(src)
   409  	if dst.Kind() == types.TBLANK || types.Identical(dst, src.Type()) {
   410  		return src
   411  	}
   412  
   413  	n := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src)
   414  	n.TypeWord = typeWord
   415  	n.SrcRType = srcRType
   416  	return typecheck.Expr(n)
   417  }
   418  
   419  // isMapClear checks if n is of the form:
   420  //
   421  //	for k := range m {
   422  //		delete(m, k)
   423  //	}
   424  //
   425  // where == for keys of map m is reflexive.
   426  func isMapClear(n *ir.RangeStmt) bool {
   427  	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
   428  		return false
   429  	}
   430  
   431  	t := n.X.Type()
   432  	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
   433  		return false
   434  	}
   435  
   436  	k := n.Key
   437  	// Require k to be a new variable name.
   438  	if !ir.DeclaredBy(k, n) {
   439  		return false
   440  	}
   441  
   442  	if len(n.Body) != 1 {
   443  		return false
   444  	}
   445  
   446  	stmt := n.Body[0] // only stmt in body
   447  	if stmt == nil || stmt.Op() != ir.ODELETE {
   448  		return false
   449  	}
   450  
   451  	m := n.X
   452  	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
   453  		return false
   454  	}
   455  
   456  	// Keys where equality is not reflexive can not be deleted from maps.
   457  	if !types.IsReflexive(t.Key()) {
   458  		return false
   459  	}
   460  
   461  	return true
   462  }
   463  
   464  // mapRangeClear constructs a call to runtime.mapclear for the map range idiom.
   465  func mapRangeClear(nrange *ir.RangeStmt) ir.Node {
   466  	m := nrange.X
   467  	origPos := ir.SetPos(m)
   468  	defer func() { base.Pos = origPos }()
   469  
   470  	return mapClear(m, reflectdata.RangeMapRType(base.Pos, nrange))
   471  }
   472  
   473  // mapClear constructs a call to runtime.mapclear for the map m.
   474  func mapClear(m, rtyp ir.Node) ir.Node {
   475  	t := m.Type()
   476  
   477  	// instantiate mapclear(typ *type, hmap map[any]any)
   478  	fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())
   479  	n := mkcallstmt1(fn, rtyp, m)
   480  	return walkStmt(typecheck.Stmt(n))
   481  }
   482  
   483  // Lower n into runtime·memclr if possible, for
   484  // fast zeroing of slices and arrays (issue 5373).
   485  // Look for instances of
   486  //
   487  //	for i := range a {
   488  //		a[i] = zero
   489  //	}
   490  //
   491  // in which the evaluation of a is side-effect-free.
   492  //
   493  // Parameters are as in walkRange: "for v1, v2 = range a".
   494  func arrayRangeClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
   495  	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
   496  		return nil
   497  	}
   498  
   499  	if v1 == nil || v2 != nil {
   500  		return nil
   501  	}
   502  
   503  	if len(loop.Body) != 1 || loop.Body[0] == nil {
   504  		return nil
   505  	}
   506  
   507  	stmt1 := loop.Body[0] // only stmt in body
   508  	if stmt1.Op() != ir.OAS {
   509  		return nil
   510  	}
   511  	stmt := stmt1.(*ir.AssignStmt)
   512  	if stmt.X.Op() != ir.OINDEX {
   513  		return nil
   514  	}
   515  	lhs := stmt.X.(*ir.IndexExpr)
   516  	x := lhs.X
   517  
   518  	// Get constant number of iterations for int and array cases.
   519  	n := int64(-1)
   520  	if ir.IsConst(a, constant.Int) {
   521  		n = ir.Int64Val(a)
   522  	} else if a.Type().IsArray() {
   523  		n = a.Type().NumElem()
   524  	} else if a.Type().IsPtr() && a.Type().Elem().IsArray() {
   525  		n = a.Type().Elem().NumElem()
   526  	}
   527  
   528  	if n >= 0 {
   529  		// Int/Array case.
   530  		if !x.Type().IsArray() {
   531  			return nil
   532  		}
   533  		if x.Type().NumElem() != n {
   534  			return nil
   535  		}
   536  	} else {
   537  		// Slice case.
   538  		if !ir.SameSafeExpr(x, a) {
   539  			return nil
   540  		}
   541  	}
   542  
   543  	if !ir.SameSafeExpr(lhs.Index, v1) {
   544  		return nil
   545  	}
   546  
   547  	if !ir.IsZero(stmt.Y) {
   548  		return nil
   549  	}
   550  
   551  	return arrayClear(stmt.Pos(), x, loop)
   552  }
   553  
   554  // arrayClear constructs a call to runtime.memclr for fast zeroing of slices and arrays.
   555  func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
   556  	elemsize := typecheck.RangeExprType(a.Type()).Elem().Size()
   557  	if elemsize <= 0 {
   558  		return nil
   559  	}
   560  
   561  	// Convert to
   562  	// if len(a) != 0 {
   563  	// 	hp = &a[0]
   564  	// 	hn = len(a)*sizeof(elem(a))
   565  	// 	memclr{NoHeap,Has}Pointers(hp, hn)
   566  	// 	i = len(a) - 1
   567  	// }
   568  	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
   569  	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
   570  
   571  	// hp = &a[0]
   572  	hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
   573  
   574  	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
   575  	ix.SetBounded(true)
   576  	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
   577  	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
   578  
   579  	// hn = len(a) * sizeof(elem(a))
   580  	hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
   581  	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
   582  	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
   583  
   584  	var fn ir.Node
   585  	if a.Type().Elem().HasPointers() {
   586  		// memclrHasPointers(hp, hn)
   587  		ir.CurFunc.SetWBPos(wbPos)
   588  		fn = mkcallstmt("memclrHasPointers", hp, hn)
   589  	} else {
   590  		// memclrNoHeapPointers(hp, hn)
   591  		fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
   592  	}
   593  
   594  	n.Body.Append(fn)
   595  
   596  	// For array range clear, also set "i = len(a) - 1"
   597  	if nrange != nil {
   598  		idx := ir.NewAssignStmt(base.Pos, nrange.Key, typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 1)), nrange.Key.Type()))
   599  		n.Body.Append(idx)
   600  	}
   601  
   602  	n.Cond = typecheck.Expr(n.Cond)
   603  	n.Cond = typecheck.DefaultLit(n.Cond, nil)
   604  	typecheck.Stmts(n.Body)
   605  	return walkStmt(n)
   606  }
   607  

View as plain text