Source file src/internal/runtime/maps/runtime.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/asan"
	"internal/msan"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

// Functions below pushed from runtime.
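// Each is declared here without a body. The bare //go:linkname directive
// marks the symbol so that the runtime can supply the implementation via a
// matching //go:linkname directive on its side.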

//go:linkname fatal
func fatal(s string)

//go:linkname rand
func rand() uint64

//go:linkname typedmemmove
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer)

//go:linkname typedmemclr
func typedmemclr(typ *abi.Type, ptr unsafe.Pointer)

//go:linkname newarray
func newarray(typ *abi.Type, n int) unsafe.Pointer

//go:linkname newobject
func newobject(typ *abi.Type) unsafe.Pointer

// Pushed from runtime in order to use runtime.plainError
//
//go:linkname errNilAssign
var errNilAssign error

// Pulled from runtime. It is important that this is the same variable as the
// runtime's, because runtime.mapaccess1_fat compares the returned pointer
// with &runtime.zeroVal[0].
// TODO: move zeroVal to internal/abi?
//
//go:linkname zeroVal runtime.zeroVal
var zeroVal [abi.ZeroValSize]byte

// mapaccess1 returns a pointer to m[key]. It never returns nil; instead, if
// the key is not in the map, it returns a reference to the zero object for
// the elem type.
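// For example, the compiler lowers a one-result map read such as
//
//	v := m[k]
//
// to a call of this function (or of one of its specialized variants); the
// comma-ok form v, ok := m[k] uses mapaccess2 instead.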
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

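	// A map with no directory is small: all entries live in a single
	// group, which getWithKeySmall searches directly.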
	if m.dirLen <= 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
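	// h1, the upper bits of the hash, selects the group where probing
	// starts; h2, its low 7 bits, is the per-slot control byte that
	// candidate slots are matched against below.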
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			slotKeyOrig := slotKey
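			// Keys too large to be stored inline in the slot are
			// stored indirectly; in that case the slot holds a
			// pointer to the key.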
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

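// runtime_mapaccess2 is like runtime_mapaccess1, but it also reports whether
// the key is present in the map.
//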
//go:linkname runtime_mapaccess2 runtime.mapaccess2
func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			slotKeyOrig := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

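// runtime_mapassign returns a pointer to the element slot for key, adding the
// key to the map if it is not already present. The caller stores the element
// through the returned pointer; it is never nil.
//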
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled {
		asan.Read(key, typ.Key.Size_)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				slotKeyOrig := slotKey
				if typ.IndirectKey() {
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				if typ.Key.Equal(key, slotKey) {
					if typ.NeedKeyUpdate() {
						typedmemmove(typ.Key, slotKey, key)
					}

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						slotElem = *((*unsafe.Pointer)(slotElem))
					}

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmpty()
			if match != 0 {
				// Finding an empty slot means we've reached the end of
				// the probe sequence.

				var i uintptr

				// If we found a deleted slot along the way, we
				// can replace it without consuming growthLeft.
				if firstDeletedGroup.data != nil {
					g = firstDeletedGroup
					i = firstDeletedSlot
					t.growthLeft++ // will be decremented below to become a no-op.
				} else {
					// Otherwise, use the empty slot.
					i = match.first()
				}

				// If there is room left to grow, just insert the new entry.
				if t.growthLeft > 0 {
					slotKey := g.key(typ, i)
					slotKeyOrig := slotKey
					if typ.IndirectKey() {
						kmem := newobject(typ.Key)
						*(*unsafe.Pointer)(slotKey) = kmem
						slotKey = kmem
					}
					typedmemmove(typ.Key, slotKey, key)

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						emem := newobject(typ.Elem)
						*(*unsafe.Pointer)(slotElem) = emem
						slotElem = emem
					}

					g.ctrls().set(i, ctrl(h2(hash)))
					t.growthLeft--
					t.used++
					m.used++

					t.checkInvariants(typ, m)
					break outer
				}

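				// The table is out of growth budget and has no
				// reusable deleted slot: rehash (growing or
				// splitting the table) and retry the insert, as
				// the key may now map to a different table.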
				t.rehash(typ, m)
				continue outer
			}

			// No empty slots in this group. Check for a deleted
			// slot, which we'll use if we don't find a match later
			// in the probe sequence.
			//
			// We only need to remember a single deleted slot.
			if firstDeletedGroup.data == nil {
				// Since we already checked for empty slots
				// above, matches here must be deleted slots.
				match = g.ctrls().matchEmptyOrDeleted()
				if match != 0 {
					firstDeletedGroup = g
					firstDeletedSlot = match.first()
				}
			}
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}