Source file: src/runtime/export_test.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var ReadRandomFailed = &readRandomFailed

var Fastlog2 = fastlog2

var ParseByteCount = parseByteCount

var Nanotime = nanotime
var Cputicks = cputicks
var CyclesPerSecond = pprof_cyclesPerSecond
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var LockPartialOrder = lockPartialOrder

type TimeTimer = timeTimer

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}
func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func PointerMask(x any) (ret []byte) {
	systemstack(func() {
		ret = pointerMask(x)
	})
	return
}

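// RunSchedLocalQueueTest exercises a P's local run queue by pushing and
// popping increasing numbers of goroutines with runqput/runqget and
// checking that the queue drains to empty each round.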
func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs)
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

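// RunSchedLocalQueueStealTest fills one P's run queue and steals into a
// second P with runqsteal, verifying that every goroutine is drained
// exactly once and that roughly half of them are stolen.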
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs)
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

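// RunSchedLocalQueueEmptyTest races runqput against runqempty to check
// that a queue holding a runnable goroutine is never reported as empty.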
func RunSchedLocalQueueEmptyTest(iters int) {
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

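// SetTracebackEnv behaves as if the given traceback level had been set via
// the GOTRACEBACK environment variable: it updates both the cached setting
// and the environment-derived baseline.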
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

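// CountPagesInUse stops the world and returns the heap's pagesInUse counter
// alongside an independent count of pages in in-use spans, so tests can
// check that the two agree.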
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stw := stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld(stw)

	return
}

func Fastrand() uint32          { return uint32(rand()) }
func Fastrand64() uint64        { return rand() }
func Fastrandn(n uint32) uint32 { return randn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

type CPUStats = cpuStats

func ReadCPUStats() CPUStats {
	return work.cpuStats
}

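// ReadMetricsSlow is a test-only metrics read: it stops the world and reads
// the runtime metrics twice around a readmemstats_m call, so callers can
// compare the metrics samples against MemStats taken at the same point.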
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stw := stopTheWorld(stwForTestReadMetricsSlow)

	metricsLock()
	initMetrics()

	systemstack(func() {
		getg().racectx = getg().m.curg.racectx

		readMetricsLocked(samplesp, len, cap)

		readmemstats_m(memStats)

		readMetricsLocked(samplesp, len, cap)

		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld(stw)
}

var DoubleCheckReadMemStats = &doubleCheckReadMemStats

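// ReadMemStatsSlow returns both the runtime-maintained MemStats and a copy
// recomputed from scratch by walking all spans and the page allocator, so
// tests can cross-check the accounting.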
func ReadMemStatsSlow() (base, slow MemStats) {
	stw := stopTheWorld(stwForTestReadMemStatsSlow)

	systemstack(func() {
		getg().m.mallocing++

		readmemstats_m(&base)

		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [gc.NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		var smallFree uint64
		for i := 0; i < gc.NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld(stw)
	return
}

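// ShrinkStackAndVerifyFramePointers forces a stack shrink (copy) of the
// calling goroutine and then walks the frame pointer chain via FPCallers
// to make sure it survived the copy.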
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})

	FPCallers(make([]uintptr, 1024))
}

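// BlockOnSystemStack switches to the system stack, prints "x" so the caller
// can tell it got there, and then deliberately self-deadlocks by acquiring
// the same lock twice.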
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) Init() {
	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

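// TracebackSystemstack captures up to len(stk) PCs after nesting i
// systemstack calls, exercising traceback across system stack switches.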
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack)
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

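// MapNextArenaHint reserves a physical page at the next arena hint address
// and reports the hinted arena's address range and whether the reservation
// actually landed at the hint.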
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
	ok = (addr == uintptr(got))
	if !ok {
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

type XRegPerG = xRegPerG

func Getg() *G {
	return getg()
}

func Goid() uint64 {
	return getg().goid
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := sys.GetCallerSP()

		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func RunGetgThreadSwitchTest() {
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

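// SummarizeSlow is a reference implementation of (*pallocBits).summarize:
// it computes the leading, trailing, and longest runs of free pages by
// brute force for comparison in tests.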
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, most, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		most = max(most, run)
	}
	return PackPallocSum(start, most, end)
}

func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

type ChunkIdx chunkIdx

type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

type AddrRange struct {
	addrRange
}

func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

var testSysStat = &memstats.other_sys

type AddrRanges struct {
	addrRanges
	mutable bool
}

func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

func MakeAddrRanges(a ...AddrRange) AddrRanges {
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

type BitRange struct {
	I, N uint
}

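// NewPageAlloc constructs an isolated page allocator for testing. chunks
// maps chunk indices to the page ranges to mark allocated in that chunk;
// scav optionally maps chunk indices to ranges to mark scavenged.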
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		chunk.scavenged.clearRange(0, pallocChunkPages)

		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		for _, s := range init {
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				p.scav.index.alloc(ci, s.N)
			}
		}

		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

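// FreePageAlloc releases all OS memory held by a page allocator created
// with NewPageAlloc, including its summaries, scavenge index, and chunk
// bitmaps, and unwinds the associated accounting.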
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

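// BaseChunkIdx is a convenient chunk index to use as a base for page
// allocator tests; the address it corresponds to depends on whether the
// platform uses the 64-bit page allocator and on AIX's arena base offset.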
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	systemstack(func() {
		getg().m.mallocing++

		lock(&mheap_.lock)

		heapBase := mheap_.pages.inUse.ranges[0].base.addr()
		secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			cb := chunkBase(i)
			for j := 0; j < pallocChunkPages/64; j++ {
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
						continue
					}
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: cb + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stw := stopTheWorld(stwForTestPageCachePagesLeaked)

	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld(stw)
	return
}

var ProcYield = procyield
var OSYield = osyield

type Mutex = mutex

var Lock = lock
var Unlock = unlock

var MutexContended = mutexContended

func SemRootLock(addr *uint32) *mutex {
	root := semtable.rootFor(addr)
	return &root.lock
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

type SemTable struct {
	semTable
}

func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}

type MSpan mspan

func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		s.init(0, 0)
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

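// MSpanCountAlloc counts the allocated objects implied by the given mark
// bits by temporarily installing them as the span's gcmarkBits.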
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

type MSpanQueue mSpanQueue

func (q *MSpanQueue) Size() int {
	return (*mSpanQueue)(q).n
}

func (q *MSpanQueue) Push(s *MSpan) {
	(*mSpanQueue)(q).push((*mspan)(s))
}

func (q *MSpanQueue) Pop() *MSpan {
	s := (*mSpanQueue)(q).pop()
	return (*MSpan)(s)
}

func (q *MSpanQueue) TakeAll(p *MSpanQueue) {
	(*mSpanQueue)(q).takeAll((*mSpanQueue)(p))
}

func (q *MSpanQueue) PopN(n int) MSpanQueue {
	p := (*mSpanQueue)(q).popN(n)
	return (MSpanQueue)(p)
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

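// Count returns the count in the given bucket and sub-bucket. If the indices
// fall outside the histogram it returns the underflow or overflow count
// instead, and reports false.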
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	g := Escape(new(GCController))
	g.gcControllerState.test = true
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

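// Escape forces x to escape to the heap: the compiler cannot prove that
// alwaysFalse is never set, so it must assume x may be stored in escapeSink.
// Tests use it to pin values in memory, e.g. gs := make([]g, n); Escape(gs).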
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

const ScavengePercent = scavengePercent

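// Scavenger is a test harness around scavengerState. Tests supply the Sleep,
// Scavenge, ShouldStop, and GoMaxProcs stubs and drive a scavenger loop that
// runs in its own goroutine.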
type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

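// Start installs the stubs, launches the scavenger loop in a new goroutine,
// and waits up to a second for it to park; it panics if any stub is missing
// or if the scavenger never parks.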
func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9) {
		panic("timed out waiting for scavenger to get ready")
	}
}

func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)

	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		s.i.alloc(sc, ei+1-si)
	} else {
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		s.i.free(sc, si, ei+1-si)
	} else {
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind() != abi.Pointer {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	return blockUntilEmptyFinalizerQueue(timeout)
}

func BlockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}

func FrameStartLine(f *Frame) int {
	return f.startLine
}

func PersistentAlloc(n, align uintptr) unsafe.Pointer {
	return persistentalloc(n, align, &memstats.other_sys)
}

const TagAlign = tagAlign

func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

const FramePointerEnabled = framepointer_enabled

var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

var testUintptr uintptr

func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}

func UnsafePoint(pc uintptr) bool {
	fi := findfunc(pc)
	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
	switch v {
	case abi.UnsafePointUnsafe:
		return true
	case abi.UnsafePointSafe:
		return false
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
		return false
	default:
		var buf [20]byte
		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
	}
}

type TraceMap struct {
	traceMap
}

func (m *TraceMap) PutString(s string) (uint64, bool) {
	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}

func GCMarkDoneRestarted() bool {
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}

type BitCursor struct {
	b bitCursor
}

func NewBitCursor(buf *byte) BitCursor {
	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
}

func (b BitCursor) Write(data *byte, cnt uintptr) {
	b.b.write(data, cnt)
}
func (b BitCursor) Offset(cnt uintptr) BitCursor {
	return BitCursor{b: b.b.offset(cnt)}
}

const (
	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
)

type TraceStackTable traceStackTable

func (t *TraceStackTable) Reset() {
	t.tab.reset()
}

func TraceStack(gp *G, tab *TraceStackTable) {
	traceStack(0, gp, (*traceStackTable)(tab))
}
1943
View as plain text