src/runtime/mklockrank.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
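//
// Typical usage (invocation assumed from the -o and -dot flags defined
// in main below; "dot" is the standard Graphviz renderer):
//
//	go run mklockrank.go -o lockrank.go
//	go run mklockrank.go -dot | dot -Tsvg > lockrank.svg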
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, meaning no other locks can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-ranks and can only be used in the
// lock graph, but don't exist as real locks.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
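//
// To make the syntax concrete, a fragment like
//
//	NONE < a;
//	a < b, c;
//
// says that the hypothetical rank a can be acquired with no locks held,
// and that b or c can each be acquired while a is held.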
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc, computeMaxProcs, updateMaxProcsG;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  cleanupQueue,
  sweep;

# Test only
NONE < testR, testW;

# vgetrandom
NONE < vgetrandom;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cleanupQueue,
  computeMaxProcs,
  cpuprof,
  forcegc,
  updateMaxProcsG,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Synctest
hchan,
  notifyList,
  reflectOffs,
  root,
  strongFromWeakQueue,
  sweepWaiters,
  timer,
  timers
< synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState,
  vgetrandom
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# xRegState allocator
sched < xRegAlloc;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
# Fixallocs
mheap, mheapSpecial, xRegAlloc < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}
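
// For illustration (entry derived by hand from the ranks graph above):
// because "deadlock" allows self-cycles and the graph declares
// panic < deadlock, the generated lockPartialOrder ends up with an
// entry like
//
//	lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
//
// where the trailing self-entry is what lets two deadlock-rank locks
// be held at once.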

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintf(os.Stderr, "too many arguments\n")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down and
		// time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create lock names.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create the partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
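
// As a small worked example of the loop above: "NONE < defer" gives defer
// no predecessors, so the generated table contains
//
//	lockRankDefer: {},
//
// while a rank with predecessors lists the transitive closure of every
// rank below it (with pseudo-ranks filtered out by isPseudo).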

// cname returns the generated constant name for the given rank label.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// isPseudo reports whether the label names a pseudo-rank (written in
// all caps), which exists only in the lock graph, not as a real lock.
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}
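
// For example, given the two helpers above, cname("hchan") returns
// "lockRankHchan", while isPseudo("MALLOC") is true and isPseudo("hchan")
// is false.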

// generateDot emits the lock rank graph in the Graphviz dot format.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
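
// A sketch of the emitted output for a hypothetical graph with the single
// declaration "a < b" (node lines first, then edge lines, matching the two
// loops above; after the Transpose in main, edges point from the lock
// acquired first to the lock acquired second):
//
//	digraph g {
//	"a";
//	"b";
//	"a" -> "b";
//	}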