Source file
src/runtime/mklockrank.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

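// Note: internal/dag lives in GOROOT/src/internal, so this program can
// only be built and run from within the Go source tree (it lives in
// src/runtime; see the invocation sketch after main below).
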
// ranks describes the lock rank graph, written in the DSL parsed by
// internal/dag. It is a sequence of declarations of the form
//
//	a, b < c, d;
//
// meaning that a lock of rank a or b may be held when a lock of rank
// c or d is acquired, but never the reverse. NONE is a special no-op
// rank used to declare locks with nothing ordered below them. Names
// in all caps (SCHED, MALLOC, ...) are pseudo-ranks: they label layers
// of the graph rather than real locks, and appear only as comments in
// the generated Go output. Lines starting with # are comments.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc, computeMaxProcs, updateMaxProcsG;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  cleanupQueue,
  sweep;

# Test only
NONE < testR, testW;

# vgetrandom
NONE < vgetrandom;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cleanupQueue,
  computeMaxProcs,
  cpuprof,
  forcegc,
  updateMaxProcsG,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Synctest
hchan,
  notifyList,
  reflectOffs,
  root,
  strongFromWeakQueue,
  sweepWaiters,
  timer,
  timers
< synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState,
  vgetrandom
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event.
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
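
// To make the notation concrete: the declaration "allocmR, execR, hchan
// < sched" above means that allocmR, execR, or hchan may already be held
// when sched is acquired, but sched must never be held while acquiring
// any of those three.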

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks in some other way.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}
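
// Each cyclic rank lists itself in the generated lockPartialOrder table;
// for example, the entry for lockRankTimers includes lockRankTimers
// itself, which is what lets the runtime hold two timers locks at once.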

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintf(os.Stderr, "too many arguments\n")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}

		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down and
		// time moves forward over the edges instead of backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}
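
// Typical invocations, run from src/runtime (illustrative; the exact
// commands the build uses are not part of this file):
//
//	go run mklockrank.go -o lockrank.go
//	go run mklockrank.go -dot | dot -Tsvg -o lockrank.svg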

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Compute the list of ranks in rank order. g.Topo returns the
	// reverse of that order, so flip it.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)
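
	// At this point the output contains a const block along these
	// lines (abridged; pseudo-ranks appear only as comments):
	//
	//	const (
	//		lockRankUnknown lockRank = iota
	//		lockRankSysmon
	//		...
	//		// SCHED
	//		lockRankAllocmR
	//		...
	//	)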

	// Emit the lockNames table, mapping each rank to its string name.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)
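
	// The generated String method maps ranks back to their names, so
	// lockRankHchan.String() returns "hchan", rank 0 prints "UNKNOWN",
	// and anything out of range prints "BAD RANK".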

	// Emit the partial order table.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			// Self-cycle: the rank may be held while acquiring itself.
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
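
// A generated lockPartialOrder entry looks like (abridged):
//
//	lockRankSudog: {..., lockRankNotifyList, lockRankHchan},
//
// that is, every rank that may already be held when a sudog lock is
// acquired.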

// cname returns the Go const name for the given lock rank label.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// isPseudo reports whether label is a pseudo-rank, spelled in all caps.
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}
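
// For example, cname("hchan") returns "lockRankHchan", isPseudo("MALLOC")
// reports true, and isPseudo("hchan") reports false.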

// generateDot emits the rank graph in Graphviz dot format.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
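
// The -dot output looks like (abridged):
//
//	digraph g {
//	"sysmon";
//	"scavenge";
//	...
//	"sysmon" -> "scavenge";
//	}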