Source file
src/runtime/mklockrank.go
1
2
3
4
5
6
7
8
9 package main
10
11 import (
12 "bytes"
13 "flag"
14 "fmt"
15 "go/format"
16 "internal/dag"
17 "io"
18 "log"
19 "os"
20 "strings"
21 )
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
// ranks is the partial-order description of the runtime's lock ranks.
// It is parsed by internal/dag (see that package for the exact
// syntax). "a < b" declares that a may already be held when b is
// acquired; "NONE < a" introduces a rank with no predecessors.
// Names in ALL CAPS (SCHED, MALLOC, WB, ...) appear to be
// pseudo-ranks — isPseudo() treats all-caps labels specially and
// generateGo emits them as comments rather than constants.
// The `#` lines below are comments in the dag syntax, not Go comments.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  sweep;

# Test only
NONE < testR, testW;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cpuprof,
  forcegc,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
222
223
224
225
// cyclicRanks is the set of lock ranks that may be acquired while a
// lock of the same rank is already held (self-cycles). generateGo
// appends the rank to its own lockPartialOrder entry for these.
// The per-entry reasons below are reviewer notes inferred from the
// rank names and the comments in ranks — confirm against the runtime.
var cyclicRanks = map[string]bool{
	// NOTE(review): presumably multiple timers can be locked at once
	// (e.g. when moving between timer heaps) — confirm.
	"timers": true,
	// NOTE(review): select acquires multiple channel locks in a
	// deterministic order — confirm.
	"hchan": true,
	// Per the comment in ranks: syncadjustsudogs can acquire hchan
	// locks while holding gscan, so hchanLeaf allows self-cycles.
	"hchanLeaf": true,
	// Per the comment in ranks: deadlock needs to be below all other
	// locks, including itself.
	"deadlock": true,
}
238
239 func main() {
240 flagO := flag.String("o", "", "write to `file` instead of stdout")
241 flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
242 flag.Parse()
243 if flag.NArg() != 0 {
244 fmt.Fprintf(os.Stderr, "too many arguments")
245 os.Exit(2)
246 }
247
248 g, err := dag.Parse(ranks)
249 if err != nil {
250 log.Fatal(err)
251 }
252
253 var out []byte
254 if *flagDot {
255 var b bytes.Buffer
256 g.TransitiveReduction()
257
258 for k := range cyclicRanks {
259 g.AddEdge(k, k)
260 }
261
262
263
264
265
266 g.Transpose()
267 generateDot(&b, g)
268 out = b.Bytes()
269 } else {
270 var b bytes.Buffer
271 generateGo(&b, g)
272 out, err = format.Source(b.Bytes())
273 if err != nil {
274 log.Fatal(err)
275 }
276 }
277
278 if *flagO != "" {
279 err = os.WriteFile(*flagO, out, 0666)
280 } else {
281 _, err = os.Stdout.Write(out)
282 }
283 if err != nil {
284 log.Fatal(err)
285 }
286 }
287
// generateGo writes the generated lockrank Go source for graph g to w.
// The output is not formatted here; main runs it through
// go/format.Source, so indentation inside the templates is cosmetic.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Reverse the topological order so constants are emitted in
	// increasing rank order (g.Topo() apparently returns the opposite
	// order — TODO confirm against internal/dag).
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	// Emit the rank constants. Pseudo-ranks become comments, real
	// ranks become successive lockRank constants via iota.
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Emit the rank -> name table (pseudo-ranks are skipped, so they
	// have no entry and no String() name).
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Emit the partial-order table: for each real rank, the ranks that
	// may already be held when it is acquired. Cyclic ranks also list
	// themselves.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			// Pseudo-ranks are layers, not locks; they never appear
			// in the table.
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
378
379
// cname converts a rank label from the ranks description into the name
// of its generated Go constant, e.g. "sched" -> "lockRankSched".
func cname(label string) string {
	head, tail := label[:1], label[1:]
	return "lockRank" + strings.ToUpper(head) + tail
}
383
// isPseudo reports whether label names a pseudo-rank, which by
// convention is spelled entirely in upper case (e.g. "MALLOC", "WB").
func isPseudo(label string) bool {
	upper := strings.ToUpper(label)
	return label == upper
}
387
388
389 func generateDot(w io.Writer, g *dag.Graph) {
390 fmt.Fprintf(w, "digraph g {\n")
391
392
393 for _, node := range g.Nodes {
394 fmt.Fprintf(w, "%q;\n", node)
395 }
396
397
398 for _, node := range g.Nodes {
399 for _, to := range g.Edges(node) {
400 fmt.Fprintf(w, "%q -> %q;\n", node, to)
401 }
402 }
403
404 fmt.Fprintf(w, "}\n")
405 }
406
View as plain text