// Excerpt of go/src/runtime/runtime1.go (Go runtime internals).
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "internal/runtime/atomic"
11 "internal/strconv"
12 "unsafe"
13 )
14
15
16
17
18
19
// Flag bits packed into the low bits of the cached GOTRACEBACK value;
// the remaining bits (value >> tracebackShift) hold the traceback level.
const (
	tracebackCrash = 1 << iota // crash after printing tracebacks
	tracebackAll               // print tracebacks for all goroutines
	tracebackShift = iota      // number of flag bits below the level
)
25
// traceback_cache caches the parsed GOTRACEBACK setting (level plus
// tracebackCrash/tracebackAll flags) so gotraceback can read it with a
// single atomic load. Starts at level 2 until setTraceback runs.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the contribution of the GOTRACEBACK environment
// variable (captured in finishDebugVarsSetup); setTraceback ORs it back
// in so later calls cannot drop the environment's bits.
var traceback_env uint32
28
29
30
31
32
33
34
35
36
37
// gotraceback returns the current traceback settings: the traceback
// level, whether all goroutines should be printed, and whether to
// crash after printing.
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	// Single atomic load of the packed level+flags cache.
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	// User-level throws always dump all goroutines, regardless of cache.
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		// A per-M override takes precedence over everything else.
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Runtime-internal throws force level 2 so runtime frames
		// are included in the traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
54
// Raw process arguments as passed to the entry point; recorded by args
// and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
59
60
61
62
// argv_index returns the i'th element of the given argv array,
// i.e. *(argv + i) with pointer-sized elements.
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
66
// args records the process argument vector and gives the OS-specific
// layer a chance to capture additional startup data via sysargs.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
72
73 func goargs() {
74 if GOOS == "windows" {
75 return
76 }
77 argslice = make([]string, argc)
78 for i := int32(0); i < argc; i++ {
79 argslice[i] = gostringnocopy(argv_index(argv, i))
80 }
81 }
82
83 func goenvs_unix() {
84
85
86
87 n := int32(0)
88 for argv_index(argv, argc+1+n) != nil {
89 n++
90 }
91
92 envs = make([]string, n)
93 for i := int32(0); i < n; i++ {
94 envs[i] = gostring(argv_index(argv, argc+1+i))
95 }
96 }
97
// environ returns the environment slice captured at startup.
func environ() []string {
	return envs
}
101
102
103
// Scratch words used by testAtomic64's startup self-checks.
var test_z64, test_x64 uint64
105
// testAtomic64 sanity-checks the 64-bit atomic operations at startup.
// A failure indicates a broken atomic implementation for this
// GOOS/GOARCH, so it throws rather than returning an error.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// CAS with a non-matching old value must fail and change nothing.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// CAS with the matching old value must succeed and store 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 catch implementations that only handle the
	// low 32 bits correctly.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the previous value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
142
143 func check() {
144 var (
145 a int8
146 b uint8
147 c int16
148 d uint16
149 e int32
150 f uint32
151 g int64
152 h uint64
153 i, i1 float32
154 j, j1 float64
155 k unsafe.Pointer
156 l *uint16
157 m [4]byte
158 )
159 type x1t struct {
160 x uint8
161 }
162 type y1t struct {
163 x1 x1t
164 y uint8
165 }
166 var x1 x1t
167 var y1 y1t
168
169 if unsafe.Sizeof(a) != 1 {
170 throw("bad a")
171 }
172 if unsafe.Sizeof(b) != 1 {
173 throw("bad b")
174 }
175 if unsafe.Sizeof(c) != 2 {
176 throw("bad c")
177 }
178 if unsafe.Sizeof(d) != 2 {
179 throw("bad d")
180 }
181 if unsafe.Sizeof(e) != 4 {
182 throw("bad e")
183 }
184 if unsafe.Sizeof(f) != 4 {
185 throw("bad f")
186 }
187 if unsafe.Sizeof(g) != 8 {
188 throw("bad g")
189 }
190 if unsafe.Sizeof(h) != 8 {
191 throw("bad h")
192 }
193 if unsafe.Sizeof(i) != 4 {
194 throw("bad i")
195 }
196 if unsafe.Sizeof(j) != 8 {
197 throw("bad j")
198 }
199 if unsafe.Sizeof(k) != goarch.PtrSize {
200 throw("bad k")
201 }
202 if unsafe.Sizeof(l) != goarch.PtrSize {
203 throw("bad l")
204 }
205 if unsafe.Sizeof(x1) != 1 {
206 throw("bad unsafe.Sizeof x1")
207 }
208 if unsafe.Offsetof(y1.y) != 1 {
209 throw("bad offsetof y1.y")
210 }
211 if unsafe.Sizeof(y1) != 2 {
212 throw("bad unsafe.Sizeof y1")
213 }
214
215 var z uint32
216 z = 1
217 if !atomic.Cas(&z, 1, 2) {
218 throw("cas1")
219 }
220 if z != 2 {
221 throw("cas2")
222 }
223
224 z = 4
225 if atomic.Cas(&z, 5, 6) {
226 throw("cas3")
227 }
228 if z != 4 {
229 throw("cas4")
230 }
231
232 z = 0xffffffff
233 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
234 throw("cas5")
235 }
236 if z != 0xfffffffe {
237 throw("cas6")
238 }
239
240 m = [4]byte{1, 1, 1, 1}
241 atomic.Or8(&m[1], 0xf0)
242 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
243 throw("atomicor8")
244 }
245
246 m = [4]byte{0xff, 0xff, 0xff, 0xff}
247 atomic.And8(&m[1], 0x1)
248 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
249 throw("atomicand8")
250 }
251
252 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
253 if j == j {
254 throw("float64nan")
255 }
256 if !(j != j) {
257 throw("float64nan1")
258 }
259
260 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
261 if j == j1 {
262 throw("float64nan2")
263 }
264 if !(j != j1) {
265 throw("float64nan3")
266 }
267
268 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
269 if i == i {
270 throw("float32nan")
271 }
272 if i == i {
273 throw("float32nan1")
274 }
275
276 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
277 if i == i1 {
278 throw("float32nan2")
279 }
280 if i == i1 {
281 throw("float32nan3")
282 }
283
284 testAtomic64()
285
286 if fixedStack != round2(fixedStack) {
287 throw("FixedStack is not power-of-2")
288 }
289
290 if !checkASM() {
291 throw("assembly checks failed")
292 }
293 }
294
// dbgVar describes a single GODEBUG setting. Exactly one of value and
// atomic is non-nil: value-backed settings are only written at startup
// (see parsegodebug), while atomic-backed settings can also be updated
// later via reparsedebugvars. def is the default applied when the
// setting is absent.
type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}
301
302
303
304
305
// debug holds the variables parsed from the GODEBUG environment
// variable, except for "memprofilerate", which is handled specially
// because an existing int var (MemProfileRate) already stores it.
// See dbgvars for the name-to-field mapping and parseRuntimeDebugVars
// for the defaults.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	containermaxprocs        int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	updatemaxprocs           int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// malloc is derived at startup: it is true when any of
	// inittrace, sbrk, or checkfinalizers is nonzero (see
	// parseRuntimeDebugVars), so allocation paths can test one flag.
	malloc          bool
	inittrace       int32
	sbrk            int32
	checkfinalizers int32

	// The fields below are atomic.Int32 rather than plain int32
	// because they can be re-read and updated after startup via
	// reparsedebugvars (see the atomic entries in dbgvars).
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	asynctimerchan atomic.Int32
}
364
365 var dbgvars = []*dbgVar{
366 {name: "adaptivestackstart", value: &debug.adaptivestackstart},
367 {name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
368 {name: "asynctimerchan", atomic: &debug.asynctimerchan},
369 {name: "cgocheck", value: &debug.cgocheck},
370 {name: "clobberfree", value: &debug.clobberfree},
371 {name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
372 {name: "dataindependenttiming", value: &debug.dataindependenttiming},
373 {name: "decoratemappings", value: &debug.decoratemappings, def: 1},
374 {name: "disablethp", value: &debug.disablethp},
375 {name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
376 {name: "checkfinalizers", value: &debug.checkfinalizers},
377 {name: "efence", value: &debug.efence},
378 {name: "gccheckmark", value: &debug.gccheckmark},
379 {name: "gcpacertrace", value: &debug.gcpacertrace},
380 {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
381 {name: "gcstoptheworld", value: &debug.gcstoptheworld},
382 {name: "gctrace", value: &debug.gctrace},
383 {name: "harddecommit", value: &debug.harddecommit},
384 {name: "inittrace", value: &debug.inittrace},
385 {name: "invalidptr", value: &debug.invalidptr},
386 {name: "madvdontneed", value: &debug.madvdontneed},
387 {name: "panicnil", atomic: &debug.panicnil},
388 {name: "profstackdepth", value: &debug.profstackdepth, def: 128},
389 {name: "sbrk", value: &debug.sbrk},
390 {name: "scavtrace", value: &debug.scavtrace},
391 {name: "scheddetail", value: &debug.scheddetail},
392 {name: "schedtrace", value: &debug.schedtrace},
393 {name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
394 {name: "traceallocfree", atomic: &debug.traceallocfree},
395 {name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
396 {name: "tracebackancestors", value: &debug.tracebackancestors},
397 {name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
398 {name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
399 }
400
// parseRuntimeDebugVars parses the given GODEBUG value into the debug
// struct. Precedence, lowest to highest: hard-coded defaults, the
// per-variable defaults in dbgvars, the compiled-in godebugDefault,
// and finally the godebug argument itself.
func parseRuntimeDebugVars(godebug string) {
	// Hard-coded defaults that differ from the zero value.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// NOTE(review): madvdontneed defaults on for Linux;
		// presumably so released memory shows up in RSS promptly
		// (MADV_DONTNEED vs MADV_FREE) — confirm against the
		// runtime's GODEBUG documentation.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	// Apply the per-variable defaults declared in dbgvars.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var is either value-backed or atomic-backed.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Compiled-in default GODEBUG settings first...
	parsegodebug(godebugDefault, nil)

	// ...then the caller-supplied string, which therefore wins.
	parsegodebug(godebug, nil)

	// malloc summarizes the options that add work to allocation paths.
	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// NOTE(review): checkmark mode forces async preemption off;
	// presumably the checkmark verifier cannot tolerate asynchronously
	// preempted frames — confirm against upstream commentary.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}
}
456
// finishDebugVarsSetup publishes the GODEBUG environment string and
// applies the GOTRACEBACK environment setting.
func finishDebugVarsSetup() {
	// Store a pointer to a fresh copy of $GODEBUG for later readers.
	p := new(string)
	*p = gogetenv("GODEBUG")
	godebugEnv.Store(p)

	setTraceback(gogetenv("GOTRACEBACK"))
	// Record the environment's contribution so that later calls to
	// setTraceback always OR it back in (see setTraceback).
	traceback_env = traceback_cache
}
465
466
467
// reparsedebugvars re-applies GODEBUG settings from env after startup.
// Only the atomic-backed (runtime-changeable) variables are updated:
// env wins over the compiled-in defaults (first occurrence wins when
// seen is non-nil), and any atomic variable mentioned in neither is
// reset to 0.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// Environment settings take precedence...
	parsegodebug(env, seen)
	// ...then compiled-in defaults for keys env did not set.
	parsegodebug(godebugDefault, seen)
	// Anything not mentioned at all reverts to zero.
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}
481
482
483
484
485
486
487
488
489
490
491
// parsegodebug parses a comma-separated GODEBUG string of key=value
// fields, updating the variables listed in dbgvars.
//
// If seen == nil (startup parsing), fields are processed left to
// right like a KV store — the last occurrence of a key wins — and both
// value- and atomic-backed variables are written.
//
// If seen != nil (re-parsing after startup), fields are processed in
// reverse so that the FIRST occurrence wins; seen records which keys
// have been handled across calls, and only atomic-backed variables
// are written.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Forward scan: split at the next comma.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Backward scan: take the field after the last comma,
			// so earlier fields are seen later and skipped via seen.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		// Fields without '=' are silently ignored.
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		// In reverse mode, the first occurrence already handled wins.
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate is special: it lives in the exported
		// MemProfileRate int, not in the debug struct, and is only
		// honored at startup.
		if seen == nil && key == "memprofilerate" {
			if n, err := strconv.Atoi(value); err == nil {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, err := strconv.ParseInt(value, 10, 32); err == nil {
						// Plain int32 vars only at startup;
						// atomic vars at any time.
						if seen == nil && v.value != nil {
							*v.value = int32(n)
						} else if v.atomic != nil {
							v.atomic.Store(int32(n))
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
553
554
// setTraceback parses a GOTRACEBACK-style setting ("none", "single",
// "all", "system", "crash", "wer", or a numeric level) and stores the
// packed level+flags into traceback_cache.
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		// Default: level 1, current goroutine only.
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		// Windows Error Reporting: like "crash", plus enabling WER.
		// On non-Windows systems "wer" falls through to the numeric
		// default handling below.
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		// Unrecognized strings: treat as a numeric level if it fits
		// in a uint32, always with the "all" flag set.
		t = tracebackAll
		if n, err := strconv.Atoi(level); err == nil && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// NOTE(review): buildmode c-shared/c-archive always gets the crash
	// flag — presumably so the hosting C process observes the failure;
	// confirm rationale.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Bits from the GOTRACEBACK environment variable can never be
	// cleared by later calls (traceback_env is captured once at startup).
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
591
592
593
594
595 func acquirem() *m {
596 gp := getg()
597 gp.m.locks++
598 return gp.m
599 }
600
601
602 func releasem(mp *m) {
603 gp := getg()
604 mp.locks--
605 if mp.locks == 0 && gp.preempt {
606
607 gp.stackguard0 = stackPreempt
608 }
609 }
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
// reflect_typelinks returns, for each active module, a pointer to its
// type data section and the module's typelink offsets into it.
// NOTE(review): in the upstream source this is linknamed from the
// reflect package; the //go:linkname directive is outside this excerpt.
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	// NOTE(review): indexing modules[0] assumes at least one active
	// module (the main module) — confirm activeModules' contract.
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}
636
637
638
639
640
641
642
643
644
645
646
647
// reflect_resolveNameOff resolves a name offset relative to a module
// pointer and returns the raw name bytes, for use by the reflect package.
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
// reflect_resolveTypeOff resolves a type offset relative to a base type
// and returns the resulting *_type, for use by the reflect package.
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
669
670
671
672
673
674
675
676
677
678
679
680
// reflect_resolveTextOff resolves a text (code) offset relative to a
// base type and returns the function pointer, for use by the reflect
// package.
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
684
685
686
687
// reflectlite_resolveNameOff is the internal/reflectlite counterpart of
// reflect_resolveNameOff.
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
691
692
693
694
// reflectlite_resolveTypeOff is the internal/reflectlite counterpart of
// reflect_resolveTypeOff.
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
698
699
700
701
// reflect_addReflectOff registers ptr in the runtime's reflection
// offset tables and returns an id unique to the pointer; calling it
// again with the same pointer returns the same id. Ids are negative,
// counting down from -1. The tables are created lazily on first use,
// under the reflectOffs lock.
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// First registration: allocate both directions of the mapping.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// New pointer: assign the next (more negative) id and record
		// it in both maps.
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
719
720
// fips_getIndicator returns the current goroutine's FIPS service
// indicator.
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
724
725
// fips_setIndicator sets the current goroutine's FIPS service
// indicator.
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
729
// (end of excerpt)