// Go runtime source: src/runtime/runtime1.go (excerpt).
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "internal/runtime/atomic"
11 "unsafe"
12 )
13
14
15
16
17
18
// The GOTRACEBACK setting is packed into a single uint32: the two low
// bits are boolean flags and the remaining high bits hold the numeric
// traceback level (see gotraceback and setTraceback).
const (
	tracebackCrash = 1 << iota // crash (rather than just exit) after printing tracebacks
	tracebackAll               // print stacks of all goroutines, not just the current one
	tracebackShift = iota      // bit offset at which the numeric level is stored
)

// traceback_cache is the packed, currently-effective traceback setting,
// read atomically by gotraceback and written by setTraceback. It starts
// at level 2 (no flags) so failures before parsedebugvars runs still
// get a detailed traceback — NOTE(review): rationale inferred; confirm.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the setting derived from the GOTRACEBACK
// environment variable (captured once in parsedebugvars). setTraceback
// ORs it back into every later value, making environment-derived flags
// sticky across runtime/debug.SetTraceback-style updates.
var traceback_env uint32
27
28
29
30
31
32
33
34
35
36
37 func gotraceback() (level int32, all, crash bool) {
38 gp := getg()
39 t := atomic.Load(&traceback_cache)
40 crash = t&tracebackCrash != 0
41 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
42 if gp.m.traceback != 0 {
43 level = int32(gp.m.traceback)
44 } else if gp.m.throwing >= throwTypeRuntime {
45
46
47 level = 2
48 } else {
49 level = int32(t >> tracebackShift)
50 }
51 return
52 }
53
// Raw process arguments, recorded by args() at startup and consumed by
// goargs and goenvs_unix. argv is the C-style vector of pointers.
var (
	argc int32
	argv **byte
)
58
59
60
61
62 func argv_index(argv **byte, i int32) *byte {
63 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
64 }
65
66 func args(c int32, v **byte) {
67 argc = c
68 argv = v
69 sysargs(c, v)
70 }
71
72 func goargs() {
73 if GOOS == "windows" {
74 return
75 }
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
79 }
80 }
81
82 func goenvs_unix() {
83
84
85
86 n := int32(0)
87 for argv_index(argv, argc+1+n) != nil {
88 n++
89 }
90
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
93 envs[i] = gostring(argv_index(argv, argc+1+i))
94 }
95 }
96
// environ returns the runtime's cached copy of the process environment
// (populated by goenvs_unix on unix-like systems).
func environ() []string {
	return envs
}
100
// Package-level scratch words used by testAtomic64 below.
var test_z64, test_x64 uint64
104
// testAtomic64 sanity-checks the 64-bit atomic primitives (Cas64,
// Load64, Store64, Xadd64, Xchg64) on the package-level scratch words
// test_z64 and test_x64. Any mismatch throws.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Cas64 must fail when old (0) does not match *ptr (42)...
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// ...and succeed (installing 1) when it does.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 exercise the high word (important on 32-bit
	// platforms where 64-bit atomics are synthesized).
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the previous value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
141
// check verifies basic compiler and runtime invariants at startup:
// primitive type sizes, struct layout, timediv, 32-bit CAS, byte-wide
// atomic Or8/And8, NaN comparison semantics, the 64-bit atomics
// (via testAtomic64), the fixed stack size, and assembly-level checks.
// Any failure throws.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	// Sizes of the primitive types must match the expected model.
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	// Struct layout: a one-byte struct, and the offset/size of a
	// struct embedding it (no unexpected padding).
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	// Cas must succeed when the old value matches...
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	// ...fail (leaving the value untouched) when it does not...
	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	// ...and handle values with the high bit set.
	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Byte-wide atomics must not disturb neighboring bytes.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	// An all-ones bit pattern is a NaN; it must compare unequal to
	// everything, including itself.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	// Equivalent to !(i != i): a second phrasing of the self-compare.
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	// fixedStack must be a power of two (round2 is identity on one).
	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
297
// dbgVar describes one GODEBUG setting: its key name and where its
// parsed value is stored. Exactly one of value or atomic should be
// non-nil: value-backed variables are only written at startup
// (parsedebugvars), while atomic-backed ones can also be updated later
// (reparsedebugvars).
type dbgVar struct {
	name   string
	value  *int32        // set once at startup
	atomic *atomic.Int32 // may change while the program runs
	def    int32         // default applied when non-zero (see parsedebugvars)
}
304
305
306
307
308
// debug holds the values parsed from the GODEBUG environment variable
// plus derived flags. parsedebugvars populates it at startup; dbgvars
// (below) maps GODEBUG key names to these fields. Plain int32 fields
// are written only at startup; atomic fields may also be updated later
// (see reparsedebugvars).
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // defaults to 1 on Linux (see parsedebugvars)
	runtimeContentionStacks  atomic.Int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32

	// malloc is derived, not a GODEBUG key of its own: it is set when
	// any of allocfreetrace, inittrace or sbrk is non-zero (computed at
	// the end of parsedebugvars).
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32

	panicnil atomic.Int32

	// asynctimerchan is atomic-backed so it can be changed after
	// startup — NOTE(review): its exact consumer semantics are not
	// visible in this file; confirm against the timer code.
	asynctimerchan atomic.Int32
}
354
// dbgvars is the registry mapping GODEBUG key names to the fields of
// the debug struct above; parsegodebug consults it when applying
// settings, and parsedebugvars/reparsedebugvars use it for defaults.
var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "allocfreetrace", value: &debug.allocfreetrace},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
}
385
// parsedebugvars initializes the debug struct at startup: hard-coded
// defaults first, then per-variable defaults from dbgvars, then the
// compile-time GODEBUG defaults, then the GODEBUG environment variable
// — each layer overriding the previous one. It also captures the
// GOTRACEBACK environment setting.
func parsedebugvars() {
	// Hard-coded defaults.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// NOTE(review): Linux defaults madvdontneed to 1; the original
		// rationale (MADV_DONTNEED vs MADV_FREE accounting behavior) is
		// not visible in this excerpt — confirm against upstream.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	// Publish the raw GODEBUG string for readers of godebugEnv.
	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// Apply per-variable defaults declared in dbgvars.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either value or atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Apply compile-time GODEBUG settings...
	parsegodebug(godebugDefault, nil)

	// ...then environment settings, which override them.
	parsegodebug(godebug, nil)

	// Derived flag: any of these enables extra work in malloc.
	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
433
434
435
// reparsedebugvars re-applies debug settings after the GODEBUG value
// has changed to env. Only atomic-backed variables are affected:
// env is applied first, then the compile-time defaults for keys env
// did not mention, and any atomic variable mentioned by neither is
// reset to 0.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// Apply the new environment settings.
	parsegodebug(env, seen)
	// Apply compile-time GODEBUG settings for as-yet-unseen keys.
	parsegodebug(godebugDefault, seen)
	// Reset every atomic variable mentioned by neither source.
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}
449
450
451
452
453
454
455
456
457
458
459
// parsegodebug parses a GODEBUG-style "key=value,key=value" string,
// updating the variables listed in dbgvars.
//
// If seen == nil (startup), it scans left-to-right so that within one
// string a later setting overrides an earlier one, and it updates both
// value-backed and atomic-backed variables.
//
// If seen != nil (a later update), it scans right-to-left, recording
// each key in seen and skipping keys already present — so the
// rightmost setting in a string still wins, and keys handled by an
// earlier parsegodebug call are left alone. In this mode only
// atomic-backed variables are updated.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Startup: process left to right, overwriting older settings.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Incremental update: take the last comma-separated field.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Malformed field (no '='): ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate is special-cased: it is an int (not int32),
		// lives outside dbgvars, and is only settable at startup.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
521
522
// setTraceback updates traceback_cache from a GOTRACEBACK-style level
// string: "none", "single" (or empty), "all", "system", "crash",
// "wer" (Windows only), or a decimal number used directly as the
// level.
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			// Like "crash", plus Windows Error Reporting.
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		// Not Windows: treat "wer" like any unrecognized value.
		fallthrough
	default:
		t = tracebackAll
		// Accept a numeric level if it fits in uint32.
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// In library/archive builds C code owns the process; force the
	// crash bit so fatal errors are loud rather than a quiet exit.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Keep flags derived from the GOTRACEBACK environment variable.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
559
560
561
562
563
564
565
566
// timediv divides v by div, returning the 32-bit quotient and, when
// rem is non-nil, storing the remainder through it. It uses only
// shifts, subtractions and comparisons — no 64-bit divide instruction
// — which matters on 32-bit platforms. If the quotient does not fit
// in 31 bits it saturates: the result is 0x7fffffff and the remainder
// is reported as 0.
func timediv(v int64, div int32, rem *int32) int32 {
	d := int64(div)
	q := int32(0)
	// Long division, one quotient bit at a time, from bit 30 down.
	for shift := uint(31); shift > 0; {
		shift--
		if step := d << shift; v >= step {
			v -= step
			q |= 1 << shift
		}
	}
	if v >= d {
		// Quotient would need bit 31 or higher: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return q
}
588
589
590
591
592 func acquirem() *m {
593 gp := getg()
594 gp.m.locks++
595 return gp.m
596 }
597
598
599 func releasem(mp *m) {
600 gp := getg()
601 mp.locks--
602 if mp.locks == 0 && gp.preempt {
603
604 gp.stackguard0 = stackPreempt
605 }
606 }
607
608
609 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
610 modules := activeModules()
611 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
612 ret := [][]int32{modules[0].typelinks}
613 for _, md := range modules[1:] {
614 sections = append(sections, unsafe.Pointer(md.types))
615 ret = append(ret, md.typelinks)
616 }
617 return sections, ret
618 }
619
620
621
622
623 func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
624 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
625 }
626
627
628
629
630 func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
631 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
632 }
633
634
635
636
637 func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
638 return toRType((*_type)(rtype)).textOff(textOff(off))
639 }
640
641
642
643
644 func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
645 return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
646 }
647
648
649
650
651 func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
652 return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
653 }
654
655
656
657
// reflect_addReflectOff registers ptr in the runtime's reflection
// offset maps and returns its id. Ids are negative, allocated downward
// from -1 (distinguishing them from compiler-generated offsets, which
// are presumably non-negative — confirm). Registering the same pointer
// twice returns the same id. Guarded by reflectOffsLock/Unlock; note
// the lock is released explicitly rather than deferred.
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	// Lazily initialize both directions of the mapping.
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// First time seeing this pointer: assign the next negative id
		// and record it in both maps.
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
675
// End of excerpt.