Source file: src/runtime/mbitmap.go
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/goexperiment"
62 "internal/runtime/atomic"
63 "internal/runtime/gc"
64 "internal/runtime/sys"
65 "unsafe"
66 )
67
68
69
70
71
72
73
74
75
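// heapBitsInSpan reports whether the pointer bitmap ("heap bits") for an
// object of the given user size is stored directly at the end of the span,
// rather than being reachable through a malloc header or the span's largeType.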
76 func heapBitsInSpan(userSize uintptr) bool {
77
78
79 return userSize <= gc.MinSizeForMallocHeader
80 }
81
82
83
84
85
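// typePointers is an iterator over the pointer bits of a range of memory in
// a span. elem is the base of the current type-sized element, addr is the
// address corresponding to bit 0 of mask, mask holds one bit per pointer-sized
// word starting at addr, and typ (when non-nil) supplies further bitmap words
// as the iterator advances.
//
// Typical use, following the pattern in bulkBarrierPreWrite below:
//
//	tp := s.typePointersOf(addr, size)
//	for {
//		var p uintptr
//		if tp, p = tp.next(addr + size); p == 0 {
//			break
//		}
//		// p is the address of a pointer slot in [addr, addr+size).
//	}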
86 type typePointers struct {
87
88
89
90 elem uintptr
91
92
93
94 addr uintptr
95
96
97
98
99
100 mask uintptr
101
102
103
104 typ *_type
105 }
106
107
108
109
110
111
112
113
114
115
116
117
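// typePointersOf returns an iterator over the pointer slots in the memory
// range [addr, addr+size), which must lie within an allocated object in the
// span. If the range is not exactly one whole object, the iterator is
// fast-forwarded from the object's base to addr and clipped at addr+size.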
118 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
119 base := span.objBase(addr)
120 tp := span.typePointersOfUnchecked(base)
121 if base == addr && size == span.elemsize {
122 return tp
123 }
124 return tp.fastForward(addr-tp.addr, addr+size)
125 }
126
127
128
129
130
131
132
133
134
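// typePointersOfUnchecked is like typePointersOf, but addr must be the base
// address of an object in the span, and the result is not clipped to a
// sub-range. It handles the three metadata layouts: noscan spans (no
// pointers), small objects whose heap bits live at the end of the span, and
// objects described by a malloc header or the span's largeType.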
135 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
136 const doubleCheck = false
137 if doubleCheck && span.objBase(addr) != addr {
138 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
139 throw("typePointersOfUnchecked consisting of non-base-address for object")
140 }
141
142 spc := span.spanclass
143 if spc.noscan() {
144 return typePointers{}
145 }
146 if heapBitsInSpan(span.elemsize) {
147
148 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
149 }
150
151
152 var typ *_type
153 if spc.sizeclass() != 0 {
154
155 typ = *(**_type)(unsafe.Pointer(addr))
156 addr += gc.MallocHeaderSize
157 } else {
158
159
160 typ = (*_type)(atomic.Loadp(unsafe.Pointer(&span.largeType)))
161 if typ == nil {
162
163 return typePointers{}
164 }
165 }
166 gcmask := getGCMask(typ)
167 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
168 }
169
170
171
172
173
174
175
176
177
178
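// typePointersOfType returns an iterator built directly from typ's GC mask.
// addr is assumed to be the base of a value of type typ; no span metadata is
// consulted beyond the noscan check.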
179 func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
180 const doubleCheck = false
181 if doubleCheck && typ == nil {
182 throw("bad type passed to typePointersOfType")
183 }
184 if span.spanclass.noscan() {
185 return typePointers{}
186 }
187
188 gcmask := getGCMask(typ)
189 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
190 }
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
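// nextFast returns the address of the next pointer indicated by the current
// mask, clearing that bit, or 0 if the mask is empty. It never advances to
// the next bitmap word; callers fall back to next for that. The amd64 branch
// XORs a known-set bit, which is intended to compile to a single bit-toggle
// instruction; other architectures clear the lowest set bit arithmetically.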
212 func (tp typePointers) nextFast() (typePointers, uintptr) {
213
214 if tp.mask == 0 {
215 return tp, 0
216 }
217
218 var i int
219 if goarch.PtrSize == 8 {
220 i = sys.TrailingZeros64(uint64(tp.mask))
221 } else {
222 i = sys.TrailingZeros32(uint32(tp.mask))
223 }
224 if GOARCH == "amd64" {
225
226 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
227 } else {
228
229 tp.mask &= tp.mask - 1
230 }
231
232 return tp, tp.addr + uintptr(i)*goarch.PtrSize
233 }
234
235
236
237
238
239
240
241
242
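// next advances the iterator, returning the address of the next pointer slot
// strictly below limit, or a zero iterator and 0 when no pointers remain.
// When the current mask is exhausted it steps addr forward (moving elem to
// the next element once the element's pointer-bearing prefix is covered),
// reloads mask from the type's GC mask, and masks off any bits at or beyond
// limit.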
243 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
244 for {
245 if tp.mask != 0 {
246 return tp.nextFast()
247 }
248
249
250 if tp.typ == nil {
251 return typePointers{}, 0
252 }
253
254
255 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
256 tp.elem += tp.typ.Size_
257 tp.addr = tp.elem
258 } else {
259 tp.addr += ptrBits * goarch.PtrSize
260 }
261
262
263 if tp.addr >= limit {
264 return typePointers{}, 0
265 }
266
267
268 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
269 if tp.addr+goarch.PtrSize*ptrBits > limit {
270 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
271 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
272 }
273 }
274 }
275
276
277
278
279
280
281
282
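// fastForward moves the iterator forward by n bytes (relative to tp.addr) and
// clips it at limit, returning the adjusted iterator. It recomputes elem,
// addr, and mask so that the next call to next yields the first pointer at or
// after the target address.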
283 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
284
285 target := tp.addr + n
286 if target >= limit {
287 return typePointers{}
288 }
289 if tp.typ == nil {
290
291
292 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
293
294 if tp.addr+goarch.PtrSize*ptrBits > limit {
295 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
296 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
297 }
298 return tp
299 }
300
301
302
303 if n >= tp.typ.Size_ {
304
305
306 oldelem := tp.elem
307 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
308 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
309 } else {
310 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
311 }
312
313 if tp.addr-tp.elem >= tp.typ.PtrBytes {
314
315
316 tp.elem += tp.typ.Size_
317 tp.addr = tp.elem
318 tp.mask = readUintptr(getGCMask(tp.typ))
319
320
321 if tp.addr >= limit {
322 return typePointers{}
323 }
324 } else {
325
326
327 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
328 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
329 }
330 if tp.addr+goarch.PtrSize*ptrBits > limit {
331 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
332 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
333 }
334 return tp
335 }
336
337
338
339
340
341
342 func (span *mspan) objBase(addr uintptr) uintptr {
343 return span.base() + span.objIndex(addr)*span.elemsize
344 }
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
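// bulkBarrierPreWrite executes write barriers for every pointer slot in
// [dst, dst+size) before size bytes are copied from src to dst. It queues the
// old pointer values at dst and, when src != 0, the corresponding new values
// at src into the current P's write barrier buffer. dst may be in the heap,
// data, or bss; typ, when non-nil, is used in place of the span's metadata to
// locate the pointer slots.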
388 func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
389 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
390 throw("bulkBarrierPreWrite: unaligned arguments")
391 }
392 if !writeBarrier.enabled {
393 return
394 }
395 s := spanOf(dst)
396 if s == nil {
397
398
399 for _, datap := range activeModules() {
400 if datap.data <= dst && dst < datap.edata {
401 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
402 return
403 }
404 }
405 for _, datap := range activeModules() {
406 if datap.bss <= dst && dst < datap.ebss {
407 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
408 return
409 }
410 }
411 return
412 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
413
414
415
416
417
418
419 return
420 }
421 buf := &getg().m.p.ptr().wbBuf
422
423
424 const doubleCheck = false
425 if doubleCheck {
426 doubleCheckTypePointersOfType(s, typ, dst, size)
427 }
428
429 var tp typePointers
430 if typ != nil {
431 tp = s.typePointersOfType(typ, dst)
432 } else {
433 tp = s.typePointersOf(dst, size)
434 }
435 if src == 0 {
436 for {
437 var addr uintptr
438 if tp, addr = tp.next(dst + size); addr == 0 {
439 break
440 }
441 dstx := (*uintptr)(unsafe.Pointer(addr))
442 p := buf.get1()
443 p[0] = *dstx
444 }
445 } else {
446 for {
447 var addr uintptr
448 if tp, addr = tp.next(dst + size); addr == 0 {
449 break
450 }
451 dstx := (*uintptr)(unsafe.Pointer(addr))
452 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
453 p := buf.get2()
454 p[0] = *dstx
455 p[1] = *srcx
456 }
457 }
458 }
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
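// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite, but it queues only
// the pointer values read from src, for callers that know dst holds no live
// pointers (for example, memory that is still being initialized).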
474 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
475 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
476 throw("bulkBarrierPreWrite: unaligned arguments")
477 }
478 if !writeBarrier.enabled {
479 return
480 }
481 buf := &getg().m.p.ptr().wbBuf
482 s := spanOf(dst)
483
484
485 const doubleCheck = false
486 if doubleCheck {
487 doubleCheckTypePointersOfType(s, typ, dst, size)
488 }
489
490 var tp typePointers
491 if typ != nil {
492 tp = s.typePointersOfType(typ, dst)
493 } else {
494 tp = s.typePointersOf(dst, size)
495 }
496 for {
497 var addr uintptr
498 if tp, addr = tp.next(dst + size); addr == 0 {
499 break
500 }
501 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
502 p := buf.get1()
503 p[0] = *srcx
504 }
505 }
506
507
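// initHeapBits prepares the span's inline heap bitmap. For 8-byte scannable
// objects on 64-bit platforms every word is a pointer, so all bits are set;
// otherwise, spans that keep their heap bits inline (and user arena chunks)
// start with a cleared bitmap. Under the Green Tea GC experiment it also
// initializes the span's inline mark bits.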
508 func (s *mspan) initHeapBits() {
509 if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
510 b := s.heapBits()
511 for i := range b {
512 b[i] = ^uintptr(0)
513 }
514 } else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
515 b := s.heapBits()
516 clear(b)
517 }
518 if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) {
519 s.initInlineMarkBits()
520 }
521 }
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
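// heapBits returns the span's inline pointer bitmap as a slice of uintptr
// words. It is only meaningful for scannable spans whose objects are small
// enough for heapBitsInSpan, and for user arena chunks.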
537 func (span *mspan) heapBits() []uintptr {
538 const doubleCheck = false
539
540 if doubleCheck && !span.isUserArenaChunk {
541 if span.spanclass.noscan() {
542 throw("heapBits called for noscan")
543 }
544 if span.elemsize > gc.MinSizeForMallocHeader {
545 throw("heapBits called for span class that should have a malloc header")
546 }
547 }
548
549
550
551 if span.npages == 1 {
552
553 return heapBitsSlice(span.base(), pageSize, span.elemsize)
554 }
555 return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize)
556 }
557
558
559
560
561 func heapBitsSlice(spanBase, spanSize, elemsize uintptr) []uintptr {
562 base, bitmapSize := spanHeapBitsRange(spanBase, spanSize, elemsize)
563 elems := int(bitmapSize / goarch.PtrSize)
564 var sl notInHeapSlice
565 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(base)), elems, elems}
566 return *(*[]uintptr)(unsafe.Pointer(&sl))
567 }
568
569
570 func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) {
571 size = spanSize / goarch.PtrSize / 8
572 base = spanBase + spanSize - size
573 if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) {
574 base -= unsafe.Sizeof(spanInlineMarkBits{})
575 }
576 return
577 }
578
579
580
581
582
583
584
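// heapBitsSmallForAddr extracts the pointer bits for the object at addr from
// the span's inline bitmap and returns them packed into a single uintptr.
// Objects kept here are at most ptrBits words long, so their bits straddle at
// most two bitmap words.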
585 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
586 hbitsBase, _ := spanHeapBitsRange(span.base(), span.npages*pageSize, span.elemsize)
587 hbits := (*byte)(unsafe.Pointer(hbitsBase))
588
589
590
591
592
593
594
595
596
597 i := (addr - span.base()) / goarch.PtrSize / ptrBits
598 j := (addr - span.base()) / goarch.PtrSize % ptrBits
599 bits := span.elemsize / goarch.PtrSize
600 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
601 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
602
603 var read uintptr
604 if j+bits > ptrBits {
605
606 bits0 := ptrBits - j
607 bits1 := bits - bits0
608 read = *word0 >> j
609 read |= (*word1 & ((1 << bits1) - 1)) << bits0
610 } else {
611
612 read = (*word0 >> j) & ((1 << bits) - 1)
613 }
614 return read
615 }
616
617
618
619
620
621
622
623
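// writeHeapBitsSmall writes the pointer bitmap for a small object of type typ
// allocated at x, replicating typ's GC mask across dataSize bytes and storing
// the result into the span's inline bitmap (possibly split across two bitmap
// words). It returns the number of bytes of the object the GC must scan.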
624 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
625
626 src0 := readUintptr(getGCMask(typ))
627
628
629 scanSize = typ.PtrBytes
630 src := src0
631 if typ.Size_ == goarch.PtrSize {
632 src = (1 << (dataSize / goarch.PtrSize)) - 1
633 } else {
634
635
636
637 if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
638 throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
639 }
640 for i := typ.Size_; i < dataSize; i += typ.Size_ {
641 src |= src0 << (i / goarch.PtrSize)
642 scanSize += typ.Size_
643 }
644 if asanenabled {
645
646
647 src &= (1 << (dataSize / goarch.PtrSize)) - 1
648 }
649 }
650
651
652
653 dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize)
654 dst := unsafe.Pointer(dstBase)
655 o := (x - span.base()) / goarch.PtrSize
656 i := o / ptrBits
657 j := o % ptrBits
658 bits := span.elemsize / goarch.PtrSize
659 if j+bits > ptrBits {
660
661 bits0 := ptrBits - j
662 bits1 := bits - bits0
663 dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
664 dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
665 *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
666 *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
667 } else {
668
669 dst := (*uintptr)(add(dst, i*goarch.PtrSize))
670 *dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
671 }
672
673 const doubleCheck = false
674 if doubleCheck {
675 srcRead := span.heapBitsSmallForAddr(x)
676 if srcRead != src {
677 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
678 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
679 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
680 throw("bad pointer bits written for small object")
681 }
682 }
683 return
684 }
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703 const doubleCheckHeapSetType = doubleCheckMalloc
704
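// The heapSetType* functions record the type metadata for a newly allocated
// object at x, in whichever form the span uses: heapSetTypeNoHeader writes
// inline heap bits, heapSetTypeSmallHeader fills in the object's malloc
// header, and heapSetTypeLarge publishes the type as the span's largeType.
// Each returns the number of bytes of the object the GC must scan.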
705 func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
706 if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
707 throw("tried to write heap bits, but no heap bits in span")
708 }
709 scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
710 if doubleCheckHeapSetType {
711 doubleCheckHeapType(x, dataSize, typ, nil, span)
712 }
713 return scanSize
714 }
715
716 func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
717 *header = typ
718 if doubleCheckHeapSetType {
719 doubleCheckHeapType(x, dataSize, typ, header, span)
720 }
721 return span.elemsize
722 }
723
724 func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
725 gctyp := typ
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
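// Publish the type to the garbage collector. This atomic store pairs with the
// atomic load of span.largeType in typePointersOfUnchecked, so the GC only
// scans the object once its type is visible.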
775 atomic.StorepNoWB(unsafe.Pointer(&span.largeType), unsafe.Pointer(gctyp))
776 if doubleCheckHeapSetType {
777 doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
778 }
779 return span.elemsize
780 }
781
782 func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
783 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
784
785
786
787
788 maxIterBytes := span.elemsize
789 if header == nil {
790 maxIterBytes = dataSize
791 }
792 off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
793 size := dataSize - off
794 if size == 0 {
795 off -= goarch.PtrSize
796 size += goarch.PtrSize
797 }
798 interior := x + off
799 size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
800 if size == 0 {
801 size = goarch.PtrSize
802 }
803
804 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
805 if interior+size > x+maxIterBytes {
806 size = x + maxIterBytes - interior
807 }
808 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
809 }
810
811 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
812
813 tp := span.typePointersOfUnchecked(span.objBase(x))
814 maxIterBytes := span.elemsize
815 if header == nil {
816 maxIterBytes = dataSize
817 }
818 bad := false
819 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
820
821 want := false
822 if i < span.elemsize {
823 off := i % typ.Size_
824 if off < typ.PtrBytes {
825 j := off / goarch.PtrSize
826 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
827 }
828 }
829 if want {
830 var addr uintptr
831 tp, addr = tp.next(x + span.elemsize)
832 if addr == 0 {
833 println("runtime: found bad iterator")
834 }
835 if addr != x+i {
836 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
837 bad = true
838 }
839 }
840 }
841 if !bad {
842 var addr uintptr
843 tp, addr = tp.next(x + span.elemsize)
844 if addr == 0 {
845 return
846 }
847 println("runtime: extra pointer:", hex(addr))
848 }
849 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemaind=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
850 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
851 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
852 print("runtime: limit=", hex(x+span.elemsize), "\n")
853 tp = span.typePointersOfUnchecked(x)
854 dumpTypePointers(tp)
855 for {
856 var addr uintptr
857 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
858 println("runtime: would've stopped here")
859 dumpTypePointers(tp)
860 break
861 }
862 print("runtime: addr=", hex(addr), "\n")
863 dumpTypePointers(tp)
864 }
865 throw("heapSetType: pointer entry not correct")
866 }
867
868 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
869 bad := false
870 if interior < x {
871 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
872 throw("found bad interior pointer")
873 }
874 off := interior - x
875 tp := span.typePointersOf(interior, size)
876 for i := off; i < off+size; i += goarch.PtrSize {
877
878 want := false
879 if i < span.elemsize {
880 off := i % typ.Size_
881 if off < typ.PtrBytes {
882 j := off / goarch.PtrSize
883 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
884 }
885 }
886 if want {
887 var addr uintptr
888 tp, addr = tp.next(interior + size)
889 if addr == 0 {
890 println("runtime: found bad iterator")
891 bad = true
892 }
893 if addr != x+i {
894 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
895 bad = true
896 }
897 }
898 }
899 if !bad {
900 var addr uintptr
901 tp, addr = tp.next(interior + size)
902 if addr == 0 {
903 return
904 }
905 println("runtime: extra pointer:", hex(addr))
906 }
907 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
908 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
909 print("runtime: limit=", hex(interior+size), "\n")
910 tp = span.typePointersOf(interior, size)
911 dumpTypePointers(tp)
912 for {
913 var addr uintptr
914 if tp, addr = tp.next(interior + size); addr == 0 {
915 println("runtime: would've stopped here")
916 dumpTypePointers(tp)
917 break
918 }
919 print("runtime: addr=", hex(addr), "\n")
920 dumpTypePointers(tp)
921 }
922
923 print("runtime: want: ")
924 for i := off; i < off+size; i += goarch.PtrSize {
925
926 want := false
927 if i < dataSize {
928 off := i % typ.Size_
929 if off < typ.PtrBytes {
930 j := off / goarch.PtrSize
931 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
932 }
933 }
934 if want {
935 print("1")
936 } else {
937 print("0")
938 }
939 }
940 println()
941
942 throw("heapSetType: pointer entry not correct")
943 }
944
945
946 func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
947 if typ == nil {
948 return
949 }
950 if typ.Kind_&abi.KindMask == abi.Interface {
951
952
953
954 return
955 }
956 tp0 := s.typePointersOfType(typ, addr)
957 tp1 := s.typePointersOf(addr, size)
958 failed := false
959 for {
960 var addr0, addr1 uintptr
961 tp0, addr0 = tp0.next(addr + size)
962 tp1, addr1 = tp1.next(addr + size)
963 if addr0 != addr1 {
964 failed = true
965 break
966 }
967 if addr0 == 0 {
968 break
969 }
970 }
971 if failed {
972 tp0 := s.typePointersOfType(typ, addr)
973 tp1 := s.typePointersOf(addr, size)
974 print("runtime: addr=", hex(addr), " size=", size, "\n")
975 print("runtime: type=", toRType(typ).string(), "\n")
976 dumpTypePointers(tp0)
977 dumpTypePointers(tp1)
978 for {
979 var addr0, addr1 uintptr
980 tp0, addr0 = tp0.next(addr + size)
981 tp1, addr1 = tp1.next(addr + size)
982 print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
983 if addr0 == 0 && addr1 == 0 {
984 break
985 }
986 }
987 throw("mismatch between typePointersOfType and typePointersOf")
988 }
989 }
990
991 func dumpTypePointers(tp typePointers) {
992 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
993 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
994 for i := uintptr(0); i < ptrBits; i++ {
995 if tp.mask&(uintptr(1)<<i) != 0 {
996 print("1")
997 } else {
998 print("0")
999 }
1000 }
1001 println()
1002 }
1003
1004
1005
1006
1007
1008 func addb(p *byte, n uintptr) *byte {
1009
1010
1011
1012 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
1013 }
1014
1015
1016
1017
1018
1019 func subtractb(p *byte, n uintptr) *byte {
1020
1021
1022
1023 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
1024 }
1025
1026
1027
1028
1029
1030 func add1(p *byte) *byte {
1031
1032
1033
1034 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
1035 }
1036
1037
1038
1039
1040
1041
1042
1043 func subtract1(p *byte) *byte {
1044
1045
1046
1047 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
1048 }
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
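// markBits provides access to a single mark or allocation bit: bytep points
// at the byte containing the bit, mask selects the bit within that byte, and
// index is the object's index within its span.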
1059 type markBits struct {
1060 bytep *uint8
1061 mask uint8
1062 index uintptr
1063 }
1064
1065
1066 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
1067 bytep, mask := s.allocBits.bitp(allocBitIndex)
1068 return markBits{bytep, mask, allocBitIndex}
1069 }
1070
1071
1072
1073
1074
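// refillAllocCache loads the 64 allocation bits starting at the given byte of
// s.allocBits into s.allocCache, inverted so that a 1 bit means the slot is
// free.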
1075 func (s *mspan) refillAllocCache(whichByte uint16) {
1076 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
1077 aCache := uint64(0)
1078 aCache |= uint64(bytes[0])
1079 aCache |= uint64(bytes[1]) << (1 * 8)
1080 aCache |= uint64(bytes[2]) << (2 * 8)
1081 aCache |= uint64(bytes[3]) << (3 * 8)
1082 aCache |= uint64(bytes[4]) << (4 * 8)
1083 aCache |= uint64(bytes[5]) << (5 * 8)
1084 aCache |= uint64(bytes[6]) << (6 * 8)
1085 aCache |= uint64(bytes[7]) << (7 * 8)
1086 s.allocCache = ^aCache
1087 }
1088
1089
1090
1091
1092
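// nextFreeIndex returns the index of the next free object in the span, or
// s.nelems if the span is full. It advances s.freeindex and refills
// s.allocCache from the allocation bitmap as each 64-bit window is exhausted.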
1093 func (s *mspan) nextFreeIndex() uint16 {
1094 sfreeindex := s.freeindex
1095 snelems := s.nelems
1096 if sfreeindex == snelems {
1097 return sfreeindex
1098 }
1099 if sfreeindex > snelems {
1100 throw("s.freeindex > s.nelems")
1101 }
1102
1103 aCache := s.allocCache
1104
1105 bitIndex := sys.TrailingZeros64(aCache)
1106 for bitIndex == 64 {
1107
1108 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
1109 if sfreeindex >= snelems {
1110 s.freeindex = snelems
1111 return snelems
1112 }
1113 whichByte := sfreeindex / 8
1114
1115 s.refillAllocCache(whichByte)
1116 aCache = s.allocCache
1117 bitIndex = sys.TrailingZeros64(aCache)
1118
1119
1120 }
1121 result := sfreeindex + uint16(bitIndex)
1122 if result >= snelems {
1123 s.freeindex = snelems
1124 return snelems
1125 }
1126
1127 s.allocCache >>= uint(bitIndex + 1)
1128 sfreeindex = result + 1
1129
1130 if sfreeindex%64 == 0 && sfreeindex != snelems {
1131
1132
1133
1134
1135
1136 whichByte := sfreeindex / 8
1137 s.refillAllocCache(whichByte)
1138 }
1139 s.freeindex = sfreeindex
1140 return result
1141 }
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152 func (s *mspan) isFree(index uintptr) bool {
1153 if index < uintptr(s.freeindex) {
1154 return false
1155 }
1156 bytep, mask := s.allocBits.bitp(index)
1157 return *bytep&mask == 0
1158 }
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173 func (s *mspan) isFreeOrNewlyAllocated(index uintptr) bool {
1174 if index < uintptr(s.freeIndexForScan) {
1175 return false
1176 }
1177 bytep, mask := s.allocBits.bitp(index)
1178 return *bytep&mask == 0
1179 }
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189 func (s *mspan) divideByElemSize(n uintptr) uintptr {
1190 const doubleCheck = false
1191
1192
1193 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
1194
1195 if doubleCheck && q != n/s.elemsize {
1196 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
1197 throw("bad magic division")
1198 }
1199 return q
1200 }
1201
1202
1203
1204
1205 func (s *mspan) objIndex(p uintptr) uintptr {
1206 return s.divideByElemSize(p - s.base())
1207 }
1208
1209 func markBitsForAddr(p uintptr) markBits {
1210 s := spanOf(p)
1211 objIndex := s.objIndex(p)
1212 return s.markBitsForIndex(objIndex)
1213 }
1214
1215
1216 func (m markBits) isMarked() bool {
1217 return *m.bytep&m.mask != 0
1218 }
1219
1220
1221 func (m markBits) setMarked() {
1222
1223
1224
1225 atomic.Or8(m.bytep, m.mask)
1226 }
1227
1228
1229 func (m markBits) setMarkedNonAtomic() {
1230 *m.bytep |= m.mask
1231 }
1232
1233
1234 func (m markBits) clearMarked() {
1235
1236
1237
1238 atomic.And8(m.bytep, ^m.mask)
1239 }
1240
1241
1242 func markBitsForSpan(base uintptr) (mbits markBits) {
1243 mbits = markBitsForAddr(base)
1244 if mbits.mask != 1 {
1245 throw("markBitsForSpan: unaligned start")
1246 }
1247 return mbits
1248 }
1249
1250
1251 func (m *markBits) advance() {
1252 if m.mask == 1<<7 {
1253 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
1254 m.mask = 1
1255 } else {
1256 m.mask = m.mask << 1
1257 }
1258 m.index++
1259 }
1260
1261
1262
1263 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
1264
1265
1266 func badPointer(s *mspan, p, refBase, refOff uintptr) {
1267
1268
1269
1270
1271
1272
1273
1274
1275 printlock()
1276 print("runtime: pointer ", hex(p))
1277 if s != nil {
1278 state := s.state.get()
1279 if state != mSpanInUse {
1280 print(" to unallocated span")
1281 } else {
1282 print(" to unused region of span")
1283 }
1284 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
1285 }
1286 print("\n")
1287 if refBase != 0 {
1288 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
1289 gcDumpObject("object", refBase, refOff)
1290 }
1291 getg().m.traceback = 2
1292 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
1293 }
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
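// findObject resolves p to the heap object containing it, returning the
// object's base address, its span, and its index within the span, or zero
// values if p does not point into an in-use heap object. refBase and refOff
// identify where the pointer was found and are used only for diagnostics when
// debug.invalidptr reports a bad pointer.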
1319 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
1320 s = spanOf(p)
1321
1322
1323 if s == nil {
1324 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
1325
1326
1327
1328 badPointer(s, p, refBase, refOff)
1329 }
1330 return
1331 }
1332
1333
1334
1335
1336 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
1337
1338 if state == mSpanManual {
1339 return
1340 }
1341
1342
1343 if debug.invalidptr != 0 {
1344 badPointer(s, p, refBase, refOff)
1345 }
1346 return
1347 }
1348
1349 objIndex = s.objIndex(p)
1350 base = s.base() + objIndex*s.elemsize
1351 return
1352 }
1353
1354
1355
1356
1357 func reflect_verifyNotInHeapPtr(p uintptr) bool {
1358
1359
1360
1361 return spanOf(p) == nil && p != clobberdeadPtr
1362 }
1363
1364 const ptrBits = 8 * goarch.PtrSize
1365
1366
1367
1368
1369
1370
1371
1372
1373
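// bulkBarrierBitmap executes write barriers for the range [dst, dst+size)
// using a 1-bit-per-word pointer bitmap such as the data or bss bitmaps.
// maskOffset is dst's offset from the start of the region the bitmap
// describes. As in bulkBarrierPreWrite, src == 0 queues only the old values
// at dst.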
1374 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
1375 word := maskOffset / goarch.PtrSize
1376 bits = addb(bits, word/8)
1377 mask := uint8(1) << (word % 8)
1378
1379 buf := &getg().m.p.ptr().wbBuf
1380 for i := uintptr(0); i < size; i += goarch.PtrSize {
1381 if mask == 0 {
1382 bits = addb(bits, 1)
1383 if *bits == 0 {
1384
1385 i += 7 * goarch.PtrSize
1386 continue
1387 }
1388 mask = 1
1389 }
1390 if *bits&mask != 0 {
1391 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1392 if src == 0 {
1393 p := buf.get1()
1394 p[0] = *dstx
1395 } else {
1396 srcx := (*uintptr)(unsafe.Pointer(src + i))
1397 p := buf.get2()
1398 p[0] = *dstx
1399 p[1] = *srcx
1400 }
1401 }
1402 mask <<= 1
1403 }
1404 }
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
1421 if typ == nil {
1422 throw("runtime: typeBitsBulkBarrier without type")
1423 }
1424 if typ.Size_ != size {
1425 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
1426 throw("runtime: invalid typeBitsBulkBarrier")
1427 }
1428 if !writeBarrier.enabled {
1429 return
1430 }
1431 ptrmask := getGCMask(typ)
1432 buf := &getg().m.p.ptr().wbBuf
1433 var bits uint32
1434 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
1435 if i&(goarch.PtrSize*8-1) == 0 {
1436 bits = uint32(*ptrmask)
1437 ptrmask = addb(ptrmask, 1)
1438 } else {
1439 bits = bits >> 1
1440 }
1441 if bits&1 != 0 {
1442 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1443 srcx := (*uintptr)(unsafe.Pointer(src + i))
1444 p := buf.get2()
1445 p[0] = *dstx
1446 p[1] = *srcx
1447 }
1448 }
1449 }
1450
1451
1452
1453 func (s *mspan) countAlloc() int {
1454 count := 0
1455 bytes := divRoundUp(uintptr(s.nelems), 8)
1456
1457
1458
1459
1460 for i := uintptr(0); i < bytes; i += 8 {
1461
1462
1463
1464
1465 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
1466 count += sys.OnesCount64(mrkBits)
1467 }
1468 return count
1469 }
1470
1471
1472
1473 func readUintptr(p *byte) uintptr {
1474 x := *(*uintptr)(unsafe.Pointer(p))
1475 if goarch.BigEndian {
1476 if goarch.PtrSize == 8 {
1477 return uintptr(sys.Bswap64(uint64(x)))
1478 }
1479 return uintptr(sys.Bswap32(uint32(x)))
1480 }
1481 return x
1482 }
1483
1484 var debugPtrmask struct {
1485 lock mutex
1486 data *byte
1487 }
1488
1489
1490
1491
1492 func progToPointerMask(prog *byte, size uintptr) bitvector {
1493 n := (size/goarch.PtrSize + 7) / 8
1494 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1495 x[len(x)-1] = 0xa1
1496 n = runGCProg(prog, &x[0])
1497 if x[len(x)-1] != 0xa1 {
1498 throw("progToPointerMask: overflow")
1499 }
1500 return bitvector{int32(n), &x[0]}
1501 }
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
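// runGCProg executes the GC program prog, expanding it into a 1-bit-per-word
// pointer bitmap at dst, and returns the number of bits written. As decoded
// below, an instruction byte with the high bit clear emits n literal bits
// (n in the low 7 bits; n == 0 ends the program), while a byte with the high
// bit set appends c further copies of the previously emitted n bits, with n
// and c read as varints when they do not fit in the instruction byte.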
1521 func runGCProg(prog, dst *byte) uintptr {
1522 dstStart := dst
1523
1524
1525 var bits uintptr
1526 var nbits uintptr
1527
1528 p := prog
1529 Run:
1530 for {
1531
1532
1533 for ; nbits >= 8; nbits -= 8 {
1534 *dst = uint8(bits)
1535 dst = add1(dst)
1536 bits >>= 8
1537 }
1538
1539
1540 inst := uintptr(*p)
1541 p = add1(p)
1542 n := inst & 0x7F
1543 if inst&0x80 == 0 {
1544
1545 if n == 0 {
1546
1547 break Run
1548 }
1549 nbyte := n / 8
1550 for i := uintptr(0); i < nbyte; i++ {
1551 bits |= uintptr(*p) << nbits
1552 p = add1(p)
1553 *dst = uint8(bits)
1554 dst = add1(dst)
1555 bits >>= 8
1556 }
1557 if n %= 8; n > 0 {
1558 bits |= uintptr(*p) << nbits
1559 p = add1(p)
1560 nbits += n
1561 }
1562 continue Run
1563 }
1564
1565
1566 if n == 0 {
1567 for off := uint(0); ; off += 7 {
1568 x := uintptr(*p)
1569 p = add1(p)
1570 n |= (x & 0x7F) << off
1571 if x&0x80 == 0 {
1572 break
1573 }
1574 }
1575 }
1576
1577
1578 c := uintptr(0)
1579 for off := uint(0); ; off += 7 {
1580 x := uintptr(*p)
1581 p = add1(p)
1582 c |= (x & 0x7F) << off
1583 if x&0x80 == 0 {
1584 break
1585 }
1586 }
1587 c *= n
1588
1589
1590
1591
1592
1593
1594
1595
1596 src := dst
1597 const maxBits = goarch.PtrSize*8 - 7
1598 if n <= maxBits {
1599
1600 pattern := bits
1601 npattern := nbits
1602
1603
1604 src = subtract1(src)
1605 for npattern < n {
1606 pattern <<= 8
1607 pattern |= uintptr(*src)
1608 src = subtract1(src)
1609 npattern += 8
1610 }
1611
1612
1613
1614
1615
1616 if npattern > n {
1617 pattern >>= npattern - n
1618 npattern = n
1619 }
1620
1621
1622 if npattern == 1 {
1623
1624
1625
1626
1627
1628
1629 if pattern == 1 {
1630 pattern = 1<<maxBits - 1
1631 npattern = maxBits
1632 } else {
1633 npattern = c
1634 }
1635 } else {
1636 b := pattern
1637 nb := npattern
1638 if nb+nb <= maxBits {
1639
1640 for nb <= goarch.PtrSize*8 {
1641 b |= b << nb
1642 nb += nb
1643 }
1644
1645
1646 nb = maxBits / npattern * npattern
1647 b &= 1<<nb - 1
1648 pattern = b
1649 npattern = nb
1650 }
1651 }
1652
1653
1654
1655
1656 for ; c >= npattern; c -= npattern {
1657 bits |= pattern << nbits
1658 nbits += npattern
1659 for nbits >= 8 {
1660 *dst = uint8(bits)
1661 dst = add1(dst)
1662 bits >>= 8
1663 nbits -= 8
1664 }
1665 }
1666
1667
1668 if c > 0 {
1669 pattern &= 1<<c - 1
1670 bits |= pattern << nbits
1671 nbits += c
1672 }
1673 continue Run
1674 }
1675
1676
1677
1678
1679 off := n - nbits
1680
1681 src = subtractb(src, (off+7)/8)
1682 if frag := off & 7; frag != 0 {
1683 bits |= uintptr(*src) >> (8 - frag) << nbits
1684 src = add1(src)
1685 nbits += frag
1686 c -= frag
1687 }
1688
1689
1690 for i := c / 8; i > 0; i-- {
1691 bits |= uintptr(*src) << nbits
1692 src = add1(src)
1693 *dst = uint8(bits)
1694 dst = add1(dst)
1695 bits >>= 8
1696 }
1697
1698 if c %= 8; c > 0 {
1699 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1700 nbits += c
1701 }
1702 }
1703
1704
1705 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1706 nbits += -nbits & 7
1707 for ; nbits > 0; nbits -= 8 {
1708 *dst = uint8(bits)
1709 dst = add1(dst)
1710 bits >>= 8
1711 }
1712 return totalBits
1713 }
1714
1715 func dumpGCProg(p *byte) {
1716 nptr := 0
1717 for {
1718 x := *p
1719 p = add1(p)
1720 if x == 0 {
1721 print("\t", nptr, " end\n")
1722 break
1723 }
1724 if x&0x80 == 0 {
1725 print("\t", nptr, " lit ", x, ":")
1726 n := int(x+7) / 8
1727 for i := 0; i < n; i++ {
1728 print(" ", hex(*p))
1729 p = add1(p)
1730 }
1731 print("\n")
1732 nptr += int(x)
1733 } else {
1734 nbit := int(x &^ 0x80)
1735 if nbit == 0 {
1736 for nb := uint(0); ; nb += 7 {
1737 x := *p
1738 p = add1(p)
1739 nbit |= int(x&0x7f) << nb
1740 if x&0x80 == 0 {
1741 break
1742 }
1743 }
1744 }
1745 count := 0
1746 for nb := uint(0); ; nb += 7 {
1747 x := *p
1748 p = add1(p)
1749 count |= int(x&0x7f) << nb
1750 if x&0x80 == 0 {
1751 break
1752 }
1753 }
1754 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1755 nptr += nbit * count
1756 }
1757 }
1758 }
1759
1760
1761
1762
1763
1764
1765
1766 func reflect_gcbits(x any) []byte {
1767 return pointerMask(x)
1768 }
1769
1770
1771
1772
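// pointerMask reports, one byte per pointer-sized word, which words of the
// value ep points to hold pointers. It handles values in module data or bss,
// in the heap, and on the current goroutine's stack; for heap values it also
// cross-checks the mask derived from the heap bitmap against the one derived
// from the value's type.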
1773 func pointerMask(ep any) (mask []byte) {
1774 e := *efaceOf(&ep)
1775 p := e.data
1776 t := e._type
1777
1778 var et *_type
1779 if t.Kind_&abi.KindMask != abi.Pointer {
1780 throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
1781 }
1782 et = (*ptrtype)(unsafe.Pointer(t)).Elem
1783
1784
1785 for _, datap := range activeModules() {
1786
1787 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1788 bitmap := datap.gcdatamask.bytedata
1789 n := et.Size_
1790 mask = make([]byte, n/goarch.PtrSize)
1791 for i := uintptr(0); i < n; i += goarch.PtrSize {
1792 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1793 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1794 }
1795 return
1796 }
1797
1798
1799 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1800 bitmap := datap.gcbssmask.bytedata
1801 n := et.Size_
1802 mask = make([]byte, n/goarch.PtrSize)
1803 for i := uintptr(0); i < n; i += goarch.PtrSize {
1804 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1805 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1806 }
1807 return
1808 }
1809 }
1810
1811
1812 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1813 if s.spanclass.noscan() {
1814 return nil
1815 }
1816 limit := base + s.elemsize
1817
1818
1819
1820
1821 tp := s.typePointersOfUnchecked(base)
1822 base = tp.addr
1823
1824
1825 maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
1826 for {
1827 var addr uintptr
1828 if tp, addr = tp.next(limit); addr == 0 {
1829 break
1830 }
1831 maskFromHeap[(addr-base)/goarch.PtrSize] = 1
1832 }
1833
1834
1835
1836
1837 for i := limit; i < s.elemsize; i++ {
1838 if *(*byte)(unsafe.Pointer(i)) != 0 {
1839 throw("found non-zeroed tail of allocation")
1840 }
1841 }
1842
1843
1844
1845 for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
1846 maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
1847 }
1848
1849
1850 maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
1851 tp = s.typePointersOfType(et, base)
1852 for {
1853 var addr uintptr
1854 if tp, addr = tp.next(limit); addr == 0 {
1855 break
1856 }
1857 maskFromType[(addr-base)/goarch.PtrSize] = 1
1858 }
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870 differs := false
1871 for i := range maskFromHeap {
1872 if maskFromHeap[i] != maskFromType[i] {
1873 differs = true
1874 break
1875 }
1876 }
1877
1878 if differs {
1879 print("runtime: heap mask=")
1880 for _, b := range maskFromHeap {
1881 print(b)
1882 }
1883 println()
1884 print("runtime: type mask=")
1885 for _, b := range maskFromType {
1886 print(b)
1887 }
1888 println()
1889 print("runtime: type=", toRType(et).string(), "\n")
1890 throw("found two different masks from two different methods")
1891 }
1892
1893
1894 mask = maskFromHeap
1895
1896
1897
1898
1899 KeepAlive(ep)
1900 return
1901 }
1902
1903
1904 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1905 found := false
1906 var u unwinder
1907 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1908 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1909 found = true
1910 break
1911 }
1912 }
1913 if found {
1914 locals, _, _ := u.frame.getStackMap(false)
1915 if locals.n == 0 {
1916 return
1917 }
1918 size := uintptr(locals.n) * goarch.PtrSize
1919 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1920 mask = make([]byte, n/goarch.PtrSize)
1921 for i := uintptr(0); i < n; i += goarch.PtrSize {
1922 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1923 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1924 }
1925 }
1926 return
1927 }
1928
1929
1930
1931
1932 return
1933 }
1934