Source file
src/runtime/mbitmap.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/runtime/atomic"
62 "runtime/internal/sys"
63 "unsafe"
64 )
65
const (
	// mallocHeaderSize is the number of bytes reserved at the start of an
	// allocation for the malloc header (a single *_type pointer). It is 8
	// on all platforms so that the data following the header stays 8-byte
	// aligned even on 32-bit targets.
	mallocHeaderSize = 8

	// minSizeForMallocHeader is the boundary (inclusive, see heapBitsInSpan)
	// below which objects do NOT get a malloc header: their pointer/scalar
	// bitmaps are stored packed at the end of the span instead. One bitmap
	// word (ptrBits bits) describes exactly goarch.PtrSize*ptrBits bytes,
	// which is why the threshold has this value — writeHeapBitsSmall and
	// heapBitsSmallForAddr assume an object's bitmap spans at most two
	// adjacent bitmap words.
	minSizeForMallocHeader = goarch.PtrSize * ptrBits
)
103
104
105
106
107
108
109
110
111
// heapBitsInSpan reports whether an object of the given user-visible size
// stores its pointer bitmap packed at the end of its span (true) rather
// than behind a malloc header or the span's largeType (false).
//
// Must agree with the allocation paths that call writeHeapBitsSmall.
func heapBitsInSpan(userSize uintptr) bool {
	// Note: minSizeForMallocHeader is an inclusive threshold here:
	// objects exactly at the boundary still keep their bits in-span.
	return userSize <= minSizeForMallocHeader
}
117
118
119
120
121
// typePointers is an iterator over the pointer slots of a heap object
// (or a sub-range of one). It is advanced with next/nextFast and skipped
// ahead with fastForward. The zero value means iteration is complete.
type typePointers struct {
	// elem is the address of the current array element of type typ being
	// iterated over. Objects that are not arrays are treated as
	// single-element arrays, in which case this value does not change.
	elem uintptr

	// addr is the address of the current bitmap word being iterated over;
	// the bit for a slot p is (p - addr) / goarch.PtrSize into mask.
	addr uintptr

	// mask is a bitmask where each bit corresponds to a pointer-word after
	// addr. Bit 0 is the pointer slot at addr itself. Bits are consumed
	// (cleared) as the iterator produces addresses.
	mask uintptr

	// typ is a pointer to the type information for the heap object's
	// element type. nil means the object is "simple": one bitmap word
	// (mask) fully describes it, and next never refills.
	typ *_type
}
142
143
144
145
146
147
148
149
150
151
152
153
// typePointersOf returns an iterator over the pointers in the memory range
// [addr, addr+size), which must lie within a single object in span.
//
// addr and size must be goarch.PtrSize-aligned. If the range is exactly one
// whole object the fastForward is skipped.
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
	base := span.objBase(addr)
	tp := span.typePointersOfUnchecked(base)
	if base == addr && size == span.elemsize {
		// Whole object: iterator already positioned correctly.
		return tp
	}
	// Skip forward to addr and clamp iteration to addr+size.
	return tp.fastForward(addr-tp.addr, addr+size)
}
162
163
164
165
166
167
168
169
170
// typePointersOfUnchecked is like typePointersOf but assumes addr is the
// base address of an object in span, and iterates over the whole object.
// It skips the range check, so the caller must guarantee the precondition.
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && span.objBase(addr) != addr {
		print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
		throw("typePointersOfUnchecked consisting of non-base-address for object")
	}

	spc := span.spanclass
	if spc.noscan() {
		// No pointers at all: return the done iterator.
		return typePointers{}
	}
	if heapBitsInSpan(span.elemsize) {
		// Small object: one bitmap word at the end of the span covers it.
		return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
	}

	// Larger object: type info comes from the malloc header (small size
	// classes) or from the span itself (large objects).
	var typ *_type
	if spc.sizeclass() != 0 {
		// The type pointer is inline, just before the object data.
		typ = *(**_type)(unsafe.Pointer(addr))
		addr += mallocHeaderSize
	} else {
		typ = span.largeType
		if typ == nil {
			// Header hasn't been set yet (freshly allocated); nothing to scan.
			return typePointers{}
		}
	}
	gcdata := typ.GCData
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
}
203
204
205
206
207
208
209
210
211
212
213
// typePointersOfType is like typePointersOf, but assumes addr points to one
// or more values of type typ laid out contiguously. typ must have a plain
// pointer bitmap (no GC program) and must not be nil.
//
// It does not handle the case where an object's type is only partially
// reflected in the memory being written; callers choose between this and
// typePointersOf accordingly (see bulkBarrierPreWrite).
func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && (typ == nil || typ.Kind_&abi.KindGCProg != 0) {
		throw("bad type passed to typePointersOfType")
	}
	if span.spanclass.noscan() {
		return typePointers{}
	}
	// Seed the iterator directly from the type's bitmap; elem/addr start at
	// addr regardless of where the object's true base is.
	gcdata := typ.GCData
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
}
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
// nextFast is the fast path of next: it only consumes bits already present
// in tp.mask, returning the address of the next set pointer bit, or 0 if
// the mask is empty (caller must then fall back to next's refill logic).
//
// nextFast is intended to be inlined into hot callers.
func (tp typePointers) nextFast() (typePointers, uintptr) {
	// TESTQ/JEQ: nothing left in this bitmap word?
	if tp.mask == 0 {
		return tp, 0
	}
	// BSFQ: index of lowest set bit.
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(tp.mask))
	} else {
		i = sys.TrailingZeros32(uint32(tp.mask))
	}
	// BTCQ: clear the bit we're returning. The &(ptrBits-1) lets the
	// compiler elide the shift-amount bounds check.
	tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
	// LEAQ: translate bit index to an address.
	return tp, tp.addr + uintptr(i)*goarch.PtrSize
}
264
265
266
267
268
269
270
271
272
// next advances the iterator, returning the updated iterator and the address
// of the next pointer slot, or 0 when there are no more pointers before
// limit (the exclusive upper bound of iteration, which must be constant
// across all calls for a given iterator).
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
	for {
		if tp.mask != 0 {
			return tp.nextFast()
		}

		// Simple (typ==nil) iterators have only one mask word; done.
		if tp.typ == nil {
			return typePointers{}, 0
		}

		// Advance to the next bitmap word, or to the next element if we've
		// moved past this element's pointer-bearing prefix (PtrBytes).
		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
			tp.elem += tp.typ.Size_
			tp.addr = tp.elem
		} else {
			tp.addr += ptrBits * goarch.PtrSize
		}

		// Ran off the end of the iteration range?
		if tp.addr >= limit {
			return typePointers{}, 0
		}

		// Refill the mask from the type's bitmap for the new window, and
		// mask off any bits that describe memory at or beyond limit.
		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
	}
}
305
306
307
308
309
310
311
312
// fastForward moves the iterator forward by n bytes and truncates iteration
// at limit (exclusive). n must leave the iterator pointer-aligned. Returns
// the done iterator if the new position is at or beyond limit.
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	// target is the address we want the next pointer to be at or after.
	target := tp.addr + n
	if target >= limit {
		return typePointers{}
	}
	if tp.typ == nil {
		// Simple iterator: just drop mask bits below target...
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
		// ...and any bits describing memory at or beyond limit.
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
		return tp
	}

	// Array-aware skip: jump whole elements when n spans at least one,
	// then align addr down to a bitmap-word boundary within the element.
	if n >= tp.typ.Size_ {
		// Skip to the element containing target, then align addr.
		oldelem := tp.elem
		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
	} else {
		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
	}

	if tp.addr-tp.elem >= tp.typ.PtrBytes {
		// Landed in the pointer-free tail of an element; move to the start
		// of the next element, whose bitmap begins at GCData.
		tp.elem += tp.typ.Size_
		tp.addr = tp.elem
		tp.mask = readUintptr(tp.typ.GCData)

		// We may have exceeded the limit after this. Bail just like next does.
		if tp.addr >= limit {
			return typePointers{}
		}
	} else {
		// Refill the mask for the new window and drop bits below target.
		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
	}
	// Truncate at limit in all cases.
	if tp.addr+goarch.PtrSize*ptrBits > limit {
		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
	}
	return tp
}
366
367
368
369
370
371
372 func (span *mspan) objBase(addr uintptr) uintptr {
373 return span.base() + span.objIndex(addr)*span.elemsize
374 }
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
// bulkBarrierPreWrite executes write barriers for every pointer slot in
// [dst, dst+size) before a bulk copy of size bytes from src to dst. It does
// not perform the copy itself. The caller must have already verified that
// the copy can contain pointers.
//
// If typ is non-nil and has no GC program, the slots are derived from typ's
// bitmap (dst must then point to one or more complete values of typ);
// otherwise they are derived from the heap bitmap of dst's object.
//
// dst, src, and size must be pointer-aligned. dst may be in the heap, data,
// or bss; unallocated or out-of-range dst is ignored (see the stack-copy
// note below).
func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	s := spanOf(dst)
	if s == nil {
		// If dst is a global, use the data or BSS bitmaps to execute
		// write barriers; otherwise there is nothing to do.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now (e.g. a stack
		// being copied, or manually-managed memory). The barrier is not
		// needed in that case: either the memory is dead or the pointers
		// will be found by stack scanning. Just ignore it.
		return
	}
	buf := &getg().m.p.ptr().wbBuf

	// Optionally cross-check the type-based and heap-bitmap-based iterators.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}

	var tp typePointers
	if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	if src == 0 {
		// Destination-only barrier (e.g. memclr): record old dst values.
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		// Copy barrier: record both the old dst value and the new src value.
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but records only
// the new (src-side) pointer values, not the old dst values. It is used when
// dst is known to hold no live pointers (e.g. freshly zeroed memory), so
// only the incoming pointers need shading.
//
// dst, src, and size must be pointer-aligned; dst must be in the heap.
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	s := spanOf(dst)

	// Optionally cross-check the type-based and heap-bitmap-based iterators.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}

	var tp typePointers
	if typ != nil && typ.Kind_&abi.KindGCProg == 0 {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	for {
		var addr uintptr
		if tp, addr = tp.next(dst + size); addr == 0 {
			break
		}
		// Shade the value about to be written at this slot.
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}
536
537
538
539
540
541
// initHeapBits initializes the heap bitmap for a newly-prepared span,
// clearing the in-span bitmap area for small scannable spans and user
// arena chunks.
//
// NOTE(review): forceClear is unused in this view — presumably retained
// for interface compatibility with callers; confirm against call sites.
func (s *mspan) initHeapBits(forceClear bool) {
	if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
		b := s.heapBits()
		clear(b)
	}
}
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
// heapBits returns the bitmap words stored at the end of the span, which
// describe the pointer slots of all objects in a small-object span (one for
// which heapBitsInSpan(elemsize) is true) or a user arena chunk.
func (span *mspan) heapBits() []uintptr {
	const doubleCheck = false

	if doubleCheck && !span.isUserArenaChunk {
		if span.spanclass.noscan() {
			throw("heapBits called for noscan")
		}
		if span.elemsize > minSizeForMallocHeader {
			throw("heapBits called for span class that should have a malloc header")
		}
	}
	// Small-object spans are always exactly one page, so we can hard-code
	// pageSize there; that lets heapBitsSlice's constant arithmetic fold.
	if span.npages == 1 {
		// This will be inlined and constant-folded down.
		return heapBitsSlice(span.base(), pageSize)
	}
	return heapBitsSlice(span.base(), span.npages*pageSize)
}
583
584
585
586
// heapBitsSlice constructs a []uintptr aliasing the bitmap area at the end
// of a span: the final spanSize/(PtrSize*8) bytes of [spanBase, spanBase+spanSize).
// The slice points at non-heap memory, hence the notInHeapSlice detour.
func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
	bitmapSize := spanSize / goarch.PtrSize / 8
	elems := int(bitmapSize / goarch.PtrSize)
	var sl notInHeapSlice
	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
	return *(*[]uintptr)(unsafe.Pointer(&sl))
}
594
595
596
597
598
599
600
// heapBitsSmallForAddr loads the heap bits for the object starting at addr
// from the bitmap at the end of span, returning them packed into a single
// uintptr (bit 0 = first pointer word of the object).
//
// Assumes the object's bits span at most two adjacent bitmap words, which
// heapBitsInSpan/minSizeForMallocHeader guarantee.
func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
	spanSize := span.npages * pageSize
	bitmapSize := spanSize / goarch.PtrSize / 8
	hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))

	// These objects are always small enough that their bitmaps
	// fit in a single word, so just load the word or two we need.
	//
	// Mirrors the code in writeHeapBitsSmall.
	//
	// i = bitmap word index, j = bit offset within that word.
	i := (addr - span.base()) / goarch.PtrSize / ptrBits
	j := (addr - span.base()) / goarch.PtrSize % ptrBits
	bits := span.elemsize / goarch.PtrSize
	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))

	var read uintptr
	if j+bits > ptrBits {
		// Two reads: the object's bits straddle a word boundary.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		read = *word0 >> j
		read |= (*word1 & ((1 << bits1) - 1)) << bits0
	} else {
		// One read: the bits fit entirely within word0.
		read = (*word0 >> j) & ((1 << bits) - 1)
	}
	return read
}
633
634
635
636
637
638
639
640
// writeHeapBitsSmall writes the heap bits for a small object of type typ
// allocated at x, replicating typ's one-word bitmap across dataSize bytes
// and storing the result into the span-end bitmap. Returns the number of
// bytes of the object that the GC must scan (scanSize).
//
// Assumes the object's bits fit in one uintptr and span at most two bitmap
// words (the heapBitsInSpan precondition).
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
	// The objects here are always really small, so a single load is sufficient.
	src0 := readUintptr(typ.GCData)

	// Replicate the element bitmap to cover every element in [x, x+dataSize).
	bits := span.elemsize / goarch.PtrSize
	scanSize = typ.PtrBytes
	src := src0
	switch typ.Size_ {
	case goarch.PtrSize:
		// Pure-pointer elements: all slots are pointers.
		src = (1 << (dataSize / goarch.PtrSize)) - 1
	default:
		for i := typ.Size_; i < dataSize; i += typ.Size_ {
			src |= src0 << (i / goarch.PtrSize)
			scanSize += typ.Size_
		}
	}

	// Since we're never writing more than one uintptr's worth of bits, we're
	// either one or two words of the bitmap away.
	dst := span.heapBits()
	o := (x - span.base()) / goarch.PtrSize
	i := o / ptrBits
	j := o % ptrBits
	if j+bits > ptrBits {
		// Straddles a word boundary: two read-modify-writes.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
		dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
	} else {
		// Fits in a single word.
		dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
	}

	// Optionally read the bits back and verify round-tripping.
	const doubleCheck = false
	if doubleCheck {
		srcRead := span.heapBitsSmallForAddr(x)
		if srcRead != src {
			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
			throw("bad pointer bits written for small object")
		}
	}
	return
}
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
// heapSetType records the type information for a newly allocated object at x
// of dataSize bytes with element type typ, and returns how many bytes of it
// the GC must scan.
//
// For small objects (header == nil) the type's bitmap is written directly
// into the span's heap bits. Otherwise *header is set to the (possibly
// unrolled) type: types with GC programs get their program expanded into a
// manually-managed span holding a synthesized _type whose GCData is the
// unrolled bitmap.
func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
	const doubleCheck = false

	gctyp := typ
	if header == nil {
		if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		// Handle the case where we have no malloc header.
		scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
	} else {
		if typ.Kind_&abi.KindGCProg != 0 {
			// Allocate space to unroll the GC program into: a fake _type
			// followed by its bitmap, in GC-invisible manual memory.
			// Only large objects are expected to carry GC programs here.
			if span.spanclass.sizeclass() != 0 {
				throw("GCProg for type that isn't large")
			}
			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
			heapBitsOff := spaceNeeded
			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
			npages := alignUp(spaceNeeded, pageSize) / pageSize
			var progSpan *mspan
			systemstack(func() {
				progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
				memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
			})
			// Write a mirror of typ into the new space, but with GCData
			// pointing at the unrolled bitmap instead of the program.
			gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
			gctyp.Size_ = typ.Size_
			gctyp.PtrBytes = typ.PtrBytes
			gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
			gctyp.TFlag = abi.TFlagUnrolledBitmap

			// Expand the GC program. The first 4 bytes of GCData are the
			// program's length prefix; skip them.
			runGCProg(addb(typ.GCData, 4), gctyp.GCData)
		}

		// Publish the type via the malloc header / span largeType.
		*header = gctyp
		scanSize = span.elemsize
	}

	if doubleCheck {
		doubleCheckHeapPointers(x, dataSize, gctyp, header, span)

		// To exercise the less common cases, also iterate a random
		// type-aligned sub-interval of the object.
		maxIterBytes := span.elemsize
		if header == nil {
			maxIterBytes = dataSize
		}
		off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
		size := dataSize - off
		if size == 0 {
			off -= goarch.PtrSize
			size += goarch.PtrSize
		}
		interior := x + off
		size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
		if size == 0 {
			size = goarch.PtrSize
		}
		// Round up the type to the size of the type.
		size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
		if interior+size > x+maxIterBytes {
			size = x + maxIterBytes - interior
		}
		doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
	}
	return
}
781
// doubleCheckHeapPointers verifies that the typePointers iterator for the
// object at x produces exactly the pointer slots predicted by typ's bitmap,
// throwing with a diagnostic dump on any mismatch. Debug-only.
func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	// Check that scanning the full object works.
	tp := span.typePointersOfUnchecked(span.objBase(x))
	maxIterBytes := span.elemsize
	if header == nil {
		maxIterBytes = dataSize
	}
	bad := false
	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
		// Compute the expected pointer bit from the type's bitmap.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(x + span.elemsize)
			if addr == 0 {
				println("runtime: found bad iterator")
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		// The iterator must now be exhausted; any extra pointer is a bug.
		var addr uintptr
		tp, addr = tp.next(x + span.elemsize)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	// Mismatch: dump full state before throwing.
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&abi.KindGCProg != 0, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
	print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
	print("runtime: limit=", hex(x+span.elemsize), "\n")
	tp = span.typePointersOfUnchecked(x)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(x + span.elemsize); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}
	throw("heapSetType: pointer entry not correct")
}
838
// doubleCheckHeapPointersInterior is like doubleCheckHeapPointers but checks
// iteration over an interior sub-range [interior, interior+size) of the
// object at x. Debug-only.
func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	bad := false
	if interior < x {
		print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
		throw("found bad interior pointer")
	}
	off := interior - x
	tp := span.typePointersOf(interior, size)
	for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the expected pointer bit from the type's bitmap.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(interior + size)
			if addr == 0 {
				println("runtime: found bad iterator")
				bad = true
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		// Iterator must be exhausted now.
		var addr uintptr
		tp, addr = tp.next(interior + size)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	// Mismatch: dump full iterator state, then the expected bit pattern.
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
	print("runtime: limit=", hex(interior+size), "\n")
	tp = span.typePointersOf(interior, size)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(interior + size); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}

	print("runtime: want: ")
	for i := off; i < off+size; i += goarch.PtrSize {
		// Note: bounded by dataSize here, not span.elemsize as above.
		want := false
		if i < dataSize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			print("1")
		} else {
			print("0")
		}
	}
	println()

	throw("heapSetType: pointer entry not correct")
}
915
916
// doubleCheckTypePointersOfType verifies that typePointersOfType and
// typePointersOf produce the same sequence of pointer addresses for
// [addr, addr+size). Debug-only; throws on divergence.
func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
	if typ == nil || typ.Kind_&abi.KindGCProg != 0 {
		// typePointersOfType can't handle these; nothing to compare.
		return
	}
	if typ.Kind_&abi.KindMask == abi.Interface {
		// Interfaces are unfortunately inconsistently handled
		// when it comes to the type pointer, so it's impossible
		// to use the type information here reliably.
		return
	}
	tp0 := s.typePointersOfType(typ, addr)
	tp1 := s.typePointersOf(addr, size)
	failed := false
	for {
		var addr0, addr1 uintptr
		tp0, addr0 = tp0.next(addr + size)
		tp1, addr1 = tp1.next(addr + size)
		if addr0 != addr1 {
			failed = true
			break
		}
		if addr0 == 0 {
			break
		}
	}
	if failed {
		// Rerun both iterators from scratch, dumping each step.
		tp0 := s.typePointersOfType(typ, addr)
		tp1 := s.typePointersOf(addr, size)
		print("runtime: addr=", hex(addr), " size=", size, "\n")
		print("runtime: type=", toRType(typ).string(), "\n")
		dumpTypePointers(tp0)
		dumpTypePointers(tp1)
		for {
			var addr0, addr1 uintptr
			tp0, addr0 = tp0.next(addr + size)
			tp1, addr1 = tp1.next(addr + size)
			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
			if addr0 == 0 && addr1 == 0 {
				break
			}
		}
		throw("mismatch between typePointersOfType and typePointersOf")
	}
}
961
962 func dumpTypePointers(tp typePointers) {
963 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
964 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
965 for i := uintptr(0); i < ptrBits; i++ {
966 if tp.mask&(uintptr(1)<<i) != 0 {
967 print("1")
968 } else {
969 print("0")
970 }
971 }
972 println()
973 }
974
975
976
977
978
// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	// Note: the body is written as a single expression; upstream keeps it
	// this way to minimize temporaries during inlining (these helpers sit
	// on very hot, sometimes nosplit, paths).
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	// Single expression for the same inlining reasons as addb.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	// Single expression for the same inlining reasons as addb.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
func subtract1(p *byte) *byte {
	// Single expression for the same inlining reasons as addb.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
// markBits provides access to a single mark (or alloc) bit for an object:
// the byte holding the bit, a one-bit mask selecting it, and the object's
// index within its span.
type markBits struct {
	bytep *uint8   // byte containing the bit
	mask  uint8    // single set bit selecting it within *bytep
	index uintptr  // object index within the span
}

// allocBitsForIndex returns a markBits addressing the allocation bit for
// the object at allocBitIndex in s.
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache loads 8 bytes of s.allocBits starting at whichByte into
// s.allocCache, inverted so that TrailingZeros finds the next FREE slot
// (a 1 bit in allocCache means "free"). Bytes are assembled little-endian
// regardless of host endianness to match how nextFreeIndex consumes them.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
1059
1060
1061
1062
1063
// nextFreeIndex returns the index of the next free object slot in s at or
// after s.freeindex, or s.nelems if the span is full. It advances
// s.freeindex past the returned slot and keeps s.allocCache in sync.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Cache exhausted: move freeindex up to the next cache-aligned
		// 64-slot boundary and refill.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// nothing was available; try again with the
		// newly loaded cache.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}
	// Consume this bit (and all skipped ones) from the cache.
	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
1113
1114
1115
1116
1117
1118
// isFree reports whether the object slot at index in s is unallocated.
//
// Slots below freeIndexForScan are always considered allocated (they may
// not yet have their alloc bits published).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}
1126
1127
1128
1129
1130
1131
1132
1133
1134
// divideByElemSize returns n/s.elemsize using the span's precomputed
// magic-number multiplier (s.divMul), avoiding a hardware divide on the
// hot allocation/lookup paths.
//
// n must be within the span (s.divMul is only valid for such n).
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
1147
1148
1149
1150
1151 func (s *mspan) objIndex(p uintptr) uintptr {
1152 return s.divideByElemSize(p - s.base())
1153 }
1154
// markBitsForAddr returns the mark bit for the object containing p.
// p must point into the heap.
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

// markBitsForIndex returns the gcmark bit for object objIndex in s.
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

// markBitsForBase returns the mark bit for the first object in s.
func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
// Only safe when no other goroutine can touch the same byte.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the base of the span whose
// start address is base; throws if base is not object-0-aligned.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		// Crossed a byte boundary: move to bit 0 of the next byte.
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}
1215
1216
1217
// clobberdeadPtr is the special invalid pointer the compiler writes over
// dead slots in clobberdead debug mode: 0xdeaddead on 32-bit systems,
// 0xdeaddeaddeaddead on 64-bit ((^uintptr(0)>>63) is 1 only on 64-bit,
// selecting the high-half replication).
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
1219
1220
// badPointer reports a fatal bad-pointer diagnosis for p (found at
// refBase+refOff, if nonzero) and throws. s is the span p fell into, or
// nil if none. Typically this means incorrect use of unsafe or cgo.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// NOTE: Be careful here: a bad pointer found in an object's field may
	// itself be the symptom of corruption elsewhere.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
// findObject returns the base address for the heap object containing p,
// the object's span, and its index in the span. If p does not point into a
// heap object, it returns base == 0.
//
// If p points into an unallocated part of an in-use span (and invalidptr
// debugging is on), it throws via badPointer; refBase and refOff describe
// where p was found, for the diagnostic.
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where compiler's clobberdead mode is
			// implemented. On these platforms clobberdeadPtr cannot be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}
1299
1300
1301
1302
// reflect_verifyNotInHeapPtr reports whether p may be stored in a
// notinheap pointer: true only if p does not point into the Go heap.
// NOTE(review): upstream exposes this to reflect via a go:linkname
// directive; that directive is not visible in this view and cannot be
// safely re-added here.
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

// ptrBits is the number of bits in a pointer-sized word.
const ptrBits = 8 * goarch.PtrSize
1311
1312
1313
1314
1315
1316
1317
1318
1319
// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. This is used
// for writes to data and BSS memory (bits is the module's gcdatamask or
// gcbssmask bytedata), where the bitmap covers the section and maskOffset
// is dst's offset into it.
//
// If src == 0, only the old dst values are recorded.
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words: the whole next bitmap byte is zero.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
// typeBitsBulkBarrier executes write barriers for copying one value of type
// typ of the given size from src to dst, walking typ's 1-bit pointer mask
// directly. Used where dst may not be heap memory with its own bitmap
// (e.g. stack-to-stack copies done by reflect).
//
// typ must be non-nil, match size exactly, and not use a GC program.
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&abi.KindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		// Load the next bitmap byte every 8 pointer-words.
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
1403
1404
1405
// countAlloc returns the number of objects allocated in span s by counting
// set bits in the span's mark bitmap 64 bits at a time.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get a OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
1423
1424
1425
1426 func readUintptr(p *byte) uintptr {
1427 x := *(*uintptr)(unsafe.Pointer(p))
1428 if goarch.BigEndian {
1429 if goarch.PtrSize == 8 {
1430 return uintptr(sys.Bswap64(uint64(x)))
1431 }
1432 return uintptr(sys.Bswap32(uint32(x)))
1433 }
1434 return x
1435 }
1436
// debugPtrmask guards a scratch buffer used when debugging pointer masks.
var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask produced by running the
// GC program prog, which describes size bytes of memory. The resulting
// bitvector has size/goarch.PtrSize bits. The mask memory is
// persistentalloc'd and never freed.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	// The +1 byte is a canary to detect overflow of the unrolled program.
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
// runGCProg interprets the GC program prog, writing the expanded 1-bit
// pointer bitmap to dst, and returns the number of bits written.
//
// GC program format (one byte opcodes):
//   - 0x00:        end of program
//   - 0x01..0x7f:  emit the next n bits literally (packed in following bytes)
//   - 0x80|n, c:   repeat the previous n bits c times; n == 0 means n is a
//     varint following the opcode, and c is always a varint.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes of bits to dst.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 (opcode 0x00) means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to repeat

		// If the number of bits being repeated is small enough to fit in a
		// register-sized pattern, build the pattern once and stamp it out;
		// otherwise fall through to a byte-level copy from earlier output.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with the bits pending in bits/nbits, then pull already-
			// written bytes back from dst until we have n pattern bits.
			pattern := bits
			npattern := nbits

			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We may have pulled in more bits than needed; trim from the top
			// (the top bits of pattern are the oldest in memory order).
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			if npattern == 1 {
				// Single-bit pattern: either all ones (widen to maxBits at a
				// time) or all zeros (just extend the count).
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				// Multi-bit pattern: replicate it to fill the register,
				// keeping the width a multiple of the original pattern.
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until it fills the register.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim to a multiple of the original pattern size.
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Stamp the (possibly widened) pattern into the output,
			// flushing full bytes as they fill up.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Emit the final fragment of the repeat.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat of a run too large for a register: copy the last n bits
		// from the output we have already produced, bit-by-byte. off is how
		// far back (in bits) the source run begins relative to the pending
		// bits.
		off := n - nbits
		// Leading source fragment, if the run does not start byte-aligned.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main copy loop: whole bytes.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Trailing fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Flush remaining bits, zero-padding the final partial byte, and report
	// the true number of bits written (before padding).
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}
1664
1665
1666
1667
1668
1669
// materializeGCProg allocates an mspan (manually-managed, GC-invisible
// memory) and unrolls the GC program prog into it, producing a 1-bit
// pointer bitmap for ptrdata bytes of data. The caller must eventually
// free the span with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	// Skip the 4-byte length prefix at the start of the program.
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

// dematerializeGCProg frees a span obtained from materializeGCProg.
func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}
1682
// dumpGCProg prints a human-readable disassembly of the GC program at p
// for debugging: one line per instruction, prefixed with the running bit
// offset. See runGCProg for the encoding.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			// Literal run of x bits, packed into (x+7)/8 bytes.
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			// Repeat instruction; length may be an explicit varint.
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			// Count is always a varint.
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}
1727
1728
1729
1730
1731
1732
1733
// reflect_gcbits returns the GC pointer mask for x, for testing.
// It forwards directly to getgcmask.
//
// NOTE(review): upstream pairs this function with a linkname directive
// ("go:linkname reflect_gcbits reflect.gcbits"); confirm that directive
// was not lost in extraction, since without it reflect.gcbits has no
// implementation.
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}
1737
1738
1739
1740
// getgcmask returns the GC pointer mask for the value *ep points to,
// for testing. ep must be an interface holding a pointer; the mask
// describes the pointee type. The result has one byte per pointer-sized
// word: 1 if the GC treats that word as a pointer, 0 otherwise.
// Trailing scalar words may be trimmed from the result.
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type

	// Require a pointer type; the mask is computed for the element type.
	var et *_type
	if t.Kind_&abi.KindMask != abi.Pointer {
		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
	}
	et = (*ptrtype)(unsafe.Pointer(t)).Elem

	// Case 1: p points into a module's data or bss segment.
	// Read bits straight out of the module's static GC bitmaps.
	for _, datap := range activeModules() {
		// data segment
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				// off is the word index of p+i within the data segment.
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss segment
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// Case 2: p points into the heap.
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			// No pointers at all in this span's objects.
			return nil
		}
		limit := base + s.elemsize

		// Move base up to the iterator's start address; for spans with
		// malloc headers this hides the header from the caller.
		tp := s.typePointersOfUnchecked(base)
		base = tp.addr

		// Unroll the pointer mask the GC actually observes for this
		// object by walking the heap-side type-pointer iterator.
		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
		for {
			var addr uintptr
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
		}

		// Verify the tail of the allocation is zeroed.
		// NOTE(review): `i` starts at an address (limit) but is compared
		// against a size (s.elemsize), so for typical heap addresses this
		// condition is never true and the loop is a no-op. The intended
		// bound is unclear from here (limit already equals the old
		// base+s.elemsize) — confirm against upstream.
		for i := limit; i < s.elemsize; i++ {
			if *(*byte)(unsafe.Pointer(i)) != 0 {
				throw("found non-zeroed tail of allocation")
			}
		}

		// Trim trailing scalar (zero) words from the mask.
		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
		}

		if et.Kind_&abi.KindGCProg == 0 {
			// Cross-check: unroll the mask a second time, from the type
			// information alone, and compare.
			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
			tp = s.typePointersOfType(et, base)
			for {
				var addr uintptr
				if tp, addr = tp.next(limit); addr == 0 {
					break
				}
				maskFromType[(addr-base)/goarch.PtrSize] = 1
			}

			// Only the prefix covered by the (trimmed) heap mask is
			// compared; maskFromType may extend further since it tiles
			// the type across the full element size.
			differs := false
			for i := range maskFromHeap {
				if maskFromHeap[i] != maskFromType[i] {
					differs = true
					break
				}
			}

			// Any disagreement between the two derivations is a runtime
			// invariant violation — dump both masks and crash.
			if differs {
				print("runtime: heap mask=")
				for _, b := range maskFromHeap {
					print(b)
				}
				println()
				print("runtime: type mask=")
				for _, b := range maskFromType {
					print(b)
				}
				println()
				print("runtime: type=", toRType(et).string(), "\n")
				throw("found two different masks from two different methods")
			}
		}

		// The heap-derived mask is the authoritative answer.
		mask = maskFromHeap

		// Keep ep (and thus the object) alive until we've finished
		// reading its memory above.
		KeepAlive(ep)
		return
	}

	// Case 3: p points into the current goroutine's stack.
	// Find the frame containing p and read bits from its stack map.
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				// Frame has no live locals recorded; nothing to report.
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				// Word offset of p+i within the frame's locals area,
				// which ends at varp and spans `size` bytes.
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// Otherwise p is not something the GC tracks (e.g. read-only data);
	// report no mask.
	return
}
1904
View as plain text