Source file src/runtime/mpagealloc.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the process's address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for systems with large address spaces, so in these cases they are mapped
// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design scales only up to heaps of a bounded size, but it can always
// be extended to larger heaps by simply adding levels to the radix tree, which
// mostly costs additional virtual address space. The choice of managing large
// arrays also means that a large amount of virtual address space may be reserved
// by the runtime.
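//
// As a concrete worked example (a sketch assuming the common 64-bit
// configuration: heapAddrBits = 48, summaryLevels = 5, and 4 MiB chunks,
// i.e. logPallocChunkBytes = 22), the level geometry works out as:
//
//	summaryL0Bits = 48 - 22 - (5-1)*3 = 14  // 2^14 root summaries
//	levelShift[0] = 48 - 14 = 34            // 16 GiB per root summary
//	levelShift[1] = 31                      // 2 GiB
//	levelShift[2] = 28                      // 256 MiB
//	levelShift[3] = 25                      // 32 MiB
//	levelShift[4] = 22                      // 4 MiB, i.e. one chunk
//
// The exact values are architecture-dependent; the authoritative tables are
// levelBits, levelShift, and levelLogPages in mpagealloc_*.go.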

package runtime

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"unsafe"
)

const (
	// The size of a bitmap chunk, i.e. the number of bits (that is, pages) to consider
	// in the bitmap at once. It is 4 MiB on most platforms; on Wasm it is 512 KiB.
	// We use a smaller chunk size on Wasm for the same reason as the smaller arena
	// size (see heapArenaBytes).
	pallocChunkPages    = 1 << logPallocChunkPages
	pallocChunkBytes    = pallocChunkPages * pageSize
	logPallocChunkPages = 9*(1-goarch.IsWasm) + 6*goarch.IsWasm
	logPallocChunkBytes = logPallocChunkPages + gc.PageShift

	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation explains how each of the constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits

	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits

	vmaNamePageAllocIndex = "page alloc index"
)

// maxSearchAddr returns the maximum searchAddr value, which indicates
// that the heap has no free space.
//
// This function exists just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
//
// It's a function (rather than a variable) because it needs to be
// usable before package runtime's dynamic initialization is complete.
// See #51913 for details.
func maxSearchAddr() offAddr { return maxOffAddr }

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint

// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
}

// chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}

// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}
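
// Taken together, these helpers decompose an address. As an illustrative
// identity (not used by the allocator, and relying on arenaBaseOffset being
// chunk-aligned on all supported configurations), for any heap address p:
//
//	p == chunkBase(chunkIndex(p)) + uintptr(chunkPageIndex(p))*pageSize + p%pageSize
//
// That is, chunkIndex selects the chunk, chunkPageIndex selects the page
// within it, and the remainder is the offset within the page.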

// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> pallocChunksL1Shift
	}
}

// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<pallocChunksL2Bits - 1)
	}
}

// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a - arenaBaseOffset) >> levelShift[level])
}

// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
}

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
	// This is slightly more nuanced than just a shift for the exclusive
	// upper-bound. Note that the exclusive upper bound may be within a
	// summary at this level, meaning if we just do the obvious computation
	// hi will end up being an inclusive upper bound. Unfortunately, just
	// adding 1 to that is too broad since we might be on the very edge
	// of a summary's max page count boundary for this level
	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
	// then shift, then add 1, so we get an exclusive upper bound at the end.
	lo = int((base - arenaBaseOffset) >> levelShift[level])
	hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
	return
}

// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	e := uintptr(1) << levelBits[level]
	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
}
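
// To make the boundary subtlety in addrsToSummaryRange concrete, consider a
// hypothetical leaf level with 4 MiB summaries (levelShift[level] = 22) and
// arenaBaseOffset = 0. For [base, limit) = [0, 4 MiB), the naive computation
// limit>>22 + 1 would yield hi = 2, one entry too many, because limit sits
// exactly on a summary boundary. Shifting limit-1 instead gives
// hi = ((4 MiB - 1) >> 22) + 1 = 1, the correct exclusive bound.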

type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory which is only Reserved, which may result in a hard fault.
	//
	// We may still get segmentation faults < len since some of that
	// memory may not be committed yet.
	summary [summaryLevels][]pallocSum

	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	//	chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 32 (wasm)    | 0       | 13      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit: the
	// address space is small, so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which strikes a good balance between fine granularity
	// and keeping the impact on BSS low (note the L1 is stored directly
	// in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// Protected by mheapLock.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData

	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr.addr()) must always be true. The one
	// exception to this rule is that it may take on the value of
	// maxOffAddr to indicate that the heap is exhausted.
	//
	// We guarantee that all valid heap addresses below this value
	// are allocated and not worth searching.
	searchAddr offAddr

	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx

	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// We care much more about having a contiguous heap in these cases
	// and take additional measures to ensure that, so in nearly all
	// cases this should have just 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges

	// scav stores the scavenger state.
	scav struct {
		// index is an efficient index of chunks that have pages available to
		// scavenge.
		index scavengeIndex

		// releasedBg is the amount of memory released in the background this
		// scavenge cycle.
		releasedBg atomic.Uintptr

		// releasedEager is the amount of memory released eagerly this scavenge
		// cycle.
		releasedEager atomic.Uintptr
	}

	// mheap_.lock. This level of indirection makes it possible
	// to test pageAlloc independently of the runtime allocator.
	mheapLock *mutex

	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *sysMemStat

	// summaryMappedReady is the number of bytes mapped in the Ready state
	// in the summary structure. Used only for testing currently.
	//
	// Protected by mheapLock.
	summaryMappedReady uintptr

	// chunkHugePages indicates whether page bitmap chunks should be backed
	// by huge pages.
	chunkHugePages bool

	// Whether or not this struct is being used in tests.
	test bool
}
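
// An illustrative decomposition of the two-level chunks lookup above (a
// sketch assuming the 48-bit row of the table, i.e. pallocChunksL1Bits = 13
// and pallocChunksL2Bits = 13):
//
//	ci := chunkIndex(a)          // 26-bit chunk index (48 - 22 bits)
//	l1 := uint(ci) >> 13         // what ci.l1() computes
//	l2 := uint(ci) & (1<<13 - 1) // what ci.l2() computes
//	data := p.chunks[l1][l2]     // what p.chunkOf(ci) returns a pointer to
//
// The L1 array is part of pageAlloc itself; each L2 block is a separate
// 1 MiB mapping created lazily by grow.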

func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	p.sysStat = sysStat

	// Initialize p.inUse.
	p.inUse.init(sysStat)

	// System-dependent initialization.
	p.sysInit(test)

	// Start with the searchAddr in a state indicating there's no free memory.
	p.searchAddr = maxSearchAddr()

	// Set the mheapLock.
	p.mheapLock = mheapLock

	// Initialize the scavenge index.
	p.summaryMappedReady += p.scav.index.init(test, sysStat)

	// Set if we're in a test.
	p.test = test
}

// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return &l2[ci.l2()]
}

// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &p.chunks[ci.l1()][ci.l2()]
}
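
// A minimal usage sketch of the two accessors above (hypothetical caller,
// not part of the allocator): code that cannot prove ci lies in a mapped
// region should prefer tryChunkOf and check for nil, e.g.
//
//	if chunk := p.tryChunkOf(ci); chunk != nil {
//		// chunk's backing memory is mapped; safe to read.
//	}
//
// chunkOf skips the nil check and is reserved for paths (like grow and
// allocRange) that already hold mheapLock and know ci is valid.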

// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *p.sysStat will be updated.
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
	assertLockHeld(p.mheapLock)

	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	p.sysGrow(base, limit)

	// Grow the scavenge index.
	p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)

	// Update p.start and p.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := p.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < p.start {
		p.start = start
	}
	if end > p.end {
		p.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	p.inUse.add(makeAddrRange(base, limit))

	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below p.searchAddr, update p.searchAddr to the
	// new address, just like in free.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}

	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if p.chunks[c.l1()] == nil {
			// Create the necessary l2 entry.
			const l2Size = unsafe.Sizeof(*p.chunks[0])
			r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
			if r == nil {
				throw("pageAlloc: out of memory")
			}
			if !p.test {
				// Make the chunk mapping eligible or ineligible
				// for huge pages, depending on what our current
				// state is.
				if p.chunkHugePages {
					sysHugePage(r, l2Size)
				} else {
					sysNoHugePage(r, l2Size)
				}
			}
			// Store the new chunk block but avoid a write barrier.
			// grow is used in call chains that disallow write barriers.
			*(*uintptr)(unsafe.Pointer(&p.chunks[c.l1()])) = uintptr(r)
		}
		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}

	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries.
	p.update(base, size/pageSize, true, false)
}
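
// A worked example of grow (a sketch; in practice mheap grows the heap in
// whole chunks, so base and size arrive chunk-aligned): growing by one 4 MiB
// chunk at a chunk-aligned base adds [base, base+4MiB) to inUse, possibly
// lowers p.searchAddr to base, allocates and wires up a new L2 block if base
// falls in a part of the address space whose L2 block doesn't exist yet,
// marks all 512 pages scavenged, and finally calls
// update(base, 512, true, false) to publish the pages as free in the
// summaries.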

// enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
//
// This function is idempotent.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (p *pageAlloc) enableChunkHugePages() {
	// Grab the heap lock to turn on huge pages for new chunks and clone the current
	// heap address space ranges.
	//
	// After the lock is released, we can be sure that bitmaps for any new chunks may
	// be backed with huge pages, and we have the address space for the rest of the
	// chunks. At the end of this function, all chunk metadata should be backed by huge
	// pages.
	lock(&mheap_.lock)
	if p.chunkHugePages {
		unlock(&mheap_.lock)
		return
	}
	p.chunkHugePages = true
	var inUse addrRanges
	inUse.sysStat = p.sysStat
	p.inUse.cloneInto(&inUse)
	unlock(&mheap_.lock)

	// This might seem like a lot of work, but all these loops are for generality.
	//
	// For a 1 GiB contiguous heap, a 48-bit address space, 13 L1 bits, a palloc chunk size
	// of 4 MiB, and adherence to the default set of heap address hints, this will result in
	// exactly 1 call to sysHugePage.
	//
	// N.B. We iterate over the cloned inUse, not p.inUse, since the heap lock
	// is no longer held; the upper bound is inclusive so the L2 block
	// containing the last chunk is also enlightened.
	for _, r := range inUse.ranges {
		for i := chunkIndex(r.base.addr()).l1(); i <= chunkIndex(r.limit.addr()-1).l1(); i++ {
			// N.B. We can assume that p.chunks[i] is non-nil and in a mapped part of p.chunks
			// because it's derived from inUse, which never shrinks.
			sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
		}
	}
}

// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	assertLockHeld(p.mheapLock)

	// base, limit, start, and end are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)

	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := p.summary[len(p.summary)-1][sc]
		y := p.chunkOf(sc).summarize()
		if x == y {
			return
		}
		p.summary[len(p.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := p.summary[len(p.summary)-1]

		// Update the summary for chunk sc.
		summary[sc] = p.chunkOf(sc).summarize()

		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		whole := p.summary[len(p.summary)-1][sc+1 : ec]
		if alloc {
			clear(whole)
		} else {
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}

		// Update the summary for chunk ec.
		summary[ec] = p.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := p.summary[len(p.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = p.chunkOf(c).summarize()
		}
	}

	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false

		// "Constants" for the previous level which we
		// need to compute the summary from that level.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]

		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)

		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := p.summary[l][i]
			if old != sum {
				changed = true
				p.summary[l][i] = sum
			}
		}
	}
}
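
// A concrete illustration of update's fast path (hypothetical bitmap state):
// suppose a chunk has two free runs, pages 10-20 and pages 100-104, so its
// packed summary is (start=0, max=11, end=0). Allocating pages 100-104
// removes the smaller run, but summarize() still yields (0, 11, 0): x == y,
// so update returns without touching any higher level. Only when the leaf
// summary actually changes does the walk up the tree begin, and that walk
// stops at the first level where nothing changed.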

// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
	assertLockHeld(p.mheapLock)

	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
		}
		chunk = p.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
	}
	p.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}

// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
	assertLockHeld(p.mheapLock)

	// If we're not in a test, validate first by checking mheap_.arenas.
	// This is a fast path which is only safe to use outside of testing.
	ai := arenaIndex(addr.addr())
	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
		vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
		if ok {
			return offAddr{vAddr}
		} else {
			// The candidate search address is greater than any
			// known address, which means we definitely have no
			// free memory left.
			return maxOffAddr
		}
	}
	return addr
}
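
// As a worked example of allocRange's chunk splitting (hypothetical values,
// assuming 512-page chunks): allocating npages = 3 starting at page 510 of
// chunk c gives sc = c, ec = c+1, si = 510, ei = 0. The first chunk gets
// allocRange(510, 2), the last gets allocRange(0, 1), and the scavenged bits
// popcounted along the way are returned as bytes, e.g. 2 scavenged pages
// yield 2*pageSize = 16 KiB with 8 KiB pages.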

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses p.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(p.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate p.searchAddr, which may or
// may not prune more of the address space than p.searchAddr already does.
// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
	assertLockHeld(p.mheapLock)

	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	// 1) That a given subtree contains a large enough contiguous region, at
	//    which point it continues iterating on the next level, or
	// 2) That there are enough contiguous boundary-crossing bits to satisfy
	//    the allocation, at which point it knows exactly where to start
	//    allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space in which we are certain to
	// find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base.addr() is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound offAddr
	}{
		base:  minOffAddr,
		bound: maxOffAddr,
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr.add(size - 1)
		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
			throw("range partially overlaps")
		}
	}

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(p.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := p.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
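		//
		// For example (hypothetical values): with entriesPerBlock = 8 and
		// i = 8, a p.searchAddr whose index at this level is 13 satisfies
		// 13 &^ 7 == 8 == i, so the scan starts at j0 = 13 & 7 = 5,
		// skipping entries 8 through 12, which cover only addresses below
		// searchAddr and therefore contain no free pages.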
		j0 := 0
		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A full entry means we broke any streak and
				// that we should skip it altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

			s := sum.start()
			if size+s >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += s
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || s < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
			return addr, p.findMappedAddr(firstFree.base)
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr()
		}

		// We're not at level zero, and we exhausted the level we were looking in.
		// This means that either our calculations were wrong or the level above
		// lied to us. In either case, dump some useful state and throw.
		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
		for j := 0; j < len(entries); j++ {
			sum := entries[j]
			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		}
		throw("bad summary data")
	}

	// Since we've gotten to this point, that means we haven't found a
	// sufficiently-sized free region straddling some boundary (chunk or larger).
	// This means the last summary we inspected must have had a large enough "max"
	// value, so look inside the chunk to find a suitable run.
	//
	// After iterating over all levels, i must contain a chunk index which
	// is what the final level represents.
	ci := chunkIdx(i)
	j, searchIdx := p.chunkOf(ci).find(npages, 0)
	if j == ^uint(0) {
		// We couldn't find any space in this chunk despite the summaries telling
		// us it should be there. There's likely a bug, so dump some state and throw.
		sum := p.summary[len(p.summary)-1][i]
		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		print("runtime: npages = ", npages, "\n")
		throw("bad summary data")
	}

	// Compute the address at which the free space starts.
	addr := chunkBase(ci) + uintptr(j)*pageSize

	// Since we actually searched the chunk, we may have
	// found an even narrower free window.
	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
	return addr, p.findMappedAddr(firstFree.base)
}
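
// A hedged trace of a find descent (all indices hypothetical): searching for
// npages = 4, the root scan finds that entry 3 is the first whose max() >= 4
// and descends with i = 3. The next level scans p.summary[1][3<<3 : 4<<3];
// if entry 5 there has end() = 2 and entry 6 has start() = 2, the run
// straddles their boundary, size+s reaches 4, and the address is computed
// from the block base plus base*pageSize. Otherwise the walk continues down
// until i names a single chunk, and the chunk's bitmap-level find finishes
// the search inside it.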

// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := minOffAddr
	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(p.searchAddr.addr())
		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
			if j == ^uint(0) {
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = p.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			p.searchAddr = maxSearchAddr()
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = p.allocRange(addr, npages)

	// If we found a higher searchAddr, we know that all the
	// heap memory before that searchAddr in an offset address space is
	// allocated, so bump p.searchAddr up to the new one.
	if p.searchAddr.lessThan(searchAddr) {
		p.searchAddr = searchAddr
	}
	return addr, scav
}

// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) free(base, npages uintptr) {
	assertLockHeld(p.mheapLock)

	// If we're freeing pages below the p.searchAddr, update searchAddr.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	limit := base + npages*pageSize - 1
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		pi := chunkPageIndex(base)
		p.chunkOf(i).free1(pi)
		p.scav.index.free(i, pi, 1)
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			p.chunkOf(sc).free(si, ei+1-si)
			p.scav.index.free(sc, si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			p.chunkOf(sc).free(si, pallocChunkPages-si)
			p.scav.index.free(sc, si, pallocChunkPages-si)
			for c := sc + 1; c < ec; c++ {
				p.chunkOf(c).freeAll()
				p.scav.index.free(c, 0, pallocChunkPages)
			}
			p.chunkOf(ec).free(0, ei+1)
			p.scav.index.free(ec, 0, ei+1)
		}
	}
	p.update(base, npages, true, false)
}

// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize)
// as both allocated and scavenged. This is used for randomizing the base heap
// address. Both the alloc and scav bits are set so that the pages are not used
// and so the memory accounting stats are correctly calculated.
//
// Similar to allocRange, it also updates the summaries to reflect the
// newly-updated bitmap.
//
// p.mheapLock must be held.
func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
	assertLockHeld(p.mheapLock)

	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
	if sc == ec {
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
		chunk.scavenged.setRange(si, ei+1-si)
	} else {
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		chunk.scavenged.setRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
			chunk.scavenged.setAll()
		}
		chunk = p.chunkOf(ec)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
		chunk.scavenged.setRange(0, ei+1)
	}
	p.update(base, npages, true, true)
}

const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)

// pallocSum is a packed summary type which packs three numbers: start, max,
// and end into a single 8-byte value. Each of these values is a count over
// a bitmap and may take on values up to 2^21 - 1; alternatively, all three
// may be equal to 2^21. The latter case is represented by just setting the
// 64th bit.
type pallocSum uint64

// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}

// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
}

// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}
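
// A worked example of the packing (assuming the 64-bit value
// logMaxPackedValue = 21, so each field occupies a 21-bit lane):
//
//	s := packPallocSum(1, 3, 2)
//	_ = uint64(s) // 1 | 3<<21 | 2<<42 == 0x80000600001
//	s.start()     // 1
//	s.max()       // 3
//	s.end()       // 2
//
// The saturated case packPallocSum(maxPackedValue, maxPackedValue,
// maxPackedValue) is encoded as just bit 63 set, and every accessor
// special-cases it.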

// mergeSummaries merges consecutive summaries, each of which may represent
// at most 1 << logMaxPagesPerSum pages, into one.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, most, and end.
	start, most, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free, otherwise this summary's start
		// plays no role in the combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// and at the max sums[i], taking the greatest of those two
		// and the max of the running sum.
		most = max(most, end+si, mi)

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, most, end)
}
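
// A worked example of the merge (hypothetical summaries, with
// logMaxPagesPerSum = 9, i.e. 512 pages per input summary): merging
//
//	sums[0] = packPallocSum(128, 256, 64) // 64 free pages at its end
//	sums[1] = packPallocSum(448, 448, 0)  // 448 free pages at its start
//
// yields start = 128 (sums[0] is not fully free, so sums[1].start cannot
// extend it), most = max(256, 64+448, 448) = 512 (the boundary-crossing
// run), and end = 0 (sums[1] is not fully free). The result is
// packPallocSum(128, 512, 0).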