Source File: mpagecache.go
Belonging Package: runtime
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
	"runtime/internal/sys"
	"unsafe"
)

// pageCachePages is the number of pages tracked by a pageCache:
// 8 bits per byte of the 8-byte cache bitmap, i.e. 64 pages.
const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
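
// The following helper is an illustrative sketch and not part of the
// original file: it makes the bitmap layout concrete. Bit i of
// c.cache corresponds to the page at address c.base + i*pageSize.
func pageCacheFreePagesDemo(c *pageCache) []uintptr {
	var pages []uintptr
	for b := c.cache; b != 0; b &= b - 1 { // clear the lowest set bit each iteration
		i := uintptr(sys.TrailingZeros64(b)) // index of the lowest free page
		pages = append(pages, c.base+i*pageSize)
	}
	return pages
}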
// empty reports whether the page cache has no free pages.
func (c *pageCache) empty() bool {
	return c.cache == 0
}
// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
	if npages == 1 {
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // set bit to mark in-use
		c.scav &^= 1 << i  // clear bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}
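
// allocOneDemo is an illustrative sketch and not part of the original
// file: it walks through the npages == 1 fast path above with concrete,
// hypothetical values for base and the two bitmaps.
func allocOneDemo() {
	c := pageCache{base: 0x100000, cache: 0b0110, scav: 0b0100}
	// The lowest set bit of c.cache is bit 1, so the first free page is
	// at c.base + 1*pageSize. Bit 1 of c.scav is 0, so no scavenged
	// memory is reported.
	base, scav := c.alloc(1)
	print("base=", hex(base), " scav=", scav, "\n")
	// The next call picks bit 2, which is marked scavenged, so the
	// reported scavenged amount is one page (pageSize bytes).
	base, scav = c.alloc(1)
	print("base=", hex(base), " scav=", scav, "\n")
}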
// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		return 0, 0
	}
	mask := ((uint64(1) << npages) - 1) << i
	scav := sys.OnesCount64(c.scav & mask)
	c.cache &^= mask // mark in-use bits
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}
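
// findBitRange64Naive is an illustrative, unoptimized equivalent of
// findBitRange64 (defined in mpallocbits.go), shown here only to
// clarify its contract: it returns the index of the first run of n
// consecutive 1 bits in c, or a value >= 64 if no such run exists.
// The real implementation shrinks runs of 1s with shifts rather than
// scanning. This sketch is not part of the original file.
func findBitRange64Naive(c uint64, n uint) uint {
	for i := uint(0); i+n <= 64; i++ {
		mask := ((uint64(1) << n) - 1) << i
		if c&mask == mask { // all n pages starting at bit i are free
			return i
		}
	}
	return 64
}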
// flush empties out unallocated free pages in the given cache
// into p. Then, it clears the cache, such that empty returns
// true.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
	assertLockHeld(p.mheapLock)

	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			p.chunkOf(ci).free1(pi + i)

			// Update density statistics.
			p.scav.index.free(ci, pi+i, 1)
		}
		if c.scav&(1<<i) != 0 {
			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}

	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	p.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}
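
// flushDemo paraphrases how the runtime invokes flush when a P is
// destroyed (see (*p).destroy in proc.go): any pages still cached by
// the P are returned to the page allocator under the heap lock, on the
// system stack. Simplified sketch; not part of the original file.
func flushDemo(pp *p) {
	systemstack(func() {
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
}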
// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(p.searchAddr.addr()) // chunk index
	var chunk *pallocData
	if p.summary[len(p.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr address.
		chunk = p.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
		if j == ^uint(0) {
			throw("bad summary data")
		}
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := p.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			p.searchAddr = maxSearchAddr()
			return pageCache{}
		}
		ci = chunkIndex(addr)
		chunk = p.chunkOf(ci)
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the page bits as allocated and clear the scavenged bits, but
	// be careful to only set and clear the relevant bits.
	cpi := chunkPageIndex(c.base)
	chunk.allocPages64(cpi, c.cache)
	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)

	// Update as an allocation, but note that it's not contiguous.
	p.update(c.base, pageCachePages, false, true)

	// Update density statistics.
	p.scav.index.alloc(ci, uint(sys.OnesCount64(c.cache)))

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, p.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
}
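
// pageCacheUsageSketch paraphrases, in simplified form, how the page
// allocator's caller (mheap.allocSpan in mheap.go) ties these pieces
// together: refill the per-P cache under the heap lock, then allocate
// from it without locking. Failure handling and other details are
// omitted; this sketch is not part of the original file.
func pageCacheUsageSketch(h *mheap, pp *p, npages uintptr) (base, scav uintptr) {
	c := &pp.pcache
	if c.empty() {
		// Refill the cache; allocToCache requires the heap lock and
		// must run on the system stack.
		systemstack(func() {
			lock(&h.lock)
			*c = h.pages.allocToCache()
			unlock(&h.lock)
		})
	}
	// Allocate npages from the per-P cache, lock-free.
	base, scav = c.alloc(npages)
	return
}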