// Source file: traceregion.go — package runtime (Go standard library).
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Simple not-in-heap bump-pointer traceRegion allocator.
package runtime
import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
type traceRegionAlloc struct {
	// lock serializes block installation and the full-list updates;
	// bump-pointer allocation within the current block is lock-free.
	lock mutex
	// dropping is set while drop runs so alloc can detect (and throw
	// on) a forbidden concurrent alloc/drop. For checking invariants.
	dropping atomic.Bool
	// current is the block being bump-allocated from.
	current atomic.UnsafePointer // *traceRegionAllocBlock
	// full is the list of exhausted blocks, freed by drop.
	full *traceRegionAllocBlock
}
// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	// data is the bump-allocated payload; off (in the embedded header)
	// indexes into it.
	data [traceRegionAllocBlockData]byte
}
// traceRegionAllocBlockHeader is the metadata prefix of a
// traceRegionAllocBlock: the full-list link and the bump-pointer offset.
type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	// off is the number of bytes handed out from data so far;
	// advanced atomically by alloc.
	off atomic.Uintptr
}
// traceRegionAllocBlockData is the payload size of a block, chosen so the
// whole traceRegionAllocBlock (header included) is exactly 64 KiB.
const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
// alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func ( *traceRegionAlloc) ( uintptr) *notInHeap {
= alignUp(, 8)
if > traceRegionAllocBlockData {
throw("traceRegion: alloc too large")
}
if .dropping.Load() {
throw("traceRegion: alloc with concurrent drop")
}
// Try to bump-pointer allocate into the current block.
:= (*traceRegionAllocBlock)(.current.Load())
if != nil {
:= .off.Add()
if <= uintptr(len(.data)) {
return (*notInHeap)(unsafe.Pointer(&.data[-]))
}
}
// Try to install a new block.
lock(&.lock)
// Check block again under the lock. Someone may
// have gotten here first.
= (*traceRegionAllocBlock)(.current.Load())
if != nil {
:= .off.Add()
if <= uintptr(len(.data)) {
unlock(&.lock)
return (*notInHeap)(unsafe.Pointer(&.data[-]))
}
// Add the existing block to the full list.
.next = .full
.full =
}
// Allocate a new block.
= (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
if == nil {
throw("traceRegion: out of memory")
}
// Allocate space for our current request, so we always make
// progress.
.off.Store()
:= (*notInHeap)(unsafe.Pointer(&.data[0]))
// Publish the new block.
.current.Store(unsafe.Pointer())
unlock(&.lock)
return
}
// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func ( *traceRegionAlloc) () {
.dropping.Store(true)
for .full != nil {
:= .full
.full = .next
sysFree(unsafe.Pointer(), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
}
if := .current.Load(); != nil {
sysFree(, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
.current.Store(nil)
}
.dropping.Store(false)
}
// (Rendered by Golds v0.7.0-preview, GOOS=linux GOARCH=amd64 — a Go docs
// generator by Tapir Liu; this footer is generator output, not source code.)