// Copyright 2023 The Go Authors. All rights reserved.// Use of this source code is governed by a BSD-style// license that can be found in the LICENSE file.package runtimeimport ()// A Pinner is a set of Go objects each pinned to a fixed location in memory. The// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned// objects.//// The purpose of a Pinner is two-fold.// First, it allows C code to safely use Go pointers that have not been passed// explicitly to the C code via a cgo call.// For example, for safely interacting with a pointer stored inside of a struct// whose pointer is passed to a C function.// Second, it allows C memory to safely retain that Go pointer even after the// cgo call returns, provided the object remains pinned.//// A Pinner arranges for its objects to be automatically unpinned some time after// it becomes unreachable, so its referents will not leak. However, this means the// Pinner itself must be kept alive across a cgo call, or as long as C retains a// reference to the pinned Go pointers.//// Reusing a Pinner is safe, and in fact encouraged, to avoid the cost of// initializing new Pinners on first use.//// The zero value of Pinner is ready to use.typePinnerstruct { *pinner}// Pin pins a Go object, preventing it from being moved or freed by the garbage// collector until the [Pinner.Unpin] method has been called.//// A pointer to a pinned object can be directly stored in C memory or can be// contained in Go memory passed to C functions. If the pinned object itself// contains pointers to Go objects, these objects must be pinned separately if they// are going to be accessed from C code.//// The argument must be a pointer of any type or an [unsafe.Pointer].//// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.func ( *Pinner) ( any) {if .pinner == nil {// Check the pinner cache first. 
:= acquirem()if := .p.ptr(); != nil { .pinner = .pinnerCache .pinnerCache = nil }releasem()if .pinner == nil {// Didn't get anything from the pinner cache. .pinner = new(pinner) .refs = .refStore[:0]// We set this finalizer once and never clear it. Thus, if the // pinner gets cached, we'll reuse it, along with its finalizer. // This lets us avoid the relatively expensive SetFinalizer call // when reusing from the cache. The finalizer however has to be // resilient to an empty pinner being finalized, which is done // by checking p.refs' length.SetFinalizer(.pinner, func( *pinner) {iflen(.refs) != 0 { .unpin() // only required to make the test idempotentpinnerLeakPanic() } }) } } := pinnerGetPtr(&)ifsetPinned(, true) { .refs = append(.refs, ) }}// Unpin unpins all pinned objects of the [Pinner].// It's safe and encouraged to reuse a Pinner after calling Unpin.func ( *Pinner) () { .pinner.unpin() := acquirem()if := .p.ptr(); != nil && .pinnerCache == nil {// Put the pinner back in the cache, but only if the // cache is empty. If application code is reusing Pinners // on its own, we want to leave the backing store in place // so reuse is more efficient. .pinnerCache = .pinner .pinner = nil }releasem()}const ( pinnerSize = 64 pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil)))type pinner struct { refs []unsafe.Pointer refStore [pinnerRefStoreSize]unsafe.Pointer}func ( *pinner) () {if == nil || .refs == nil {return }for := range .refs {setPinned(.refs[], false) }// The following two lines make all pointers to references // in p.refs unreachable, either by deleting them or dropping // p.refs' backing store (if it was not backed by refStore). 
.refStore = [pinnerRefStoreSize]unsafe.Pointer{} .refs = .refStore[:0]}func pinnerGetPtr( *any) unsafe.Pointer { := efaceOf() := ._typeif == nil {panic(errorString("runtime.Pinner: argument is nil")) }if := .Kind(); != abi.Pointer && != abi.UnsafePointer {panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType().string())) }ifinUserArenaChunk(uintptr(.data)) {// Arena-allocated objects are not eligible for pinning.panic(errorString("runtime.Pinner: object was allocated into an arena")) }return .data}// isPinned checks if a Go pointer is pinned.// nosplit, because it's called from nosplit code in cgocheck.////go:nosplitfunc isPinned( unsafe.Pointer) bool { := spanOfHeap(uintptr())if == nil {// this code is only called for Go pointer, so this must be a // linker-allocated global object.returntrue } := .getPinnerBits()// these pinnerBits might get unlinked by a concurrently running sweep, but // that's OK because gcBits don't get cleared until the following GC cycle // (nextMarkBitArenaEpoch)if == nil {returnfalse } := .objIndex(uintptr()) := .ofObject()KeepAlive() // make sure ptr is alive until we are done so the span can't be freedreturn .isPinned()}// setPinned marks or unmarks a Go pointer as pinned, when the ptr is a Go pointer.// It will be ignored while trying to pin a non-Go pointer.// It will panic while trying to unpin a non-Go pointer,// which should not happen in normal usage.func setPinned( unsafe.Pointer, bool) bool { := spanOfHeap(uintptr())if == nil {if ! {panic(errorString("tried to unpin non-Go pointer")) }// This is a linker-allocated, zero size object or other object, // nothing to do, silently ignore it.returnfalse }// ensure that the span is swept, b/c sweeping accesses the specials list // w/o locks. 
:= acquirem() .ensureSwept()KeepAlive() // make sure ptr is still alive after span is swept := .objIndex(uintptr())lock(&.speciallock) // guard against concurrent calls of setPinned on same span := .getPinnerBits()if == nil { = .newPinnerBits() .setPinnerBits() } := .ofObject()if {if .isPinned() {// multiple pins on same object, set multipin bit .setMultiPinned(true)// and increase the pin counter // TODO(mknyszek): investigate if systemstack is necessary heresystemstack(func() { := * .elemsize .incPinCounter() }) } else {// set pin bit .setPinned(true) } } else {// unpinif .isPinned() {if .isMultiPinned() {varbool// TODO(mknyszek): investigate if systemstack is necessary heresystemstack(func() { := * .elemsize = .decPinCounter() })if ! {// counter is 0, clear multipin bit .setMultiPinned(false) } } else {// no multipins recorded. unpin object. .setPinned(false) } } else {// unpinning unpinned object, bail outthrow("runtime.Pinner: object already unpinned") } }unlock(&.speciallock)releasem()returntrue}type pinState struct { bytep *uint8 byteVal uint8 mask uint8}// nosplit, because it's called by isPinned, which is nosplit////go:nosplitfunc ( *pinState) () bool {return (.byteVal & .mask) != 0}func ( *pinState) () bool {return (.byteVal & (.mask << 1)) != 0}func ( *pinState) ( bool) { .set(, false)}func ( *pinState) ( bool) { .set(, true)}// set sets the pin bit of the pinState to val. 
If multipin is true, it// sets/unsets the multipin bit instead.func ( *pinState) ( bool, bool) { := .maskif { <<= 1 }if {atomic.Or8(.bytep, ) } else {atomic.And8(.bytep, ^) }}// pinnerBits is the same type as gcBits but has different methods.type pinnerBits gcBits// ofObject returns the pinState of the n'th object.// nosplit, because it's called by isPinned, which is nosplit////go:nosplitfunc ( *pinnerBits) ( uintptr) pinState { , := (*gcBits)().bitp( * 2) := atomic.Load8()returnpinState{, , }}func ( *mspan) () uintptr {returndivRoundUp(uintptr(.nelems)*2, 8)}// newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this// span's pinner bits. newPinnerBits is used to mark objects that are pinned.// They are copied when the span is swept.func ( *mspan) () *pinnerBits {return (*pinnerBits)(newMarkBits(uintptr(.nelems) * 2))}// nosplit, because it's called by isPinned, which is nosplit////go:nosplitfunc ( *mspan) () *pinnerBits {return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&.pinnerBits)))}func ( *mspan) ( *pinnerBits) {atomicstorep(unsafe.Pointer(&.pinnerBits), unsafe.Pointer())}// refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the// next GC cycle. If it does not contain any pinned objects, pinnerBits of the// span is set to nil.func ( *mspan) () { := .getPinnerBits()if == nil {return } := false := alignUp(.pinnerBitSize(), 8)// Iterate over each 8-byte chunk and check for pins. 
Note that // newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we // don't have to worry about edge cases, irrelevant bits will simply be // zero.for , := rangeunsafe.Slice((*uint64)(unsafe.Pointer(&.x)), /8) {if != 0 { = truebreak } }if { := .newPinnerBits()memmove(unsafe.Pointer(&.x), unsafe.Pointer(&.x), ) .setPinnerBits() } else { .setPinnerBits(nil) }}// incPinCounter is only called for multiple pins of the same object and records// the _additional_ pins.func ( *mspan) ( uintptr) {var *specialPinCounter , := .specialFindSplicePoint(, _KindSpecialPinCounter)if ! {lock(&mheap_.speciallock) = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())unlock(&mheap_.speciallock)// splice in record, fill in offset. .special.offset = .special.kind = _KindSpecialPinCounter .special.next = * * = (*special)(unsafe.Pointer())spanHasSpecials() } else { = (*specialPinCounter)(unsafe.Pointer(*)) } .counter++}// decPinCounter decreases the counter. If the counter reaches 0, the counter// special is deleted and false is returned. Otherwise true is returned.func ( *mspan) ( uintptr) bool { , := .specialFindSplicePoint(, _KindSpecialPinCounter)if ! {throw("runtime.Pinner: decreased non-existing pin counter") } := (*specialPinCounter)(unsafe.Pointer(*)) .counter--if .counter == 0 { * = .special.nextif .specials == nil {spanHasNoSpecials() }lock(&mheap_.speciallock)mheap_.specialPinCounterAlloc.free(unsafe.Pointer())unlock(&mheap_.speciallock)returnfalse }returntrue}// only for testsfunc pinnerGetPinCounter( unsafe.Pointer) *uintptr { , , := findObject(uintptr(), 0, 0) := * .elemsize , := .specialFindSplicePoint(, _KindSpecialPinCounter)if ! 
{returnnil } := (*specialPinCounter)(unsafe.Pointer(*))return &.counter}// to be able to test that the GC panics when a pinned pointer is leaking, this// panic function is a variable, that can be overwritten by a test.var pinnerLeakPanic = func() {panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))}
The pages are generated with Golds v0.8.3-preview. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PR and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.