Source File: mgcwork.go
Belonging Package: runtime
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
	_WorkbufSize = 2048 // in bytes; larger values result in less contention

	// workbufAlloc is the number of bytes to allocate at a time
	// for new workbufs. This must be a multiple of pageSize and
	// should be a multiple of _WorkbufSize.
	//
	// Larger values reduce workbuf allocation overhead. Smaller
	// values reduce heap fragmentation.
	workbufAlloc = 32 << 10
)
func init() {
	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		throw("bad workbufAlloc")
	}
}
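// Illustrative note (not part of the original file): with the runtime's
// 8 KiB page size, workbufAlloc = 32<<10 = 32768 passes both checks above,
// since 32768 = 4*8192 = 16*2048. Each allocation chunk is therefore four
// runtime pages and is later sliced into exactly 16 workbufs by getempty.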
// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects. A grey object is one that is marked and on a work
// queue. A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects. Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//	(preemption must be disabled)
//	gcw := &getg().m.p.ptr().gcw
//	.. call gcw.put() to produce and gcw.tryGet() to consume ..
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	//
	// This can be thought of as a stack of both work buffers'
	// pointers concatenated. When we pop the last pointer, we
	// shift the stack up by one work buffer by bringing in a new
	// full buffer and discarding an empty one. When we fill both
	// buffers, we shift the stack down by one work buffer by
	// bringing in a new empty buffer and discarding a full one.
	// This way we have one buffer's worth of hysteresis, which
	// amortizes the cost of getting or putting a work buffer over
	// at least one buffer of work and reduces contention on the
	// global work lists.
	//
	// wbuf1 is always the buffer we're currently pushing to and
	// popping from and wbuf2 is the buffer that will be discarded
	// next.
	//
	// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
	wbuf1, wbuf2 *workbuf

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Heap scan work performed on this gcWork. This is aggregated into
	// gcController by dispose and may also be flushed by callers.
	// Other types of scan work are flushed immediately.
	heapScanWork int64

	// flushedWork indicates that a non-empty work buffer was
	// flushed to the global work list since the last gcMarkDone
	// termination check. Specifically, this indicates that this
	// gcWork may have communicated work to another gcWork.
	flushedWork bool
}
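// Illustrative sketch (not part of the original file): the stack-based usage
// pattern described in the gcWork doc comment above, spelled out. It assumes
// code already running inside the runtime during the mark phase; exampleObj
// is a hypothetical uintptr pointing at the start of a heap object.
//
//	mp := acquirem() // disable preemption so our P (and its gcw) can't change
//	gcw := &mp.p.ptr().gcw
//	gcw.put(exampleObj) // produce: enqueue a pointer to a grey object
//	for {
//		obj := gcw.tryGetFast()
//		if obj == 0 {
//			if obj = gcw.tryGet(); obj == 0 {
//				break // no local or global mark work left
//			}
//		}
//		// ... scan obj, calling gcw.put for any pointers discovered ...
//	}
//	releasem(mp)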
// Most of the methods of gcWork are go:nowritebarrierrec because the
// write barrier itself can invoke gcWork methods but the methods are
// not generally re-entrant. Hence, if a gcWork method invoked the
// write barrier while the gcWork was in an inconsistent state, and
// the write barrier in turn invoked a gcWork method, it could
// permanently corrupt the gcWork.
func (w *gcWork) init() {
	w.wbuf1 = getempty()
	wbuf2 := trygetfull()
	if wbuf2 == nil {
		wbuf2 = getempty()
	}
	w.wbuf2 = wbuf2
}
// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on full, let the GC controller know so
	// it can encourage more workers to run. We delay this until
	// the end of put so that w is in a consistent state, since
	// enlistWorker may itself manipulate w.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
// putFast does a put and reports whether it can be done quickly
// otherwise it returns false and the caller needs to call put.
//
//go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == len(wbuf.obj) {
		return false
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
	return true
}
// putBatch performs a put on every pointer in obj. See put for
// constraints on these pointers.
//
//go:nowritebarrierrec
func (w *gcWork) putBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers in
// other gcWork instances or other caches.
//
//go:nowritebarrierrec
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
// tryGetFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
//
//go:nowritebarrierrec
func (w *gcWork) tryGetFast() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == 0 {
		return 0
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
// GC can inspect them. This helps reduce the mutator's
// ability to hide pointers during the concurrent mark phase.
//
//go:nowritebarrierrec
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.heapScanWork != 0 {
		gcController.heapScanWork.Add(w.heapScanWork)
		w.heapScanWork = 0
	}
}
// balance moves some work that's cached in this gcWork back on the
// global queue.
//
//go:nowritebarrierrec
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true // handoff did putfull
	} else {
		return
	}
	// We flushed a buffer to the full list, so wake a worker.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
// empty reports whether w has no mark work available.
//
//go:nowritebarrierrec
func (w *gcWork) empty() bool {
	return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
}
// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.
type workbufhdr struct {
	node lfnode // must be first
	nobj int
}

type workbuf struct {
	_ sys.NotInHeap
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work these are the only routines that
// make wbufs available to the GC.
func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		throw("workbuf is not empty")
	}
}
// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//
//go:nowritebarrier
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span in the busy list.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs. Return one and
		// put the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}
// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//
//go:nowritebarrier
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node)
}
// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//
//go:nowritebarrier
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node)
}
// trygetfull tries to get a full or partially empty workbuffer.
// If one is not immediately available return nil.
//
//go:nowritebarrier
func trygetfull() *workbuf {
	b := (*workbuf)(work.full.pop())
	if b != nil {
		b.checknonempty()
		return b
	}
	return b
}
//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))

	// Put b on full list - let first half of b get stolen.
	putfull(b)
	return b1
}
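// Illustrative note (not part of the original file): if b holds 9 pointers,
// n = 9/2 = 4, so b keeps its first 5 entries and is pushed onto the full
// list, while the returned buffer receives the last 4 entries (b.obj[5:9]).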
// prepareFreeWorkbufs moves busy workbuf spans to free list so they
// can be freed to the heap. This must only be called when all
// workbufs are on the empty list.
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are on the empty list, we don't care
	// which ones are in which spans. We can wipe the entire empty
	// list and move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}
// freeSomeWbufs frees some workbufs back to the heap and returns
// true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64 // ~1–2 µs per span.
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		unlock(&work.wbufSpans.lock)
		return false
	}
	systemstack(func() {
		gp := getg().m.curg
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}