// Copyright 2024 The Go Authors. All rights reserved.// Use of this source code is governed by a BSD-style// license that can be found in the LICENSE file.package runtimeimport ()// A synctestGroup is a group of goroutines started by synctest.Run.type synctestGroup struct { mu mutex timers timers now int64// current fake time root *g// caller of synctest.Run waiter *g// caller of synctest.Wait waiting bool// true if a goroutine is calling synctest.Wait// The group is active (not blocked) so long as running > 0 || active > 0. // // running is the number of goroutines which are not "durably blocked": // Goroutines which are either running, runnable, or non-durably blocked // (for example, blocked in a syscall). // // active is used to keep the group from becoming blocked, // even if all goroutines in the group are blocked. // For example, park_m can choose to immediately unpark a goroutine after parking it. // It increments the active count to keep the group active until it has determined // that the park operation has completed. total int// total goroutines running int// non-blocked goroutines active int// other sources of activity}// changegstatus is called when the non-lock status of a g changes.// It is never called with a Gscanstatus.func ( *synctestGroup) ( *g, , uint32) {// Determine whether this change in status affects the idleness of the group. // If this isn't a goroutine starting, stopping, durably blocking, // or waking up after durably blocking, then return immediately without // locking sg.mu. // // For example, stack growth (newstack) will changegstatus // from _Grunning to _Gcopystack. This is uninteresting to synctest, // but if stack growth occurs while sg.mu is held, we must not recursively lock. 
:= 0 := trueswitch {case_Gdead: = false ++case_Gwaiting:if .waitreason.isIdleInSynctest() { = false } } := trueswitch {case_Gdead: = false --case_Gwaiting:if .waitreason.isIdleInSynctest() { = false } }// It's possible for wasRunning == isRunning while totalDelta != 0; // for example, if a new goroutine is created in a non-running state.if == && == 0 {return }lock(&.mu) .total += if != {if { .running++ } else { .running--ifraceenabled && != _Gdead {racereleasemergeg(, .raceaddr()) } } }if .total < 0 {fatal("total < 0") }if .running < 0 {fatal("running < 0") } := .maybeWakeLocked()unlock(&.mu)if != nil {goready(, 0) }}// incActive increments the active-count for the group.// A group does not become durably blocked while the active-count is non-zero.func ( *synctestGroup) () {lock(&.mu) .active++unlock(&.mu)}// decActive decrements the active-count for the group.func ( *synctestGroup) () {lock(&.mu) .active--if .active < 0 {throw("active < 0") } := .maybeWakeLocked()unlock(&.mu)if != nil {goready(, 0) }}// maybeWakeLocked returns a g to wake if the group is durably blocked.func ( *synctestGroup) () *g {if .running > 0 || .active > 0 {returnnil }// Increment the group active count, since we've determined to wake something. // The woken goroutine will decrement the count. // We can't just call goready and let it increment sg.running, // since we can't call goready with sg.mu held. // // Incrementing the active count here is only necessary if something has gone wrong, // and a goroutine that we considered durably blocked wakes up unexpectedly. // Two wakes happening at the same time leads to very confusing failure modes, // so we take steps to avoid it happening. .active++if := .waiter; != nil {// A goroutine is blocked in Wait. Wake it.return }// All goroutines in the group are durably blocked, and nothing has called Wait. 
// Wake the root goroutine.return .root}func ( *synctestGroup) () unsafe.Pointer {// Address used to record happens-before relationships created by the group. // // Wait creates a happens-before relationship between itself and // the blocking operations which caused other goroutines in the group to park.returnunsafe.Pointer()}//go:linkname synctestRun internal/synctest.Runfunc synctestRun( func()) {ifdebug.asynctimerchan.Load() != 0 {panic("synctest.Run not supported with asynctimerchan!=0") } := getg()if .syncGroup != nil {panic("synctest.Run called from within a synctest bubble") } .syncGroup = &synctestGroup{total: 1,running: 1,root: , }const = 946684800000000000// midnight UTC 2000-01-01 .syncGroup.now = .syncGroup.timers.syncGroup = .syncGrouplockInit(&.syncGroup.mu, lockRankSynctest)lockInit(&.syncGroup.timers.mu, lockRankTimers)deferfunc() { .syncGroup = nil }() := *(**funcval)(unsafe.Pointer(&))newproc() := .syncGrouplock(&.mu) .active++for {ifraceenabled {raceacquireg(, .syncGroup.raceaddr()) }unlock(&.mu)systemstack(func() { .syncGroup.timers.check(.syncGroup.now) })gopark(synctestidle_c, nil, waitReasonSynctestRun, traceBlockSynctest, 0)lock(&.mu)if .active < 0 {throw("active < 0") } := .timers.wakeTime()if == 0 {break }if < .now {throw("time went backwards") } .now = } := .totalunlock(&.mu)if != 1 {panic("deadlock: all goroutines in bubble are blocked") }if .timer != nil && .timer.isFake {// Verify that we haven't marked this goroutine's sleep timer as fake. // This could happen if something in Run were to call timeSleep.throw("synctest root goroutine has a fake timer") }}func synctestidle_c( *g, unsafe.Pointer) bool {lock(&.syncGroup.mu) := trueif .syncGroup.running == 0 && .syncGroup.active == 1 {// All goroutines in the group have blocked or exited. 
= false } else { .syncGroup.active-- }unlock(&.syncGroup.mu)return}//go:linkname synctestWait internal/synctest.Waitfunc synctestWait() { := getg()if .syncGroup == nil {panic("goroutine is not in a bubble") }lock(&.syncGroup.mu)// We use a syncGroup.waiting bool to detect simultaneous calls to Wait rather than // checking to see if syncGroup.waiter is non-nil. This avoids a race between unlocking // syncGroup.mu and setting syncGroup.waiter while parking.if .syncGroup.waiting {unlock(&.syncGroup.mu)panic("wait already in progress") } .syncGroup.waiting = trueunlock(&.syncGroup.mu)gopark(synctestwait_c, nil, waitReasonSynctestWait, traceBlockSynctest, 0)lock(&.syncGroup.mu) .syncGroup.active--if .syncGroup.active < 0 {throw("active < 0") } .syncGroup.waiter = nil .syncGroup.waiting = falseunlock(&.syncGroup.mu)// Establish a happens-before relationship on the activity of the now-blocked // goroutines in the group.ifraceenabled {raceacquireg(, .syncGroup.raceaddr()) }}func synctestwait_c( *g, unsafe.Pointer) bool {lock(&.syncGroup.mu)if .syncGroup.running == 0 && .syncGroup.active == 0 {// This shouldn't be possible, since gopark increments active during unlockf.throw("running == 0 && active == 0") } .syncGroup.waiter = unlock(&.syncGroup.mu)returntrue}//go:linkname synctest_acquire internal/synctest.acquirefunc synctest_acquire() any {if := getg().syncGroup; != nil { .incActive()return }returnnil}//go:linkname synctest_release internal/synctest.releasefunc synctest_release( any) { .(*synctestGroup).decActive()}//go:linkname synctest_inBubble internal/synctest.inBubblefunc synctest_inBubble( any, func()) { := getg()if .syncGroup != nil {panic("goroutine is already bubbled") } .syncGroup = .(*synctestGroup)deferfunc() { .syncGroup = nil }() ()}
The pages are generated with Golds v0.7.3-preview. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.