// Copyright 2009 The Go Authors. All rights reserved.// Use of this source code is governed by a BSD-style// license that can be found in the LICENSE file.package runtimeimport ()// Keep a cached value to make gotraceback fast,// since we call it on every call to gentraceback.// The cached value is a uint32 in which the low bits// are the "crash" and "all" settings and the remaining// bits are the traceback value (0 off, 1 on, 2 include system).const ( tracebackCrash = 1 << iota tracebackAll tracebackShift = iota)var traceback_cache uint32 = 2 << tracebackShiftvar traceback_env uint32// gotraceback returns the current traceback settings.//// If level is 0, suppress all tracebacks.// If level is 1, show tracebacks, but exclude runtime frames.// If level is 2, show tracebacks including runtime frames.// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.// If crash is set, crash (core dump, etc) after tracebacking.////go:nosplitfunc gotraceback() ( int32, , bool) { := getg() := atomic.Load(&traceback_cache) = &tracebackCrash != 0 = .m.throwing >= throwTypeUser || &tracebackAll != 0if .m.traceback != 0 { = int32(.m.traceback) } elseif .m.throwing >= throwTypeRuntime {// Always include runtime frames in runtime throws unless // otherwise overridden by m.traceback. = 2 } else { = int32( >> tracebackShift) }return}var ( argc int32 argv **byte)// nosplit for use in linux startup sysargs.////go:nosplitfunc argv_index( **byte, int32) *byte {return *(**byte)(add(unsafe.Pointer(), uintptr()*goarch.PtrSize))}func args( int32, **byte) {argc = argv = sysargs(, )}func goargs() {ifGOOS == "windows" {return }argslice = make([]string, argc)for := int32(0); < argc; ++ {argslice[] = gostringnocopy(argv_index(argv, )) }}func goenvs_unix() {// TODO(austin): ppc64 in dynamic linking mode doesn't // guarantee env[] will immediately follow argv. Might cause // problems. 
:= int32(0)forargv_index(argv, argc+1+) != nil { ++ }envs = make([]string, )for := int32(0); < ; ++ {envs[] = gostring(argv_index(argv, argc+1+)) }}func environ() []string {returnenvs}// TODO: These should be locals in testAtomic64, but we don't 8-byte// align stack variables on 386.var test_z64, test_x64 uint64func testAtomic64() {test_z64 = 42test_x64 = 0ifatomic.Cas64(&test_z64, test_x64, 1) {throw("cas64 failed") }iftest_x64 != 0 {throw("cas64 failed") }test_x64 = 42if !atomic.Cas64(&test_z64, test_x64, 1) {throw("cas64 failed") }iftest_x64 != 42 || test_z64 != 1 {throw("cas64 failed") }ifatomic.Load64(&test_z64) != 1 {throw("load64 failed") }atomic.Store64(&test_z64, (1<<40)+1)ifatomic.Load64(&test_z64) != (1<<40)+1 {throw("store64 failed") }ifatomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {throw("xadd64 failed") }ifatomic.Load64(&test_z64) != (2<<40)+2 {throw("xadd64 failed") }ifatomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {throw("xchg64 failed") }ifatomic.Load64(&test_z64) != (3<<40)+3 {throw("xchg64 failed") }}func check() {var (int8uint8int16uint16int32uint32int64uint64 , float32 , float64unsafe.Pointer *uint16 [4]byte )typestruct {uint8 }typestruct {uint8 }varvarifunsafe.Sizeof() != 1 {throw("bad a") }ifunsafe.Sizeof() != 1 {throw("bad b") }ifunsafe.Sizeof() != 2 {throw("bad c") }ifunsafe.Sizeof() != 2 {throw("bad d") }ifunsafe.Sizeof() != 4 {throw("bad e") }ifunsafe.Sizeof() != 4 {throw("bad f") }ifunsafe.Sizeof() != 8 {throw("bad g") }ifunsafe.Sizeof() != 8 {throw("bad h") }ifunsafe.Sizeof() != 4 {throw("bad i") }ifunsafe.Sizeof() != 8 {throw("bad j") }ifunsafe.Sizeof() != goarch.PtrSize {throw("bad k") }ifunsafe.Sizeof() != goarch.PtrSize {throw("bad l") }ifunsafe.Sizeof() != 1 {throw("bad unsafe.Sizeof x1") }ifunsafe.Offsetof(.) 
!= 1 {throw("bad offsetof y1.y") }ifunsafe.Sizeof() != 2 {throw("bad unsafe.Sizeof y1") }iftimediv(12345*1000000000+54321, 1000000000, &) != 12345 || != 54321 {throw("bad timediv") }varuint32 = 1if !atomic.Cas(&, 1, 2) {throw("cas1") }if != 2 {throw("cas2") } = 4ifatomic.Cas(&, 5, 6) {throw("cas3") }if != 4 {throw("cas4") } = 0xffffffffif !atomic.Cas(&, 0xffffffff, 0xfffffffe) {throw("cas5") }if != 0xfffffffe {throw("cas6") } = [4]byte{1, 1, 1, 1}atomic.Or8(&[1], 0xf0)if [0] != 1 || [1] != 0xf1 || [2] != 1 || [3] != 1 {throw("atomicor8") } = [4]byte{0xff, 0xff, 0xff, 0xff}atomic.And8(&[1], 0x1)if [0] != 0xff || [1] != 0x1 || [2] != 0xff || [3] != 0xff {throw("atomicand8") } *(*uint64)(unsafe.Pointer(&)) = ^uint64(0)if == {throw("float64nan") }if !( != ) {throw("float64nan1") } *(*uint64)(unsafe.Pointer(&)) = ^uint64(1)if == {throw("float64nan2") }if !( != ) {throw("float64nan3") } *(*uint32)(unsafe.Pointer(&)) = ^uint32(0)if == {throw("float32nan") }if == {throw("float32nan1") } *(*uint32)(unsafe.Pointer(&)) = ^uint32(1)if == {throw("float32nan2") }if == {throw("float32nan3") }testAtomic64()iffixedStack != round2(fixedStack) {throw("FixedStack is not power-of-2") }if !checkASM() {throw("assembly checks failed") }}type dbgVar struct { name string value *int32// for variables that can only be set at startup atomic *atomic.Int32// for variables that can be changed during execution def int32// default value (ideally zero)}// Holds variables parsed from GODEBUG env var,// except for "memprofilerate" since there is an// existing int var for that value, which may// already have an initial value.var debug struct { cgocheck int32 clobberfree int32 disablethp int32 dontfreezetheworld int32 efence int32 gccheckmark int32 gcpacertrace int32 gcshrinkstackoff int32 gcstoptheworld int32 gctrace int32 invalidptr int32 madvdontneed int32// for Linux; issue 28466 runtimeContentionStacks atomic.Int32 scavtrace int32 scheddetail int32 schedtrace int32 tracebackancestors int32 
asyncpreemptoff int32 harddecommit int32 adaptivestackstart int32 tracefpunwindoff int32 traceadvanceperiod int32 traceCheckStackOwnership int32 profstackdepth int32// debug.malloc is used as a combined debug check // in the malloc function and should be set // if any of the below debug options is != 0. malloc bool inittrace int32 sbrk int32// traceallocfree controls whether execution traces contain // detailed trace data about memory allocation. This value // affects debug.malloc only if it is != 0 and the execution // tracer is enabled, in which case debug.malloc will be // set to "true" if it isn't already while tracing is enabled. // It will be set while the world is stopped, so it's safe. // The value of traceallocfree can be changed any time in response // to os.Setenv("GODEBUG"). traceallocfree atomic.Int32 panicnil atomic.Int32// asynctimerchan controls whether timer channels // behave asynchronously (as in Go 1.22 and earlier) // instead of their Go 1.23+ synchronous behavior. // The value can change at any time (in response to os.Setenv("GODEBUG")) // and affects all extant timer channels immediately. // Programs wouldn't normally change over an execution, // but allowing it is convenient for testing and for programs // that do an os.Setenv in main.init or main.main. 
asynctimerchan atomic.Int32}var dbgvars = []*dbgVar{ {name: "adaptivestackstart", value: &debug.adaptivestackstart}, {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, {name: "asynctimerchan", atomic: &debug.asynctimerchan}, {name: "cgocheck", value: &debug.cgocheck}, {name: "clobberfree", value: &debug.clobberfree}, {name: "disablethp", value: &debug.disablethp}, {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, {name: "efence", value: &debug.efence}, {name: "gccheckmark", value: &debug.gccheckmark}, {name: "gcpacertrace", value: &debug.gcpacertrace}, {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, {name: "gcstoptheworld", value: &debug.gcstoptheworld}, {name: "gctrace", value: &debug.gctrace}, {name: "harddecommit", value: &debug.harddecommit}, {name: "inittrace", value: &debug.inittrace}, {name: "invalidptr", value: &debug.invalidptr}, {name: "madvdontneed", value: &debug.madvdontneed}, {name: "panicnil", atomic: &debug.panicnil}, {name: "profstackdepth", value: &debug.profstackdepth, def: 128}, {name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks}, {name: "sbrk", value: &debug.sbrk}, {name: "scavtrace", value: &debug.scavtrace}, {name: "scheddetail", value: &debug.scheddetail}, {name: "schedtrace", value: &debug.schedtrace}, {name: "traceadvanceperiod", value: &debug.traceadvanceperiod}, {name: "traceallocfree", atomic: &debug.traceallocfree}, {name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership}, {name: "tracebackancestors", value: &debug.tracebackancestors}, {name: "tracefpunwindoff", value: &debug.tracefpunwindoff},}func parsedebugvars() {// defaultsdebug.cgocheck = 1debug.invalidptr = 1debug.adaptivestackstart = 1// set this to 0 to turn larger initial goroutine stacks offifGOOS == "linux" {// On Linux, MADV_FREE is faster than MADV_DONTNEED, // but doesn't affect many of the statistics that // MADV_DONTNEED does until the memory is actually // reclaimed. 
This generally leads to poor user // experience, like confusing stats in top and other // monitoring tools; and bad integration with // management systems that respond to memory usage. // Hence, default to MADV_DONTNEED.debug.madvdontneed = 1 }debug.traceadvanceperiod = defaultTraceAdvancePeriod := gogetenv("GODEBUG") := new(string) * = godebugEnv.Store()// apply runtime defaults, if anyfor , := rangedbgvars {if .def != 0 {// Every var should have either v.value or v.atomic set.if .value != nil { *.value = .def } elseif .atomic != nil { .atomic.Store(.def) } } }// apply compile-time GODEBUG settingsparsegodebug(godebugDefault, nil)// apply environment settingsparsegodebug(, nil)debug.malloc = (debug.inittrace | debug.sbrk) != 0debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)setTraceback(gogetenv("GOTRACEBACK"))traceback_env = traceback_cache}// reparsedebugvars reparses the runtime's debug variables// because the environment variable has been changed to env.func reparsedebugvars( string) { := make(map[string]bool)// apply environment settingsparsegodebug(, )// apply compile-time GODEBUG settings for as-yet-unseen variablesparsegodebug(godebugDefault, )// apply defaults for as-yet-unseen variablesfor , := rangedbgvars {if .atomic != nil && ![.name] { .atomic.Store(0) } }}// parsegodebug parses the godebug string, updating variables listed in dbgvars.// If seen == nil, this is startup time and we process the string left to right// overwriting older settings with newer ones.// If seen != nil, $GODEBUG has changed and we are doing an// incremental update. To avoid flapping in the case where a value is// set multiple times (perhaps in the default and the environment,// or perhaps twice in the environment), we process the string right-to-left// and only change values not already seen. 
After doing this for both// the environment and the default settings, the caller must also call// cleargodebug(seen) to reset any now-unset values back to their defaults.func parsegodebug( string, map[string]bool) {for := ; != ""; {varstringif == nil {// startup: process left to right, overwriting older settings with newer := bytealg.IndexByteString(, ',')if < 0 { , = , "" } else { , = [:], [+1:] } } else {// incremental update: process right to left, updating and skipping seen := len() - 1for >= 0 && [] != ',' { -- }if < 0 { , = "", } else { , = [:], [+1:] } } := bytealg.IndexByteString(, '=')if < 0 {continue } , := [:], [+1:]if [] {continue }if != nil { [] = true }// Update MemProfileRate directly here since it // is int, not int32, and should only be updated // if specified in GODEBUG.if == nil && == "memprofilerate" {if , := atoi(); {MemProfileRate = } } else {for , := rangedbgvars {if .name == {if , := atoi32(); {if == nil && .value != nil { *.value = } elseif .atomic != nil { .atomic.Store() } } } } } }ifdebug.cgocheck > 1 {throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.") }}//go:linkname setTraceback runtime/debug.SetTracebackfunc setTraceback( string) {varuint32switch {case"none": = 0case"single", "": = 1 << tracebackShiftcase"all": = 1<<tracebackShift | tracebackAllcase"system": = 2<<tracebackShift | tracebackAllcase"crash": = 2<<tracebackShift | tracebackAll | tracebackCrashcase"wer":ifGOOS == "windows" { = 2<<tracebackShift | tracebackAll | tracebackCrashenableWER()break }fallthroughdefault: = tracebackAllif , := atoi(); && == int(uint32()) { |= uint32() << tracebackShift } }// when C owns the process, simply exit'ing the process on fatal errors // and panics is surprising. 
Be louder and abort instead.ifislibrary || isarchive { |= tracebackCrash } |= traceback_envatomic.Store(&traceback_cache, )}// Poor mans 64-bit division.// This is a very special function, do not use it if you are not sure what you are doing.// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.// Handles overflow in a time-specific manner.// This keeps us within no-split stack limits on 32-bit processors.////go:nosplitfunc timediv( int64, int32, *int32) int32 { := int32(0)for := 30; >= 0; -- {if >= int64()<<uint() { = - (int64() << uint())// Before this for loop, res was 0, thus all these // power of 2 increments are now just bitsets. |= 1 << uint() } }if >= int64() {if != nil { * = 0 }return0x7fffffff }if != nil { * = int32() }return}// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.//go:nosplitfunc acquirem() *m { := getg() .m.locks++return .m}//go:nosplitfunc releasem( *m) { := getg() .locks--if .locks == 0 && .preempt {// restore the preemption request in case we've cleared it in newstack .stackguard0 = stackPreempt }}// reflect_typelinks is meant for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/goccy/json// - github.com/modern-go/reflect2// - github.com/vmware/govmomi// - github.com/pinpoint-apm/pinpoint-go-agent// - github.com/timandy/routine// - github.com/v2pro/plz//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_typelinks reflect.typelinksfunc reflect_typelinks() ([]unsafe.Pointer, [][]int32) { := activeModules() := []unsafe.Pointer{unsafe.Pointer([0].types)} := [][]int32{[0].typelinks}for , := range [1:] { = append(, unsafe.Pointer(.types)) = append(, .typelinks) }return , }// reflect_resolveNameOff resolves a name offset from a base pointer.//// reflect_resolveNameOff is for package reflect,// but widely used 
packages access it using linkname.// Notable members of the hall of shame include:// - github.com/agiledragon/gomonkey/v2//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_resolveNameOff reflect.resolveNameOfffunc reflect_resolveNameOff( unsafe.Pointer, int32) unsafe.Pointer {returnunsafe.Pointer(resolveNameOff(, nameOff()).Bytes)}// reflect_resolveTypeOff resolves an *rtype offset from a base type.//// reflect_resolveTypeOff is meant for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/modern-go/reflect2// - github.com/v2pro/plz// - github.com/timandy/routine//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_resolveTypeOff reflect.resolveTypeOfffunc reflect_resolveTypeOff( unsafe.Pointer, int32) unsafe.Pointer {returnunsafe.Pointer(toRType((*_type)()).typeOff(typeOff()))}// reflect_resolveTextOff resolves a function pointer offset from a base type.//// reflect_resolveTextOff is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/cloudwego/frugal// - github.com/agiledragon/gomonkey/v2//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_resolveTextOff reflect.resolveTextOfffunc reflect_resolveTextOff( unsafe.Pointer, int32) unsafe.Pointer {returntoRType((*_type)()).textOff(textOff())}// reflectlite_resolveNameOff resolves a name offset from a base pointer.////go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOfffunc reflectlite_resolveNameOff( unsafe.Pointer, int32) unsafe.Pointer {returnunsafe.Pointer(resolveNameOff(, nameOff()).Bytes)}// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.////go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOfffunc reflectlite_resolveTypeOff( 
unsafe.Pointer, int32) unsafe.Pointer {returnunsafe.Pointer(toRType((*_type)()).typeOff(typeOff()))}// reflect_addReflectOff adds a pointer to the reflection offset lookup map.////go:linkname reflect_addReflectOff reflect.addReflectOfffunc reflect_addReflectOff( unsafe.Pointer) int32 {reflectOffsLock()ifreflectOffs.m == nil {reflectOffs.m = make(map[int32]unsafe.Pointer)reflectOffs.minv = make(map[unsafe.Pointer]int32)reflectOffs.next = -1 } , := reflectOffs.minv[]if ! { = reflectOffs.nextreflectOffs.next-- // use negative offsets as IDs to aid debuggingreflectOffs.m[] = reflectOffs.minv[] = }reflectOffsUnlock()return}
The pages are generated with Golds v0.7.0-preview. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PR and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.