// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/runtime/atomic"
	"unsafe"
)

//go:generate go run wincallback.go
//go:generate go run mkduff.go
//go:generate go run mkfastlog2table.go
//go:generate go run mklockrank.go -o lockrank.go

// ticks holds the reference times and lazily-computed conversion rate
// used by ticksPerSecond to relate the cputicks and nanotime clocks.
var ticks ticksType

type ticksType struct {
	// lock protects access to start* and val.
	lock       mutex
	startTicks int64        // cputicks() captured at init
	startTime  int64        // nanotime() captured at init
	val        atomic.Int64 // cached ticks-per-second rate; 0 means not yet computed
}

// init initializes ticks to maximize the chance that we have a good ticksPerSecond reference.
//
// Must not run concurrently with ticksPerSecond.
func ( *ticksType) () {
	lock(&ticks.lock)
	.startTime = nanotime()
	.startTicks = cputicks()
	unlock(&ticks.lock)
}

// minTimeForTicksPerSecond is the minimum elapsed time we require to consider our ticksPerSecond
// measurement to be of decent enough quality for profiling.
//
// There's a linear relationship here between minimum time and error from the true value.
// The error from the true ticks-per-second in a linux/amd64 VM seems to be:
// -   1 ms -> ~0.02% error
// -   5 ms -> ~0.004% error
// -  10 ms -> ~0.002% error
// -  50 ms -> ~0.0003% error
// - 100 ms -> ~0.0001% error
//
// We're willing to take 0.004% error here, because ticksPerSecond is intended to be used for
// converting durations, not timestamps. Durations are usually going to be much larger, and so
// the tiny error doesn't matter. The error is definitely going to be a problem when trying to
// use this for timestamps, as it'll make those timestamps much less likely to line up.
//
// The value is in nanoseconds: 5ms normally, or 100ms on platforms with a
// low-resolution clock (osHasLowResClockInt selects between the two terms).
const minTimeForTicksPerSecond = 5_000_000*(1-osHasLowResClockInt) + 100_000_000*osHasLowResClockInt

// ticksPerSecond returns a conversion rate between the cputicks clock and the nanotime clock.
//
// Note: Clocks are hard. Using this as an actual conversion rate for timestamps is ill-advised
// and should be avoided when possible. Use only for durations, where a tiny error term isn't going
// to make a meaningful difference in even a 1ms duration. If an accurate timestamp is needed,
// use nanotime instead. (The entire Windows platform is a broad exception to this rule, where nanotime
// produces timestamps on such a coarse granularity that the error from this conversion is actually
// preferable.)
//
// The strategy for computing the conversion rate is to write down nanotime and cputicks as
// early in process startup as possible. From then, we just need to wait until we get values
// from nanotime that we can use (some platforms have a really coarse system time granularity).
// We require some amount of time to pass to ensure that the conversion rate is fairly accurate
// in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent
// amount of time has passed by the time we get here.
//
// Must be called from a normal goroutine context (running regular goroutine with a P).
//
// Called by runtime/pprof in addition to runtime code.
//
// TODO(mknyszek): This doesn't account for things like CPU frequency scaling. Consider
// a more sophisticated and general approach in the future.
func ticksPerSecond() int64 {
	// Get the conversion rate if we've already computed it.
	 := ticks.val.Load()
	if  != 0 {
		return 
	}

	// Compute the conversion rate.
	for {
		lock(&ticks.lock)
		 = ticks.val.Load()
		if  != 0 {
			unlock(&ticks.lock)
			return 
		}

		// Grab the current time in both clocks.
		 := nanotime()
		 := cputicks()

		// See if we can use these times.
		if  > ticks.startTicks && -ticks.startTime > minTimeForTicksPerSecond {
			// Perform the calculation with floats. We don't want to risk overflow.
			 = int64(float64(-ticks.startTicks) * 1e9 / float64(-ticks.startTime))
			if  == 0 {
				// Zero is both a sentinel value and it would be bad if callers used this as
				// a divisor. We tried out best, so just make it 1.
				++
			}
			ticks.val.Store()
			unlock(&ticks.lock)
			break
		}
		unlock(&ticks.lock)

		// Sleep in one millisecond increments until we have a reliable time.
		timeSleep(1_000_000)
	}
	return 
}

// envs and argslice hold the process environment and command-line arguments;
// they are exposed to the syscall and os packages via the linknamed accessors below.
var envs []string
var argslice []string

// syscall_runtime_envs returns a copy of the runtime's environment slice,
// so the caller cannot mutate the runtime's copy.
//
//go:linkname syscall_runtime_envs syscall.runtime_envs
func syscall_runtime_envs() []string {
	envsCopy := make([]string, len(envs))
	copy(envsCopy, envs)
	return envsCopy
}

// syscall_Getpagesize reports the system's physical page size as an int.
//
//go:linkname syscall_Getpagesize syscall.Getpagesize
func syscall_Getpagesize() int {
	return int(physPageSize)
}

// os_runtime_args returns a copy of the runtime's argument slice,
// so the caller cannot mutate the runtime's copy.
//
//go:linkname os_runtime_args os.runtime_args
func os_runtime_args() []string {
	args := make([]string, len(argslice))
	copy(args, argslice)
	return args
}

// syscall_Exit terminates the process with the given exit code.
//
//go:linkname syscall_Exit syscall.Exit
//go:nosplit
func syscall_Exit(code int) {
	exit(int32(code))
}

// godebugDefault is passed to the registered update callback alongside the
// current GODEBUG environment value (see godebugNotify).
var godebugDefault string

// godebugUpdate holds the update callback registered by internal/godebug.
var godebugUpdate atomic.Pointer[func(string, string)]
var godebugEnv atomic.Pointer[string] // set by parsedebugvars

// godebugNewIncNonDefault holds the factory registered by internal/godebug
// that produces per-setting non-default counters (see godebugInc).
var godebugNewIncNonDefault atomic.Pointer[func(string) func()]

//go:linkname godebug_setUpdate internal/godebug.setUpdate
func godebug_setUpdate( func(string, string)) {
	 := new(func(string, string))
	* = 
	godebugUpdate.Store()
	godebugNotify(false)
}

//go:linkname godebug_setNewIncNonDefault internal/godebug.setNewIncNonDefault
func godebug_setNewIncNonDefault( func(string) func()) {
	 := new(func(string) func())
	* = 
	godebugNewIncNonDefault.Store()
}

// A godebugInc provides access to internal/godebug's IncNonDefault function
// for a given GODEBUG setting.
// Calls before internal/godebug registers itself are dropped on the floor.
type godebugInc struct {
	name string                // the GODEBUG setting's name
	inc  atomic.Pointer[func()] // lazily-created increment function; nil until first use
}

func ( *godebugInc) () {
	 := .inc.Load()
	if  == nil {
		 := godebugNewIncNonDefault.Load()
		if  == nil {
			return
		}
		 = new(func())
		* = (*)(.name)
		if raceenabled {
			racereleasemerge(unsafe.Pointer(&.inc))
		}
		if !.inc.CompareAndSwap(nil, ) {
			 = .inc.Load()
		}
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(&.inc))
	}
	(*)()
}

func godebugNotify( bool) {
	 := godebugUpdate.Load()
	var  string
	if  := godebugEnv.Load();  != nil {
		 = *
	}
	if  {
		reparsedebugvars()
	}
	if  != nil {
		(*)(godebugDefault, )
	}
}

//go:linkname syscall_runtimeSetenv syscall.runtimeSetenv
func syscall_runtimeSetenv(,  string) {
	setenv_c(, )
	if  == "GODEBUG" {
		 := new(string)
		* = 
		godebugEnv.Store()
		godebugNotify(true)
	}
}

//go:linkname syscall_runtimeUnsetenv syscall.runtimeUnsetenv
func syscall_runtimeUnsetenv( string) {
	unsetenv_c()
	if  == "GODEBUG" {
		godebugEnv.Store(nil)
		godebugNotify(true)
	}
}

// writeErrStr writes a string to descriptor 2.
// If SetCrashOutput(f) was called, it also writes to f.
//
//go:nosplit
func writeErrStr(s string) {
	writeErrData(unsafe.StringData(s), int32(len(s)))
}

// writeErrData is the common parts of writeErr{,Str}.
//
//go:nosplit
func writeErrData( *byte,  int32) {
	write(2, unsafe.Pointer(), )

	// If crashing, print a copy to the SetCrashOutput fd.
	 := getg()
	if  != nil && .m.dying > 0 ||
		 == nil && panicking.Load() > 0 {
		if  := crashFD.Load();  != ^uintptr(0) {
			write(, unsafe.Pointer(), )
		}
	}
}

// crashFD is an optional file descriptor to use for fatal panics, as
// set by debug.SetCrashOutput (see #42888). If it is a valid fd (not
// all ones), writeErr and related functions write to it in addition
// to standard error.
//
// ^uintptr(0) (all ones) is the sentinel meaning "no crash fd".
//
// Initialized to -1 in schedinit.
var crashFD atomic.Uintptr

//go:linkname setCrashFD
func setCrashFD( uintptr) uintptr {
	// Don't change the crash FD if a crash is already in progress.
	//
	// Unlike the case below, this is not required for correctness, but it
	// is generally nicer to have all of the crash output go to the same
	// place rather than getting split across two different FDs.
	if panicking.Load() > 0 {
		return ^uintptr(0)
	}

	 := crashFD.Swap()

	// If we are panicking, don't return the old FD to runtime/debug for
	// closing. writeErrData may have already read the old FD from crashFD
	// before the swap and closing it would cause the write to be lost [1].
	// The old FD will never be closed, but we are about to crash anyway.
	//
	// On the writeErrData thread, panicking.Add(1) happens-before
	// crashFD.Load() [2].
	//
	// On this thread, swapping old FD for new in crashFD happens-before
	// panicking.Load() > 0.
	//
	// Therefore, if panicking.Load() == 0 here (old FD will be closed), it
	// is impossible for the writeErrData thread to observe
	// crashFD.Load() == old FD.
	//
	// [1] Or, if really unlucky, another concurrent open could reuse the
	// FD, sending the write into an unrelated file.
	//
	// [2] If gp != nil, it occurs when incrementing gp.m.dying in
	// startpanic_m. If gp == nil, we read panicking.Load() > 0, so an Add
	// must have happened-before.
	if panicking.Load() > 0 {
		return ^uintptr(0)
	}
	return 
}

// auxv is populated on relevant platforms but defined here for all platforms
// so x/sys/cpu can assume the getAuxv symbol exists without keeping its list
// of auxv-using GOOS build tags in sync.
//
// It contains an even number of elements, (tag, value) pairs.
// Exposed read-only via getAuxv below.
var auxv []uintptr

// golang.org/x/sys/cpu uses getAuxv via linkname.
// Do not remove or change the type signature.
// (See go.dev/issue/57336.)
//
// getAuxv should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cilium/ebpf
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getAuxv
func getAuxv() []uintptr {
	// Expose the runtime's auxiliary vector as-is.
	return auxv
}

// zeroVal is used by reflect via linkname.
// It is a zeroed buffer of abi.ZeroValSize bytes.
//
// zeroVal should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname zeroVal
var zeroVal [abi.ZeroValSize]byte