// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	// Flag bits packed into the low bits of traceback_cache/traceback_env.
	tracebackCrash = 1 << iota // crash (core dump, etc) after tracebacking
	tracebackAll               // print all goroutine stacks, not just the current one
	tracebackShift = iota      // number of low bits used by the flags above
)

// traceback_cache packs the current traceback level and flags
// (see the comment above). It starts at level 2 and is replaced by
// setTraceback once GOTRACEBACK is parsed.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the flag bits that came from the environment;
// setTraceback ORs these back in so they persist across calls to
// runtime/debug.SetTraceback.
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() ( int32, ,  bool) {
	 := getg()
	 := atomic.Load(&traceback_cache)
	 = &tracebackCrash != 0
	 = .m.throwing >= throwTypeUser || &tracebackAll != 0
	if .m.traceback != 0 {
		 = int32(.m.traceback)
	} else if .m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		 = 2
	} else {
		 = int32( >> tracebackShift)
	}
	return
}

var (
	argc int32  // process argument count, saved by args at startup
	argv **byte // process argument vector, saved by args at startup
)

// nosplit for use in linux startup sysargs.
//
//go:nosplit
// argv_index returns the i'th pointer in the argv array.
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args( int32,  **byte) {
	argc = 
	argv = 
	sysargs(, )
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for  := int32(0);  < argc; ++ {
		argslice[] = gostringnocopy(argv_index(argv, ))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	 := int32(0)
	for argv_index(argv, argc+1+) != nil {
		++
	}

	envs = make([]string, )
	for  := int32(0);  < ; ++ {
		envs[] = gostring(argv_index(argv, argc+1+))
	}
}

// environ returns the runtime's cached copy of the process
// environment (the envs slice filled in at startup).
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64 // scratch words used only by testAtomic64

// testAtomic64 sanity-checks the 64-bit atomic primitives
// (Cas64, Load64, Store64, Xadd64, Xchg64), including values above
// 2^32 to exercise the high word on 32-bit platforms. Throws on
// any failure.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// CAS with a non-matching old value (0 != 42) must fail
	// and leave both words untouched.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// CAS with the matching old value (42) must succeed.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// (1<<40)+1 does not fit in 32 bits; store/load must round-trip it.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new value.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		     int8
		     uint8
		     int16
		     uint16
		     int32
		     uint32
		     int64
		     uint64
		,  float32
		,  float64
		     unsafe.Pointer
		     *uint16
		     [4]byte
	)
	type  struct {
		 uint8
	}
	type  struct {
		 
		  uint8
	}
	var  
	var  

	if unsafe.Sizeof() != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof() != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof() != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof() != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof() != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof() != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof() != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof() != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof() != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof() != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof() != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof() != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof() != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(.) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof() != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &) != 12345 ||  != 54321 {
		throw("bad timediv")
	}

	var  uint32
	 = 1
	if !atomic.Cas(&, 1, 2) {
		throw("cas1")
	}
	if  != 2 {
		throw("cas2")
	}

	 = 4
	if atomic.Cas(&, 5, 6) {
		throw("cas3")
	}
	if  != 4 {
		throw("cas4")
	}

	 = 0xffffffff
	if !atomic.Cas(&, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if  != 0xfffffffe {
		throw("cas6")
	}

	 = [4]byte{1, 1, 1, 1}
	atomic.Or8(&[1], 0xf0)
	if [0] != 1 || [1] != 0xf1 || [2] != 1 || [3] != 1 {
		throw("atomicor8")
	}

	 = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&[1], 0x1)
	if [0] != 0xff || [1] != 0x1 || [2] != 0xff || [3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&)) = ^uint64(0)
	if  ==  {
		throw("float64nan")
	}
	if !( != ) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&)) = ^uint64(1)
	if  ==  {
		throw("float64nan2")
	}
	if !( != ) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&)) = ^uint32(0)
	if  ==  {
		throw("float32nan")
	}
	if  ==  {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&)) = ^uint32(1)
	if  ==  {
		throw("float32nan2")
	}
	if  ==  {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

// dbgVar describes one GODEBUG setting: its name and either a plain
// int32 (startup-only) or an atomic.Int32 (changeable at runtime).
// Exactly one of value/atomic should be set.
type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                int32 // default 1, set in parsedebugvars
	clobberfree             int32
	disablethp              int32
	dontfreezetheworld      int32
	efence                  int32
	gccheckmark             int32
	gcpacertrace            int32
	gcshrinkstackoff        int32
	gcstoptheworld          int32
	gctrace                 int32
	invalidptr              int32 // default 1, set in parsedebugvars
	madvdontneed            int32 // for Linux; issue 28466
	runtimeContentionStacks atomic.Int32
	scavtrace               int32
	scheddetail             int32
	schedtrace              int32
	tracebackancestors      int32
	asyncpreemptoff         int32
	harddecommit            int32
	adaptivestackstart      int32 // default 1, set in parsedebugvars
	tracefpunwindoff        int32
	traceadvanceperiod      int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32

	panicnil atomic.Int32
}

// dbgvars maps each GODEBUG setting name to the debug field it
// controls. Entries with value are startup-only; entries with atomic
// may be updated while the program runs (see dbgVar).
var dbgvars = []*dbgVar{
	{name: "allocfreetrace", value: &debug.allocfreetrace},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "inittrace", value: &debug.inittrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	 := gogetenv("GODEBUG")

	 := new(string)
	* = 
	godebugEnv.Store()

	// apply runtime defaults, if any
	for ,  := range dbgvars {
		if .def != 0 {
			// Every var should have either v.value or v.atomic set.
			if .value != nil {
				*.value = .def
			} else if .atomic != nil {
				.atomic.Store(.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(, nil)

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed to env.
func reparsedebugvars( string) {
	 := make(map[string]bool)
	// apply environment settings
	parsegodebug(, )
	// apply compile-time GODEBUG settings for as-yet-unseen variables
	parsegodebug(godebugDefault, )
	// apply defaults for as-yet-unseen variables
	for ,  := range dbgvars {
		if .atomic != nil && ![.name] {
			.atomic.Store(0)
		}
	}
}

// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also call
// cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug( string,  map[string]bool) {
	for  := ;  != ""; {
		var  string
		if  == nil {
			// startup: process left to right, overwriting older settings with newer
			 := bytealg.IndexByteString(, ',')
			if  < 0 {
				,  = , ""
			} else {
				,  = [:], [+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			 := len() - 1
			for  >= 0 && [] != ',' {
				--
			}
			if  < 0 {
				,  = "", 
			} else {
				,  = [:], [+1:]
			}
		}
		 := bytealg.IndexByteString(, '=')
		if  < 0 {
			continue
		}
		,  := [:], [+1:]
		if [] {
			continue
		}
		if  != nil {
			[] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if  == nil &&  == "memprofilerate" {
			if ,  := atoi();  {
				MemProfileRate = 
			}
		} else {
			for ,  := range dbgvars {
				if .name ==  {
					if ,  := atoi32();  {
						if  == nil && .value != nil {
							*.value = 
						} else if .atomic != nil {
							.atomic.Store()
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback( string) {
	var  uint32
	switch  {
	case "none":
		 = 0
	case "single", "":
		 = 1 << tracebackShift
	case "all":
		 = 1<<tracebackShift | tracebackAll
	case "system":
		 = 2<<tracebackShift | tracebackAll
	case "crash":
		 = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			 = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		 = tracebackAll
		if ,  := atoi();  &&  == int(uint32()) {
			 |= uint32() << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		 |= tracebackCrash
	}

	 |= traceback_env

	atomic.Store(&traceback_cache, )
}

// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
// timediv returns v/div and, if rem != nil, stores v%div in *rem.
// If the quotient does not fit in 31 bits it returns 0x7fffffff
// and sets *rem to 0.
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		// Quotient overflowed 31 bits.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	 := getg()
	.m.locks++
	return .m
}

//go:nosplit
func releasem( *m) {
	 := getg()
	.locks--
	if .locks == 0 && .preempt {
		// restore the preemption request in case we've cleared it in newstack
		.stackguard0 = stackPreempt
	}
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	 := activeModules()
	 := []unsafe.Pointer{unsafe.Pointer([0].types)}
	 := [][]int32{[0].typelinks}
	for ,  := range [1:] {
		 = append(, unsafe.Pointer(.types))
		 = append(, .typelinks)
	}
	return , 
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff( unsafe.Pointer,  int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(, nameOff()).Bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff( unsafe.Pointer,  int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(, nameOff()).Bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff( unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	,  := reflectOffs.minv[]
	if ! {
		 = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[] = 
		reflectOffs.minv[] = 
	}
	reflectOffsUnlock()
	return 
}