// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/atomic"
	"unsafe"
)

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type

// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}

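// string returns the type's name. If TFlagExtraStar is set, the stored
// name carries a leading "*" that is not part of this type, so it is
// stripped before returning.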
func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

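// name returns the unqualified type name: the portion of string() after
// the last '.', ignoring dots that appear inside the bracketed type
// arguments of instantiated generic types.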
func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind_ & abi.KindMask {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}

// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
var inProgress byte

// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}

// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
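// Bit i of the region lives in bit i%8 of byte i/8, so bits fill each
// byte from least-significant to most-significant.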
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}

// write copies cnt bits, starting at bit 0 of data, into the region at
// cursor position b. Requires cnt > 0.
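// For example, with b.n == 5 and cnt == 5, bits 0-2 of *data land in
// bits 5-7 of the first byte of the region and bits 3-4 land in bits 0-1
// of the second byte; bits below b.n are preserved.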
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}

func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must have a pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There is a wide range of possible addresses the heap may use, which
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

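// resolveNameOff resolves the name offset off relative to the types
// section of the module containing ptrInModule. If ptrInModule is not in
// any module, off is treated as an identifier into the reflectOffs map of
// names registered at run time.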
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run-time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}

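// textOff resolves a method text offset to a code pointer, either through
// the text section of the module containing t or, for types created at
// run time, through the reflectOffs map.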
func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

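// pkgPath extracts the package path embedded in an encoded name. As the
// checks below reflect, byte 0 is a flag byte (1<<1 means a tag follows
// the name, 1<<2 means a pkgPath nameOff is present), byte 1 starts a
// varint-prefixed name, an optional varint-prefixed tag follows, and the
// trailing 4 bytes hold the nameOff of the package path.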
func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

type _typePair struct {
	t1 *_type
	t2 *_type
}

func toRType(t *abi.Type) rtype {
	return rtype{t}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen, and thus equivalent, which prevents an
	// infinite loop if the two types are identical but recursively defined
	// and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind_ & abi.KindMask
	if kind != v.Kind_&abi.KindMask {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		if goexperiment.SwissMap {
			mt := (*abi.SwissMapType)(unsafe.Pointer(t))
			mv := (*abi.SwissMapType)(unsafe.Pointer(v))
			return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
		}
		mt := (*abi.OldMapType)(unsafe.Pointer(t))
		mv := (*abi.OldMapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}