// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
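//
// For example, a caller might inspect a value's dynamic type like this
// (the variable names are illustrative only):
//
//	var x float64 = 3.4
//	t := reflect.TypeOf(x)  // a reflect.Type describing float64
//	v := reflect.ValueOf(x) // a reflect.Value holding 3.4
//	_ = t.Kind()            // reflect.Float64
//	_ = v.Float()           // 3.4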
package reflect

import (
	"internal/abi"
	"internal/goarch"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver,
	// and only exported methods are accessible.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Methods are sorted in lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods accessible using Method.
	//
	// For a non-interface type, it returns the number of exported methods.
	//
	// For an interface type, it returns the number of exported and unexported methods.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	// Even if ConvertibleTo returns true, the conversion may still panic.
	// For example, a slice of type []T is convertible to *[N]T,
	// but the conversion will panic if its length is less than N.
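	//
	// For example (an illustrative sketch; the slice contents and array
	// length are arbitrary):
	//
	//	s := reflect.ValueOf([]int{1, 2, 3})
	//	t := reflect.TypeOf((*[4]int)(nil))
	//	_ = s.Type().ConvertibleTo(t) // true: []int is convertible to *[4]int
	//	s.Convert(t)                  // panics: the slice's length 3 is less than 4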
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	// Even if Comparable returns true, the comparison may still panic.
	// For example, values of interface type are comparable,
	// but the comparison will panic if their dynamic type is not comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Pointer: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ... float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	// If the returned field is promoted from an embedded struct,
	// then Offset in the returned StructField is the offset in
	// the embedded struct.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	//
	// If the returned field is promoted from an embedded struct,
	// then Offset in the returned StructField is the offset in
	// the embedded struct.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
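	//
	// A common pattern, for illustration (the struct type and tags here are
	// arbitrary), is to pair NumField with Field to walk a struct's fields:
	//
	//	t := reflect.TypeOf(struct {
	//		ID   int    `json:"id"`
	//		Name string `json:"name"`
	//	}{})
	//	for i := 0; i < t.NumField(); i++ {
	//		f := t.Field(i)
	//		_ = f.Name            // "ID", then "Name"
	//		_ = f.Tag.Get("json") // "id", then "name"
	//	}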
NumField() int // NumIn returns a function type's input parameter count. // It panics if the type's Kind is not Func. NumIn() int // NumOut returns a function type's output parameter count. // It panics if the type's Kind is not Func. NumOut() int // Out returns the type of a function type's i'th output parameter. // It panics if the type's Kind is not Func. // It panics if i is not in the range [0, NumOut()). Out(i int) Type common() *abi.Type uncommon() *uncommonType } // BUG(rsc): FieldByName and related functions consider struct field names to be equal // if the names are equal, even if they are unexported names originating // in different packages. The practical effect of this is that the result of // t.FieldByName("x") is not well defined if the struct type t contains // multiple fields named x (embedded from different packages). // FieldByName may return one of the fields named x or may report that there are none. // See https://golang.org/issue/4876 for more details. /* * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go). * A few are known to ../runtime/type.go to convey to debuggers. * They are also known to ../runtime/type.go. */ // A Kind represents the specific kind of type that a [Type] represents. // The zero Kind is not a valid kind. type Kind uint const ( Invalid Kind = iota Bool Int Int8 Int16 Int32 Int64 Uint Uint8 Uint16 Uint32 Uint64 Uintptr Float32 Float64 Complex64 Complex128 Array Chan Func Interface Map Pointer Slice String Struct UnsafePointer ) // Ptr is the old name for the [Pointer] kind. const Ptr = Pointer // uncommonType is present only for defined types or types with methods // (if T is a defined type, the uncommonTypes for T and *T have methods). // Using a pointer to this struct reduces the overall size required // to describe a non-defined type with no methods. type uncommonType = abi.UncommonType // Embed this type to get common/uncommon type common struct { abi.Type } // rtype is the common implementation of most values. // It is embedded in other struct types. type rtype struct { t abi.Type } func ( *rtype) () *abi.Type { return &.t } func ( *rtype) () *abi.UncommonType { return .t.Uncommon() } type aNameOff = abi.NameOff type aTypeOff = abi.TypeOff type aTextOff = abi.TextOff // ChanDir represents a channel type's direction. type ChanDir int const ( RecvDir ChanDir = 1 << iota // <-chan SendDir // chan<- BothDir = RecvDir | SendDir // chan ) // arrayType represents a fixed array type. type arrayType = abi.ArrayType // chanType represents a channel type. type chanType = abi.ChanType // funcType represents a function type. // // A *rtype for each in and out parameter is stored in an array that // directly follows the funcType (and possibly its uncommonType). So // a function type with one method, one input, and one output is: // // struct { // funcType // uncommonType // [2]*rtype // [0] is in, [1] is out // } type funcType = abi.FuncType // interfaceType represents an interface type. type interfaceType struct { abi.InterfaceType // can embed directly because not a public type. 
} func ( *interfaceType) ( aNameOff) abi.Name { return toRType(&.Type).nameOff() } func nameOffFor( *abi.Type, aNameOff) abi.Name { return toRType().nameOff() } func typeOffFor( *abi.Type, aTypeOff) *abi.Type { return toRType().typeOff() } func ( *interfaceType) ( aTypeOff) *abi.Type { return toRType(&.Type).typeOff() } func ( *interfaceType) () *abi.Type { return &.Type } func ( *interfaceType) () *abi.UncommonType { return .Uncommon() } // mapType represents a map type. type mapType struct { abi.MapType } // ptrType represents a pointer type. type ptrType struct { abi.PtrType } // sliceType represents a slice type. type sliceType struct { abi.SliceType } // Struct field type structField = abi.StructField // structType represents a struct type. type structType struct { abi.StructType } func pkgPath( abi.Name) string { if .Bytes == nil || *.DataChecked(0, "name flag field")&(1<<2) == 0 { return "" } , := .ReadVarint(1) := 1 + + if .HasTag() { , := .ReadVarint() += + } var int32 // Note that this field may not be aligned in memory, // so we cannot use a direct int32 assignment here. copy((*[4]byte)(unsafe.Pointer(&))[:], (*[4]byte)(unsafe.Pointer(.DataChecked(, "name offset field")))[:]) := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(.Bytes), ))} return .Name() } func newName(, string, , bool) abi.Name { return abi.NewName(, , , ) } /* * The compiler knows the exact layout of all the data structures above. * The compiler does not know about the data structures and methods below. */ // Method represents a single method. type Method struct { // Name is the method name. Name string // PkgPath is the package path that qualifies a lower case (unexported) // method name. It is empty for upper case (exported) method names. // The combination of PkgPath and Name uniquely identifies a method // in a method set. // See https://golang.org/ref/spec#Uniqueness_of_identifiers PkgPath string Type Type // method type Func Value // func with receiver as first argument Index int // index for Type.Method } // IsExported reports whether the method is exported. func ( Method) () bool { return .PkgPath == "" } const ( kindDirectIface = 1 << 5 kindGCProg = 1 << 6 // Type.gc points to GC program kindMask = (1 << 5) - 1 ) // String returns the name of k. func ( Kind) () string { if uint() < uint(len(kindNames)) { return kindNames[uint()] } return "kind" + strconv.Itoa(int()) } var kindNames = []string{ Invalid: "invalid", Bool: "bool", Int: "int", Int8: "int8", Int16: "int16", Int32: "int32", Int64: "int64", Uint: "uint", Uint8: "uint8", Uint16: "uint16", Uint32: "uint32", Uint64: "uint64", Uintptr: "uintptr", Float32: "float32", Float64: "float64", Complex64: "complex64", Complex128: "complex128", Array: "array", Chan: "chan", Func: "func", Interface: "interface", Map: "map", Pointer: "ptr", Slice: "slice", String: "string", Struct: "struct", UnsafePointer: "unsafe.Pointer", } // resolveNameOff resolves a name offset from a base pointer. // The (*rtype).nameOff method is a convenience wrapper for this function. // Implemented in the runtime package. // //go:noescape func resolveNameOff( unsafe.Pointer, int32) unsafe.Pointer // resolveTypeOff resolves an *rtype offset from a base type. // The (*rtype).typeOff method is a convenience wrapper for this function. // Implemented in the runtime package. // //go:noescape func resolveTypeOff( unsafe.Pointer, int32) unsafe.Pointer // resolveTextOff resolves a function pointer offset from a base type. 
// The (*rtype).textOff method is a convenience wrapper for this function. // Implemented in the runtime package. // //go:noescape func resolveTextOff( unsafe.Pointer, int32) unsafe.Pointer // addReflectOff adds a pointer to the reflection lookup map in the runtime. // It returns a new ID that can be used as a typeOff or textOff, and will // be resolved correctly. Implemented in the runtime package. // //go:noescape func addReflectOff( unsafe.Pointer) int32 // resolveReflectName adds a name to the reflection lookup map in the runtime. // It returns a new nameOff that can be used to refer to the pointer. func resolveReflectName( abi.Name) aNameOff { return aNameOff(addReflectOff(unsafe.Pointer(.Bytes))) } // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. // It returns a new typeOff that can be used to refer to the pointer. func resolveReflectType( *abi.Type) aTypeOff { return aTypeOff(addReflectOff(unsafe.Pointer())) } // resolveReflectText adds a function pointer to the reflection lookup map in // the runtime. It returns a new textOff that can be used to refer to the // pointer. func resolveReflectText( unsafe.Pointer) aTextOff { return aTextOff(addReflectOff()) } func ( *rtype) ( aNameOff) abi.Name { return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(), int32()))} } func ( *rtype) ( aTypeOff) *abi.Type { return (*abi.Type)(resolveTypeOff(unsafe.Pointer(), int32())) } func ( *rtype) ( aTextOff) unsafe.Pointer { return resolveTextOff(unsafe.Pointer(), int32()) } func textOffFor( *abi.Type, aTextOff) unsafe.Pointer { return toRType().textOff() } func ( *rtype) () string { := .nameOff(.t.Str).Name() if .t.TFlag&abi.TFlagExtraStar != 0 { return [1:] } return } func ( *rtype) () uintptr { return .t.Size() } func ( *rtype) () int { if == nil { panic("reflect: Bits of nil Type") } := .Kind() if < Int || > Complex128 { panic("reflect: Bits of non-arithmetic Type " + .String()) } return int(.t.Size_) * 8 } func ( *rtype) () int { return .t.Align() } func ( *rtype) () int { return .t.FieldAlign() } func ( *rtype) () Kind { return Kind(.t.Kind()) } func ( *rtype) () []abi.Method { := .uncommon() if == nil { return nil } return .ExportedMethods() } func ( *rtype) () int { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .NumMethod() } return len(.exportedMethods()) } func ( *rtype) ( int) ( Method) { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .Method() } := .exportedMethods() if < 0 || >= len() { panic("reflect: Method index out of range") } := [] := .nameOff(.Name) .Name = .Name() := flag(Func) := .typeOff(.Mtyp) := (*funcType)(unsafe.Pointer()) := make([]Type, 0, 1+.NumIn()) = append(, ) for , := range .InSlice() { = append(, toRType()) } := make([]Type, 0, .NumOut()) for , := range .OutSlice() { = append(, toRType()) } := FuncOf(, , .IsVariadic()) .Type = := .textOff(.Tfn) := unsafe.Pointer(&) .Func = Value{&.(*rtype).t, , } .Index = return } func ( *rtype) ( string) ( Method, bool) { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .MethodByName() } := .uncommon() if == nil { return Method{}, false } := .ExportedMethods() // We are looking for the first index i where the string becomes >= s. // This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name). 
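	//
	// In other words, assuming package sort were imported here, the loop
	// below would compute
	//
	//	i := sort.Search(len(methods), func(h int) bool {
	//		return t.nameOff(methods[h].name).name() >= name
	//	})
	//
	// and the code after the loop then checks methods[i] for an exact match.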
, := 0, len() for < { := int(uint(+) >> 1) // avoid overflow when computing h // i ≤ h < j if !(.nameOff([].Name).Name() >= ) { = + 1 // preserves f(i-1) == false } else { = // preserves f(j) == true } } // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. if < len() && == .nameOff([].Name).Name() { return .Method(), true } return Method{}, false } func ( *rtype) () string { if .t.TFlag&abi.TFlagNamed == 0 { return "" } := .uncommon() if == nil { return "" } return .nameOff(.PkgPath).Name() } func pkgPathFor( *abi.Type) string { return toRType().PkgPath() } func ( *rtype) () string { if !.t.HasName() { return "" } := .String() := len() - 1 := 0 for >= 0 && ([] != '.' || != 0) { switch [] { case ']': ++ case '[': -- } -- } return [+1:] } func nameFor( *abi.Type) string { return toRType().Name() } func ( *rtype) () ChanDir { if .Kind() != Chan { panic("reflect: ChanDir of non-chan type " + .String()) } := (*abi.ChanType)(unsafe.Pointer()) return ChanDir(.Dir) } func toRType( *abi.Type) *rtype { return (*rtype)(unsafe.Pointer()) } func elem( *abi.Type) *abi.Type { := .Elem() if != nil { return } panic("reflect: Elem of invalid type " + stringFor()) } func ( *rtype) () Type { return toType(elem(.common())) } func ( *rtype) ( int) StructField { if .Kind() != Struct { panic("reflect: Field of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .Field() } func ( *rtype) ( []int) StructField { if .Kind() != Struct { panic("reflect: FieldByIndex of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByIndex() } func ( *rtype) ( string) (StructField, bool) { if .Kind() != Struct { panic("reflect: FieldByName of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByName() } func ( *rtype) ( func(string) bool) (StructField, bool) { if .Kind() != Struct { panic("reflect: FieldByNameFunc of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByNameFunc() } func ( *rtype) () Type { if .Kind() != Map { panic("reflect: Key of non-map type " + .String()) } := (*mapType)(unsafe.Pointer()) return toType(.Key) } func ( *rtype) () int { if .Kind() != Array { panic("reflect: Len of non-array type " + .String()) } := (*arrayType)(unsafe.Pointer()) return int(.Len) } func ( *rtype) () int { if .Kind() != Struct { panic("reflect: NumField of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return len(.Fields) } func ( *rtype) ( int) Type { if .Kind() != Func { panic("reflect: In of non-func type " + .String()) } := (*abi.FuncType)(unsafe.Pointer()) return toType(.InSlice()[]) } func ( *rtype) () int { if .Kind() != Func { panic("reflect: NumIn of non-func type " + .String()) } := (*abi.FuncType)(unsafe.Pointer()) return .NumIn() } func ( *rtype) () int { if .Kind() != Func { panic("reflect: NumOut of non-func type " + .String()) } := (*abi.FuncType)(unsafe.Pointer()) return .NumOut() } func ( *rtype) ( int) Type { if .Kind() != Func { panic("reflect: Out of non-func type " + .String()) } := (*abi.FuncType)(unsafe.Pointer()) return toType(.OutSlice()[]) } func ( *rtype) () bool { if .Kind() != Func { panic("reflect: IsVariadic of non-func type " + .String()) } := (*abi.FuncType)(unsafe.Pointer()) return .IsVariadic() } // add returns p+x. 
// // The whySafe string is ignored, so that the function still inlines // as efficiently as p+x, but all call sites should use the string to // record why the addition is safe, which is to say why the addition // does not cause x to advance to the very end of p's allocation // and therefore point incorrectly at the next block in memory. func add( unsafe.Pointer, uintptr, string) unsafe.Pointer { return unsafe.Pointer(uintptr() + ) } func ( ChanDir) () string { switch { case SendDir: return "chan<-" case RecvDir: return "<-chan" case BothDir: return "chan" } return "ChanDir" + strconv.Itoa(int()) } // Method returns the i'th method in the type's method set. func ( *interfaceType) ( int) ( Method) { if < 0 || >= len(.Methods) { return } := &.Methods[] := .nameOff(.Name) .Name = .Name() if !.IsExported() { .PkgPath = pkgPath() if .PkgPath == "" { .PkgPath = .PkgPath.Name() } } .Type = toType(.typeOff(.Typ)) .Index = return } // NumMethod returns the number of interface methods in the type's method set. func ( *interfaceType) () int { return len(.Methods) } // MethodByName method with the given name in the type's method set. func ( *interfaceType) ( string) ( Method, bool) { if == nil { return } var *abi.Imethod for := range .Methods { = &.Methods[] if .nameOff(.Name).Name() == { return .Method(), true } } return } // A StructField describes a single field in a struct. type StructField struct { // Name is the field name. Name string // PkgPath is the package path that qualifies a lower case (unexported) // field name. It is empty for upper case (exported) field names. // See https://golang.org/ref/spec#Uniqueness_of_identifiers PkgPath string Type Type // field type Tag StructTag // field tag string Offset uintptr // offset within struct, in bytes Index []int // index sequence for Type.FieldByIndex Anonymous bool // is an embedded field } // IsExported reports whether the field is exported. func ( StructField) () bool { return .PkgPath == "" } // A StructTag is the tag string in a struct field. // // By convention, tag strings are a concatenation of // optionally space-separated key:"value" pairs. // Each key is a non-empty string consisting of non-control // characters other than space (U+0020 ' '), quote (U+0022 '"'), // and colon (U+003A ':'). Each value is quoted using U+0022 '"' // characters and Go string literal syntax. type StructTag string // Get returns the value associated with key in the tag string. // If there is no such key in the tag, Get returns the empty string. // If the tag does not have the conventional format, the value // returned by Get is unspecified. To determine whether a tag is // explicitly set to the empty string, use Lookup. func ( StructTag) ( string) string { , := .Lookup() return } // Lookup returns the value associated with key in the tag string. // If the key is present in the tag the value (which may be empty) // is returned. Otherwise the returned value will be the empty string. // The ok return value reports whether the value was explicitly set in // the tag string. If the tag does not have the conventional format, // the value returned by Lookup is unspecified. func ( StructTag) ( string) ( string, bool) { // When modifying this code, also update the validateStructTag code // in cmd/vet/structtag.go. for != "" { // Skip leading space. := 0 for < len() && [] == ' ' { ++ } = [:] if == "" { break } // Scan to colon. A space, a quote or a control character is a syntax error. 
// Strictly speaking, control chars include the range [0x7f, 0x9f], not just // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters // as it is simpler to inspect the tag's bytes than the tag's runes. = 0 for < len() && [] > ' ' && [] != ':' && [] != '"' && [] != 0x7f { ++ } if == 0 || +1 >= len() || [] != ':' || [+1] != '"' { break } := string([:]) = [+1:] // Scan quoted string to find value. = 1 for < len() && [] != '"' { if [] == '\\' { ++ } ++ } if >= len() { break } := string([:+1]) = [+1:] if == { , := strconv.Unquote() if != nil { break } return , true } } return "", false } // Field returns the i'th struct field. func ( *structType) ( int) ( StructField) { if < 0 || >= len(.Fields) { panic("reflect: Field index out of bounds") } := &.Fields[] .Type = toType(.Typ) .Name = .Name.Name() .Anonymous = .Embedded() if !.Name.IsExported() { .PkgPath = .PkgPath.Name() } if := .Name.Tag(); != "" { .Tag = StructTag() } .Offset = .Offset // NOTE(rsc): This is the only allocation in the interface // presented by a reflect.Type. It would be nice to avoid, // at least in the common cases, but we need to make sure // that misbehaving clients of reflect cannot affect other // uses of reflect. One possibility is CL 5371098, but we // postponed that ugliness until there is a demonstrated // need for the performance. This is issue 2320. .Index = []int{} return } // TODO(gri): Should there be an error/bool indicator if the index // is wrong for FieldByIndex? // FieldByIndex returns the nested field corresponding to index. func ( *structType) ( []int) ( StructField) { .Type = toType(&.Type) for , := range { if > 0 { := .Type if .Kind() == Pointer && .Elem().Kind() == Struct { = .Elem() } .Type = } = .Type.Field() } return } // A fieldScan represents an item on the fieldByNameFunc scan work list. type fieldScan struct { typ *structType index []int } // FieldByNameFunc returns the struct field with a name that satisfies the // match function and a boolean to indicate if the field was found. func ( *structType) ( func(string) bool) ( StructField, bool) { // This uses the same condition that the Go language does: there must be a unique instance // of the match at a given depth level. If there are multiple instances of a match at the // same depth, they annihilate each other and inhibit any possible match at a lower level. // The algorithm is breadth first search, one depth level at a time. // The current and next slices are work queues: // current lists the fields to visit on this depth level, // and next lists the fields on the next lower level. := []fieldScan{} := []fieldScan{{typ: }} // nextCount records the number of times an embedded type has been // encountered and considered for queueing in the 'next' slice. // We only queue the first one, but we increment the count on each. // If a struct type T can be reached more than once at a given depth level, // then it annihilates itself and need not be considered at all when we // process that next depth level. var map[*structType]int // visited records the structs that have been considered already. // Embedded pointer fields can create cycles in the graph of // reachable embedded types; visited avoids following those cycles. // It also avoids duplicated effort: if we didn't find the field in an // embedded type T at level 2, we won't find it in one at level 4 either. := map[*structType]bool{} for len() > 0 { , = , [:0] := = nil // Process all the fields at this depth, now listed in 'current'. 
// The loop queues embedded fields found in 'next', for processing during the next // iteration. The multiplicity of the 'current' field counts is recorded // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. for , := range { := .typ if [] { // We've looked through this type before, at a higher level. // That higher level would shadow the lower level we're now at, // so this one can't be useful to us. Ignore it. continue } [] = true for := range .Fields { := &.Fields[] // Find name and (for embedded field) type for field f. := .Name.Name() var *abi.Type if .Embedded() { // Embedded field of type T or *T. = .Typ if .Kind() == abi.Pointer { = .Elem() } } // Does it match? if () { // Potential match if [] > 1 || { // Name appeared multiple times at this level: annihilate. return StructField{}, false } = .Field() .Index = nil .Index = append(.Index, .index...) .Index = append(.Index, ) = true continue } // Queue embedded struct fields for processing with next level, // but only if we haven't seen a match yet at this level and only // if the embedded types haven't already been queued. if || == nil || .Kind() != abi.Struct { continue } := (*structType)(unsafe.Pointer()) if [] > 0 { [] = 2 // exact multiple doesn't matter continue } if == nil { = map[*structType]int{} } [] = 1 if [] > 1 { [] = 2 // exact multiple doesn't matter } var []int = append(, .index...) = append(, ) = append(, fieldScan{, }) } } if { break } } return } // FieldByName returns the struct field with the given name // and a boolean to indicate if the field was found. func ( *structType) ( string) ( StructField, bool) { // Quick check for top-level name, or struct without embedded fields. := false if != "" { for := range .Fields { := &.Fields[] if .Name.Name() == { return .Field(), true } if .Embedded() { = true } } } if ! { return } return .FieldByNameFunc(func( string) bool { return == }) } // TypeOf returns the reflection [Type] that represents the dynamic type of i. // If i is a nil interface value, TypeOf returns nil. func ( any) Type { := *(*emptyInterface)(unsafe.Pointer(&)) // Noescape so this doesn't make i to escape. See the comment // at Value.typ for why this is safe. return toType((*abi.Type)(noescape(unsafe.Pointer(.typ)))) } // rtypeOf directly extracts the *rtype of the provided value. func rtypeOf( any) *abi.Type { := *(*emptyInterface)(unsafe.Pointer(&)) return .typ } // ptrMap is the cache for PointerTo. var ptrMap sync.Map // map[*rtype]*ptrType // PtrTo returns the pointer type with element t. // For example, if t represents type Foo, PtrTo(t) represents *Foo. // // PtrTo is the old spelling of [PointerTo]. // The two functions behave identically. // // Deprecated: Superseded by [PointerTo]. func ( Type) Type { return PointerTo() } // PointerTo returns the pointer type with element t. // For example, if t represents type Foo, PointerTo(t) represents *Foo. func ( Type) Type { return toRType(.(*rtype).ptrTo()) } func ( *rtype) () *abi.Type { := &.t if .PtrToThis != 0 { return .typeOff(.PtrToThis) } // Check the cache. if , := ptrMap.Load(); { return &.(*ptrType).Type } // Look in known types. := "*" + .String() for , := range typesByString() { := (*ptrType)(unsafe.Pointer()) if .Elem != &.t { continue } , := ptrMap.LoadOrStore(, ) return &.(*ptrType).Type } // Create a new ptrType starting with the description // of an *unsafe.Pointer. 
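	//
	// The next two lines rely on the layout of an interface value: storing a
	// typed nil (*unsafe.Pointer)(nil) in an interface and reading the first
	// word back out yields the *ptrType descriptor the compiler already
	// generated for *unsafe.Pointer. That prototype is then copied and its
	// string, hash, and element type adjusted to describe the requested *T.
	// Conceptually (iany and proto are illustrative names):
	//
	//	var iany any = (*unsafe.Pointer)(nil)
	//	proto := *(**ptrType)(unsafe.Pointer(&iany)) // the interface's type word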
var any = (*unsafe.Pointer)(nil) := *(**ptrType)(unsafe.Pointer(&)) := * .Str = resolveReflectName(newName(, "", false, false)) .PtrToThis = 0 // For the type structures linked into the binary, the // compiler provides a good hash of the string. // Create a good hash for the new string by using // the FNV-1 hash's mixing function to combine the // old hash and the new "*". .Hash = fnv1(.t.Hash, '*') .Elem = , := ptrMap.LoadOrStore(, &) return &.(*ptrType).Type } func ptrTo( *abi.Type) *abi.Type { return toRType().ptrTo() } // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. func fnv1( uint32, ...byte) uint32 { for , := range { = *16777619 ^ uint32() } return } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.Implements") } if .Kind() != Interface { panic("reflect: non-interface type passed to Type.Implements") } return implements(.common(), .common()) } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.AssignableTo") } := .common() return directlyAssignable(, .common()) || implements(, .common()) } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.ConvertibleTo") } return convertOp(.common(), .common()) != nil } func ( *rtype) () bool { return .t.Equal != nil } // implements reports whether the type V implements the interface type T. func implements(, *abi.Type) bool { if .Kind() != abi.Interface { return false } := (*interfaceType)(unsafe.Pointer()) if len(.Methods) == 0 { return true } // The same algorithm applies in both cases, but the // method tables for an interface type and a concrete type // are different, so the code is duplicated. // In both cases the algorithm is a linear scan over the two // lists - T's methods and V's methods - simultaneously. // Since method tables are stored in a unique sorted order // (alphabetical, with no duplicate method names), the scan // through V's methods must hit a match for each of T's // methods along the way, or else V does not implement T. // This lets us run the scan in overall linear time instead of // the quadratic time a naive search would require. // See also ../runtime/iface.go. if .Kind() == abi.Interface { := (*interfaceType)(unsafe.Pointer()) := 0 for := 0; < len(.Methods); ++ { := &.Methods[] := .nameOff(.Name) := &.Methods[] := nameOffFor(, .Name) if .Name() == .Name() && typeOffFor(, .Typ) == .typeOff(.Typ) { if !.IsExported() { := pkgPath() if == "" { = .PkgPath.Name() } := pkgPath() if == "" { = .PkgPath.Name() } if != { continue } } if ++; >= len(.Methods) { return true } } } return false } := .Uncommon() if == nil { return false } := 0 := .Methods() for := 0; < int(.Mcount); ++ { := &.Methods[] := .nameOff(.Name) := [] := nameOffFor(, .Name) if .Name() == .Name() && typeOffFor(, .Mtyp) == .typeOff(.Typ) { if !.IsExported() { := pkgPath() if == "" { = .PkgPath.Name() } := pkgPath() if == "" { = nameOffFor(, .PkgPath).Name() } if != { continue } } if ++; >= len(.Methods) { return true } } } return false } // specialChannelAssignability reports whether a value x of channel type V // can be directly assigned (using memmove) to another channel type T. // https://golang.org/doc/go_spec.html#Assignability // T and V must be both of Chan kind. func specialChannelAssignability(, *abi.Type) bool { // Special case: // x is a bidirectional channel value, T is a channel type, // x's type V and T have identical element types, // and at least one of V or T is not a defined type. 
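	//
	// For illustration (MyChan is a hypothetical defined type):
	//
	//	type MyChan chan int
	//	var c chan int
	//	var m MyChan = c // allowed: c's type chan int is bidirectional and
	//	                 // not defined, and both element types are int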
return .ChanDir() == abi.BothDir && (nameFor() == "" || nameFor() == "") && haveIdenticalType(.Elem(), .Elem(), true) } // directlyAssignable reports whether a value x of type V can be directly // assigned (using memmove) to a value of type T. // https://golang.org/doc/go_spec.html#Assignability // Ignoring the interface rules (implemented elsewhere) // and the ideal constant rules (no ideal constants at run time). func directlyAssignable(, *abi.Type) bool { // x's type V is identical to T? if == { return true } // Otherwise at least one of T and V must not be defined // and they must have the same kind. if .HasName() && .HasName() || .Kind() != .Kind() { return false } if .Kind() == abi.Chan && specialChannelAssignability(, ) { return true } // x's type T and V must have identical underlying types. return haveIdenticalUnderlyingType(, , true) } func haveIdenticalType(, *abi.Type, bool) bool { if { return == } if nameFor() != nameFor() || .Kind() != .Kind() || pkgPathFor() != pkgPathFor() { return false } return haveIdenticalUnderlyingType(, , false) } func haveIdenticalUnderlyingType(, *abi.Type, bool) bool { if == { return true } := Kind(.Kind()) if != Kind(.Kind()) { return false } // Non-composite types of equal kind have same underlying type // (the predefined instance of the type). if Bool <= && <= Complex128 || == String || == UnsafePointer { return true } // Composite types. switch { case Array: return .Len() == .Len() && haveIdenticalType(.Elem(), .Elem(), ) case Chan: return .ChanDir() == .ChanDir() && haveIdenticalType(.Elem(), .Elem(), ) case Func: := (*funcType)(unsafe.Pointer()) := (*funcType)(unsafe.Pointer()) if .OutCount != .OutCount || .InCount != .InCount { return false } for := 0; < .NumIn(); ++ { if !haveIdenticalType(.In(), .In(), ) { return false } } for := 0; < .NumOut(); ++ { if !haveIdenticalType(.Out(), .Out(), ) { return false } } return true case Interface: := (*interfaceType)(unsafe.Pointer()) := (*interfaceType)(unsafe.Pointer()) if len(.Methods) == 0 && len(.Methods) == 0 { return true } // Might have the same methods but still // need a run time conversion. return false case Map: return haveIdenticalType(.Key(), .Key(), ) && haveIdenticalType(.Elem(), .Elem(), ) case Pointer, Slice: return haveIdenticalType(.Elem(), .Elem(), ) case Struct: := (*structType)(unsafe.Pointer()) := (*structType)(unsafe.Pointer()) if len(.Fields) != len(.Fields) { return false } if .PkgPath.Name() != .PkgPath.Name() { return false } for := range .Fields { := &.Fields[] := &.Fields[] if .Name.Name() != .Name.Name() { return false } if !haveIdenticalType(.Typ, .Typ, ) { return false } if && .Name.Tag() != .Name.Tag() { return false } if .Offset != .Offset { return false } if .Embedded() != .Embedded() { return false } } return true } return false } // typelinks is implemented in package runtime. // It returns a slice of the sections in each module, // and a slice of *rtype offsets in each module. // // The types in each module are sorted by string. That is, the first // two linked types of the first module are: // // d0 := sections[0] // t1 := (*rtype)(add(d0, offset[0][0])) // t2 := (*rtype)(add(d0, offset[0][1])) // // and // // t1.String() < t2.String() // // Note that strings are not unique identifiers for types: // there can be more than one with a given string. // Only types we might want to look up are included: // pointers, channels, maps, slices, and arrays. 
func typelinks() ( []unsafe.Pointer, [][]int32) func rtypeOff( unsafe.Pointer, int32) *abi.Type { return (*abi.Type)(add(, uintptr(), "sizeof(rtype) > 0")) } // typesByString returns the subslice of typelinks() whose elements have // the given string representation. // It may be empty (no known types with that string) or may have // multiple elements (multiple types with that string). func typesByString( string) []*abi.Type { , := typelinks() var []*abi.Type for , := range { := [] // We are looking for the first index i where the string becomes >= s. // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). , := 0, len() for < { := int(uint(+) >> 1) // avoid overflow when computing h // i ≤ h < j if !(stringFor(rtypeOff(, [])) >= ) { = + 1 // preserves f(i-1) == false } else { = // preserves f(j) == true } } // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. // Having found the first, linear scan forward to find the last. // We could do a second binary search, but the caller is going // to do a linear scan anyway. for := ; < len(); ++ { := rtypeOff(, []) if stringFor() != { break } = append(, ) } } return } // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. var lookupCache sync.Map // map[cacheKey]*rtype // A cacheKey is the key for use in the lookupCache. // Four values describe any of the types we are looking for: // type kind, one or two subtypes, and an extra integer. type cacheKey struct { kind Kind t1 *abi.Type t2 *abi.Type extra uintptr } // The funcLookupCache caches FuncOf lookups. // FuncOf does not share the common lookupCache since cacheKey is not // sufficient to represent functions unambiguously. var funcLookupCache struct { sync.Mutex // Guards stores (but not loads) on m. // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. // Elements of m are append-only and thus safe for concurrent reading. m sync.Map } // ChanOf returns the channel type with the given direction and element type. // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. // // The gc runtime imposes a limit of 64 kB on channel element types. // If t's size is equal to or exceeds this limit, ChanOf panics. func ( ChanDir, Type) Type { := .common() // Look in cache. := cacheKey{Chan, , nil, uintptr()} if , := lookupCache.Load(); { return .(*rtype) } // This restriction is imposed by the gc compiler and the runtime. if .Size_ >= 1<<16 { panic("reflect.ChanOf: element size too large") } // Look in known types. var string switch { default: panic("reflect.ChanOf: invalid dir") case SendDir: = "chan<- " + stringFor() case RecvDir: = "<-chan " + stringFor() case BothDir: := stringFor() if [0] == '<' { // typ is recv chan, need parentheses as "<-" associates with leftmost // chan possible, see: // * https://golang.org/ref/spec#Channel_types // * https://github.com/golang/go/issues/39897 = "chan (" + + ")" } else { = "chan " + } } for , := range typesByString() { := (*chanType)(unsafe.Pointer()) if .Elem == && .Dir == abi.ChanDir() { , := lookupCache.LoadOrStore(, toRType()) return .(Type) } } // Make a channel type. var any = (chan unsafe.Pointer)(nil) := *(**chanType)(unsafe.Pointer(&)) := * .TFlag = abi.TFlagRegularMemory .Dir = abi.ChanDir() .Str = resolveReflectName(newName(, "", false, false)) .Hash = fnv1(.Hash, 'c', byte()) .Elem = , := lookupCache.LoadOrStore(, toRType(&.Type)) return .(Type) } // MapOf returns the map type with the given key and element types. 
// For example, if k represents int and e represents string, // MapOf(k, e) represents map[int]string. // // If the key type is not a valid map key type (that is, if it does // not implement Go's == operator), MapOf panics. func (, Type) Type { := .common() := .common() if .Equal == nil { panic("reflect.MapOf: invalid key type " + stringFor()) } // Look in cache. := cacheKey{Map, , , 0} if , := lookupCache.Load(); { return .(Type) } // Look in known types. := "map[" + stringFor() + "]" + stringFor() for , := range typesByString() { := (*mapType)(unsafe.Pointer()) if .Key == && .Elem == { , := lookupCache.LoadOrStore(, toRType()) return .(Type) } } // Make a map type. // Note: flag values must match those used in the TMAP case // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. var any = (map[unsafe.Pointer]unsafe.Pointer)(nil) := **(**mapType)(unsafe.Pointer(&)) .Str = resolveReflectName(newName(, "", false, false)) .TFlag = 0 .Hash = fnv1(.Hash, 'm', byte(.Hash>>24), byte(.Hash>>16), byte(.Hash>>8), byte(.Hash)) .Key = .Elem = .Bucket = bucketOf(, ) .Hasher = func( unsafe.Pointer, uintptr) uintptr { return typehash(, , ) } .Flags = 0 if .Size_ > maxKeySize { .KeySize = uint8(goarch.PtrSize) .Flags |= 1 // indirect key } else { .KeySize = uint8(.Size_) } if .Size_ > maxValSize { .ValueSize = uint8(goarch.PtrSize) .Flags |= 2 // indirect value } else { .MapType.ValueSize = uint8(.Size_) } .MapType.BucketSize = uint16(.Bucket.Size_) if isReflexive() { .Flags |= 4 } if needKeyUpdate() { .Flags |= 8 } if hashMightPanic() { .Flags |= 16 } .PtrToThis = 0 , := lookupCache.LoadOrStore(, toRType(&.Type)) return .(Type) } var funcTypes []Type var funcTypesMutex sync.Mutex func initFuncTypes( int) Type { funcTypesMutex.Lock() defer funcTypesMutex.Unlock() if >= len(funcTypes) { := make([]Type, +1) copy(, funcTypes) funcTypes = } if funcTypes[] != nil { return funcTypes[] } funcTypes[] = StructOf([]StructField{ { Name: "FuncType", Type: TypeOf(funcType{}), }, { Name: "Args", Type: ArrayOf(, TypeOf(&rtype{})), }, }) return funcTypes[] } // FuncOf returns the function type with the given argument and result types. // For example if k represents int and e represents string, // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. // // The variadic argument controls whether the function is variadic. FuncOf // panics if the in[len(in)-1] does not represent a slice and variadic is // true. func (, []Type, bool) Type { if && (len() == 0 || [len()-1].Kind() != Slice) { panic("reflect.FuncOf: last arg of variadic func must be slice") } // Make a func type. var any = (func())(nil) := *(**funcType)(unsafe.Pointer(&)) := len() + len() if > 128 { panic("reflect.FuncOf: too many arguments") } := New(initFuncTypes()).Elem() := (*funcType)(unsafe.Pointer(.Field(0).Addr().Pointer())) := unsafe.Slice((**rtype)(unsafe.Pointer(.Field(1).Addr().Pointer())), )[0:0:] * = * // Build a hash and minimally populate ft. var uint32 for , := range { := .(*rtype) = append(, ) = fnv1(, byte(.t.Hash>>24), byte(.t.Hash>>16), byte(.t.Hash>>8), byte(.t.Hash)) } if { = fnv1(, 'v') } = fnv1(, '.') for , := range { := .(*rtype) = append(, ) = fnv1(, byte(.t.Hash>>24), byte(.t.Hash>>16), byte(.t.Hash>>8), byte(.t.Hash)) } .TFlag = 0 .Hash = .InCount = uint16(len()) .OutCount = uint16(len()) if { .OutCount |= 1 << 15 } // Look in cache. if , := funcLookupCache.m.Load(); { for , := range .([]*abi.Type) { if haveIdenticalUnderlyingType(&.Type, , true) { return toRType() } } } // Not in cache, lock and retry. 
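	// This is the usual double-checked pattern for a lazily populated cache:
	// the Load above runs without the lock on the fast path, and the Load
	// below is repeated while holding funcLookupCache.Mutex so that
	// concurrent callers do not both add an entry for the same type.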
funcLookupCache.Lock() defer funcLookupCache.Unlock() if , := funcLookupCache.m.Load(); { for , := range .([]*abi.Type) { if haveIdenticalUnderlyingType(&.Type, , true) { return toRType() } } } := func( *abi.Type) Type { var []*abi.Type if , := funcLookupCache.m.Load(); { = .([]*abi.Type) } funcLookupCache.m.Store(, append(, )) return toType() } // Look in known types for the same string representation. := funcStr() for , := range typesByString() { if haveIdenticalUnderlyingType(&.Type, , true) { return () } } // Populate the remaining fields of ft and store in cache. .Str = resolveReflectName(newName(, "", false, false)) .PtrToThis = 0 return (&.Type) } func stringFor( *abi.Type) string { return toRType().String() } // funcStr builds a string representation of a funcType. func funcStr( *funcType) string { := make([]byte, 0, 64) = append(, "func("...) for , := range .InSlice() { if > 0 { = append(, ", "...) } if .IsVariadic() && == int(.InCount)-1 { = append(, "..."...) = append(, stringFor((*sliceType)(unsafe.Pointer()).Elem)...) } else { = append(, stringFor()...) } } = append(, ')') := .OutSlice() if len() == 1 { = append(, ' ') } else if len() > 1 { = append(, " ("...) } for , := range { if > 0 { = append(, ", "...) } = append(, stringFor()...) } if len() > 1 { = append(, ')') } return string() } // isReflexive reports whether the == operation on the type is reflexive. // That is, x == x for all values x of type t. func isReflexive( *abi.Type) bool { switch Kind(.Kind()) { case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer: return true case Float32, Float64, Complex64, Complex128, Interface: return false case Array: := (*arrayType)(unsafe.Pointer()) return (.Elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .Fields { if !(.Typ) { return false } } return true default: // Func, Map, Slice, Invalid panic("isReflexive called on non-key type " + stringFor()) } } // needKeyUpdate reports whether map overwrites require the key to be copied. func needKeyUpdate( *abi.Type) bool { switch Kind(.Kind()) { case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer: return false case Float32, Float64, Complex64, Complex128, Interface, String: // Float keys can be updated from +0 to -0. // String keys can be updated to use a smaller backing store. // Interfaces might have floats or strings in them. return true case Array: := (*arrayType)(unsafe.Pointer()) return (.Elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .Fields { if (.Typ) { return true } } return false default: // Func, Map, Slice, Invalid panic("needKeyUpdate called on non-key type " + stringFor()) } } // hashMightPanic reports whether the hash of a map key of type t might panic. func hashMightPanic( *abi.Type) bool { switch Kind(.Kind()) { case Interface: return true case Array: := (*arrayType)(unsafe.Pointer()) return (.Elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .Fields { if (.Typ) { return true } } return false default: return false } } // Make sure these routines stay in sync with ../runtime/map.go! // These types exist only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in string // for possible debugging use. 
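// For reference, with the values these abi constants currently have
// (8 entries per bucket, 128-byte maximum key and elem sizes) and 8-byte
// pointers, the worst-case bucket laid out by bucketOf below occupies
//
//	8*(1+128+128) + 8 = 2064 bytes,
//
// which is the figure quoted in the comment inside bucketOf.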
const ( bucketSize uintptr = abi.MapBucketCount maxKeySize uintptr = abi.MapMaxKeyBytes maxValSize uintptr = abi.MapMaxElemBytes ) func bucketOf(, *abi.Type) *abi.Type { if .Size_ > maxKeySize { = ptrTo() } if .Size_ > maxValSize { = ptrTo() } // Prepare GC data if any. // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes, // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap. // Note that since the key and value are known to be <= 128 bytes, // they're guaranteed to have bitmaps instead of GC programs. var *byte var uintptr := bucketSize*(1+.Size_+.Size_) + goarch.PtrSize if &uintptr(.Align_-1) != 0 || &uintptr(.Align_-1) != 0 { panic("reflect: bad size computation in MapOf") } if .PtrBytes != 0 || .PtrBytes != 0 { := (bucketSize*(1+.Size_+.Size_) + goarch.PtrSize) / goarch.PtrSize := ( + 7) / 8 // Runtime needs pointer masks to be a multiple of uintptr in size. = ( + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) := make([]byte, ) := bucketSize / goarch.PtrSize if .PtrBytes != 0 { emitGCMask(, , , bucketSize) } += bucketSize * .Size_ / goarch.PtrSize if .PtrBytes != 0 { emitGCMask(, , , bucketSize) } += bucketSize * .Size_ / goarch.PtrSize := [/8] |= 1 << ( % 8) = &[0] = ( + 1) * goarch.PtrSize // overflow word must be last if != { panic("reflect: bad layout computation in MapOf") } } := &abi.Type{ Align_: goarch.PtrSize, Size_: , Kind_: uint8(Struct), PtrBytes: , GCData: , } := "bucket(" + stringFor() + "," + stringFor() + ")" .Str = resolveReflectName(newName(, "", false, false)) return } func ( *rtype) (, uintptr) []byte { return (*[1 << 30]byte)(unsafe.Pointer(.t.GCData))[::] } // emitGCMask writes the GC mask for [n]typ into out, starting at bit // offset base. func emitGCMask( []byte, uintptr, *abi.Type, uintptr) { if .Kind_&kindGCProg != 0 { panic("reflect: unexpected GC program") } := .PtrBytes / goarch.PtrSize := .Size_ / goarch.PtrSize := .GcSlice(0, (+7)/8) for := uintptr(0); < ; ++ { if ([/8]>>(%8))&1 != 0 { for := uintptr(0); < ; ++ { := + * + [/8] |= 1 << ( % 8) } } } } // appendGCProg appends the GC program for the first ptrdata bytes of // typ to dst and returns the extended slice. func appendGCProg( []byte, *abi.Type) []byte { if .Kind_&kindGCProg != 0 { // Element has GC program; emit one element. := uintptr(*(*uint32)(unsafe.Pointer(.GCData))) := .GcSlice(4, 4+-1) return append(, ...) } // Element is small with pointer mask; use as literal bits. := .PtrBytes / goarch.PtrSize := .GcSlice(0, (+7)/8) // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). for ; > 120; -= 120 { = append(, 120) = append(, [:15]...) = [15:] } = append(, byte()) = append(, ...) return } // SliceOf returns the slice type with element type t. // For example, if t represents int, SliceOf(t) represents []int. func ( Type) Type { := .common() // Look in cache. := cacheKey{Slice, , nil, 0} if , := lookupCache.Load(); { return .(Type) } // Look in known types. := "[]" + stringFor() for , := range typesByString() { := (*sliceType)(unsafe.Pointer()) if .Elem == { , := lookupCache.LoadOrStore(, toRType()) return .(Type) } } // Make a slice type. var any = ([]unsafe.Pointer)(nil) := *(**sliceType)(unsafe.Pointer(&)) := * .TFlag = 0 .Str = resolveReflectName(newName(, "", false, false)) .Hash = fnv1(.Hash, '[') .Elem = .PtrToThis = 0 , := lookupCache.LoadOrStore(, toRType(&.Type)) return .(Type) } // The structLookupCache caches StructOf lookups. 
// StructOf does not share the common lookupCache since we need to pin // the memory associated with *structTypeFixedN. var structLookupCache struct { sync.Mutex // Guards stores (but not loads) on m. // m is a map[uint32][]Type keyed by the hash calculated in StructOf. // Elements in m are append-only and thus safe for concurrent reading. m sync.Map } type structTypeUncommon struct { structType u uncommonType } // isLetter reports whether a given 'rune' is classified as a Letter. func isLetter( rune) bool { return 'a' <= && <= 'z' || 'A' <= && <= 'Z' || == '_' || >= utf8.RuneSelf && unicode.IsLetter() } // isValidFieldName checks if a string is a valid (struct) field name or not. // // According to the language spec, a field name should be an identifier. // // identifier = letter { letter | unicode_digit } . // letter = unicode_letter | "_" . func isValidFieldName( string) bool { for , := range { if == 0 && !isLetter() { return false } if !(isLetter() || unicode.IsDigit()) { return false } } return len() > 0 } // StructOf returns the struct type containing fields. // The Offset and Index fields are ignored and computed as they would be // by the compiler. // // StructOf currently does not support promoted methods of embedded fields // and panics if passed unexported StructFields. func ( []StructField) Type { var ( = fnv1(0, []byte("struct {")...) uintptr uint8 = true []abi.Method = make([]structField, len()) = make([]byte, 0, 64) = map[string]struct{}{} // fields' names = false // records whether a struct-field type has a GCProg ) := uintptr(0) = append(, "struct {"...) := "" for , := range { if .Name == "" { panic("reflect.StructOf: field " + strconv.Itoa() + " has no name") } if !isValidFieldName(.Name) { panic("reflect.StructOf: field " + strconv.Itoa() + " has invalid name") } if .Type == nil { panic("reflect.StructOf: field " + strconv.Itoa() + " has no type") } , := runtimeStructField() := .Typ if .Kind_&kindGCProg != 0 { = true } if != "" { if == "" { = } else if != { panic("reflect.Struct: fields with different PkgPath " + + " and " + ) } } // Update string and hash := .Name.Name() = fnv1(, []byte()...) = append(, (" " + )...) if .Embedded() { // Embedded field if .Typ.Kind() == abi.Pointer { // Embedded ** and *interface{} are illegal := .Elem() if := .Kind(); == abi.Pointer || == abi.Interface { panic("reflect.StructOf: illegal embedded field type " + stringFor()) } } switch Kind(.Typ.Kind()) { case Interface: := (*interfaceType)(unsafe.Pointer()) for , := range .Methods { if pkgPath(.nameOff(.Name)) != "" { // TODO(sbinet). Issue 15924. panic("reflect: embedded interface with unexported method(s) not implemented") } := resolveReflectText(unsafe.Pointer(abi.FuncPCABIInternal(embeddedIfaceMethStub))) = append(, abi.Method{ Name: resolveReflectName(.nameOff(.Name)), Mtyp: resolveReflectType(.typeOff(.Typ)), Ifn: , Tfn: , }) } case Pointer: := (*ptrType)(unsafe.Pointer()) if := .Uncommon(); != nil { if > 0 && .Mcount > 0 { // Issue 15924. panic("reflect: embedded type with methods not implemented if type is not first field") } if len() > 1 { panic("reflect: embedded type with methods not implemented if there is more than one field") } for , := range .Methods() { := nameOffFor(, .Name) if pkgPath() != "" { // TODO(sbinet). // Issue 15924. 
panic("reflect: embedded interface with unexported method(s) not implemented") } = append(, abi.Method{ Name: resolveReflectName(), Mtyp: resolveReflectType(typeOffFor(, .Mtyp)), Ifn: resolveReflectText(textOffFor(, .Ifn)), Tfn: resolveReflectText(textOffFor(, .Tfn)), }) } } if := .Elem.Uncommon(); != nil { for , := range .Methods() { := nameOffFor(, .Name) if pkgPath() != "" { // TODO(sbinet) // Issue 15924. panic("reflect: embedded interface with unexported method(s) not implemented") } = append(, abi.Method{ Name: resolveReflectName(), Mtyp: resolveReflectType(typeOffFor(.Elem, .Mtyp)), Ifn: resolveReflectText(textOffFor(.Elem, .Ifn)), Tfn: resolveReflectText(textOffFor(.Elem, .Tfn)), }) } } default: if := .Uncommon(); != nil { if > 0 && .Mcount > 0 { // Issue 15924. panic("reflect: embedded type with methods not implemented if type is not first field") } if len() > 1 && .Kind_&kindDirectIface != 0 { panic("reflect: embedded type with methods not implemented for non-pointer type") } for , := range .Methods() { := nameOffFor(, .Name) if pkgPath() != "" { // TODO(sbinet) // Issue 15924. panic("reflect: embedded interface with unexported method(s) not implemented") } = append(, abi.Method{ Name: resolveReflectName(), Mtyp: resolveReflectType(typeOffFor(, .Mtyp)), Ifn: resolveReflectText(textOffFor(, .Ifn)), Tfn: resolveReflectText(textOffFor(, .Tfn)), }) } } } } if , := []; && != "_" { panic("reflect.StructOf: duplicate field " + ) } [] = struct{}{} = fnv1(, byte(.Hash>>24), byte(.Hash>>16), byte(.Hash>>8), byte(.Hash)) = append(, (" " + stringFor())...) if .Name.HasTag() { = fnv1(, []byte(.Name.Tag())...) = append(, (" " + strconv.Quote(.Name.Tag()))...) } if < len()-1 { = append(, ';') } = && (.Equal != nil) := align(, uintptr(.Align_)) if < { panic("reflect.StructOf: struct size would exceed virtual address space") } if .Align_ > { = .Align_ } = + .Size_ if < { panic("reflect.StructOf: struct size would exceed virtual address space") } .Offset = if .Size_ == 0 { = } [] = } if > 0 && == { // This is a non-zero sized struct that ends in a // zero-sized field. We add an extra byte of padding, // to ensure that taking the address of the final // zero-sized field can't manufacture a pointer to the // next object in the heap. See issue 9401. ++ if == 0 { panic("reflect.StructOf: struct size would exceed virtual address space") } } var *structType var *uncommonType if len() == 0 { := new(structTypeUncommon) = &.structType = &.u } else { // A *rtype representing a struct is followed directly in memory by an // array of method objects representing the methods attached to the // struct. To get the same layout for a run time generated type, we // need an array directly following the uncommonType memory. // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. := New(([]StructField{ {Name: "S", Type: TypeOf(structType{})}, {Name: "U", Type: TypeOf(uncommonType{})}, {Name: "M", Type: ArrayOf(len(), TypeOf([0]))}, })) = (*structType)(.Elem().Field(0).Addr().UnsafePointer()) = (*uncommonType)(.Elem().Field(1).Addr().UnsafePointer()) copy(.Elem().Field(2).Slice(0, len()).Interface().([]abi.Method), ) } // TODO(sbinet): Once we allow embedding multiple types, // methods will need to be sorted like the compiler does. // TODO(sbinet): Once we allow non-exported methods, we will // need to compute xcount as the number of exported methods. 
// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048

// ArrayOf returns the array type with the given length and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(length int, elem Type) Type {
	if length < 0 {
		panic("reflect: negative length passed to ArrayOf")
	}

	typ := elem.common()

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(length)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
	for _, tt := range typesByString(s) {
		array := (*arrayType)(unsafe.Pointer(tt))
		if array.Elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
			return ti.(Type)
		}
	}

	// Make an array type.
	var iarray any = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.TFlag = typ.TFlag & abi.TFlagRegularMemory
	array.Str = resolveReflectName(newName(s, "", false, false))
	array.Hash = fnv1(typ.Hash, '[')
	for n := uint32(length); n > 0; n >>= 8 {
		array.Hash = fnv1(array.Hash, byte(n))
	}
	array.Hash = fnv1(array.Hash, ']')
	array.Elem = typ
	array.PtrToThis = 0
	if typ.Size_ > 0 {
		max := ^uintptr(0) / typ.Size_
		if uintptr(length) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.Size_ = typ.Size_ * uintptr(length)
	if length > 0 && typ.PtrBytes != 0 {
		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
	}
	array.Align_ = typ.Align_
	array.FieldAlign_ = typ.FieldAlign_
	array.Len = uintptr(length)
	array.Slice = &(SliceOf(elem).(*rtype).t)

	switch {
	case typ.PtrBytes == 0 || array.Size_ == 0:
		// No pointers.
		array.GCData = nil
		array.PtrBytes = 0

	case length == 1:
		// In memory, 1-element array looks just like the element.
		array.Kind_ |= typ.Kind_ & kindGCProg
		array.GCData = typ.GCData
		array.PtrBytes = typ.PtrBytes

	case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into length 1 bits in larger mask.
		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
		// Runtime needs pointer masks to be a multiple of uintptr in size.
		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		mask := make([]byte, n)
		emitGCMask(mask, 0, typ, array.Len)
		array.GCData = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		prog = appendGCProg(prog, typ)
		// Pad from ptrdata to size.
		elemPtrs := typ.PtrBytes / goarch.PtrSize
		elemWords := typ.Size_ / goarch.PtrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat length-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(length)-1)
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.Kind_ |= kindGCProg
		array.GCData = &prog[0]
		array.PtrBytes = array.Size_ // overestimate but ok; must match program
	}

	etyp := typ
	esize := etyp.Size()

	array.Equal = nil
	if eequal := etyp.Equal; eequal != nil {
		array.Equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < length; i++ {
				pi := arrayAt(p, i, esize, "i < length")
				qi := arrayAt(q, i, esize, "i < length")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case length == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.Kind_ |= kindDirectIface
	default:
		array.Kind_ &^= kindDirectIface
	}

	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type))
	return ti.(Type)
}
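
// As a usage sketch (illustrative values only), an array type built with
// ArrayOf behaves like any other array type: a value can be allocated with
// New and its elements addressed with Index:
//
//	at := ArrayOf(4, TypeOf(byte(0))) // represents [4]byte
//	v := New(at).Elem()
//	v.Index(0).SetUint(0xFF)
//	_ = v.Len() // 4
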
func appendVarint(x []byte, v uintptr) []byte {
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	x = append(x, byte(v))
	return x
}
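
// For example, appendVarint(nil, 300) appends the bytes 0xAC, 0x02:
// 300 = 0b1_0010_1100 is split into 7-bit groups starting from the least
// significant end (0101100, then 10), and every group except the last is
// emitted with the continuation bit 0x80 set.
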
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.
func toType(t *abi.Type) Type {
	if t == nil {
		return nil
	}
	return toRType(t)
}

type layoutKey struct {
	ftyp *funcType // function signature
	rcvr *abi.Type // receiver type, or nil if none
}

type layoutType struct {
	t         *abi.Type
	framePool *sync.Pool
	abid      abiDesc
}

var layoutCache sync.Map // map[layoutKey]layoutType

// funcLayout computes a struct type representing the layout of the
// stack-assigned function arguments and return values for the function
// type t.
// If rcvr != nil, rcvr specifies the type of the receiver.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
	if t.Kind() != abi.Func {
		panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
	}
	if rcvr != nil && rcvr.Kind() == abi.Interface {
		panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
	}
	k := layoutKey{t, rcvr}
	if lti, ok := layoutCache.Load(k); ok {
		lt := lti.(layoutType)
		return lt.t, lt.framePool, lt.abid
	}

	// Compute the ABI layout.
	abid = newAbiDesc(t, rcvr)

	// build dummy rtype holding gc program
	x := &abi.Type{
		Align_: goarch.PtrSize,
		// Don't add spill space here; it's only necessary in
		// reflectcall's frame, not in the allocated frame.
		// TODO(mknyszek): Remove this comment when register
		// spill space in the frame is no longer required.
		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
	}
	if abid.stackPtrs.n > 0 {
		x.GCData = &abid.stackPtrs.data[0]
	}

	var s string
	if rcvr != nil {
		s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
	} else {
		s = "funcargs(" + stringFor(&t.Type) + ")"
	}
	x.Str = resolveReflectName(newName(s, "", false, false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() any {
		return unsafe_New(x)
	}}
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		framePool: framePool,
		abid:      abid,
	})
	lt := lti.(layoutType)
	return lt.t, lt.framePool, lt.abid
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *abi.Type) bool {
	return t.Kind_&kindDirectIface == 0
}

// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	if bv.n%(8*goarch.PtrSize) == 0 {
		// Runtime needs pointer masks to be a multiple of uintptr in size.
		// Since reflect passes bv.data directly to the runtime as a pointer mask,
		// we append a full uintptr of zeros at a time.
		for i := 0; i < goarch.PtrSize; i++ {
			bv.data = append(bv.data, 0)
		}
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}
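
// As a worked example (illustrative): appending the bits 1, 0, 1 to an empty
// bitVector first grows data by one full pointer-size run of zero bytes and
// then sets bits 0 and 2 of data[0], leaving data[0] == 0b101 and n == 3.
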
func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
	if t.PtrBytes == 0 {
		return
	}

	switch Kind(t.Kind_ & kindMask) {
	case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.Len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.Fields {
			f := &tt.Fields[i]
			addTypeBits(bv, offset+f.Offset, f.Typ)
		}
	}
}

// TypeFor returns the [Type] that represents the type argument T.
func TypeFor[T any]() Type {
	return TypeOf((*T)(nil)).Elem()
}
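
// As a usage sketch, TypeFor is convenient for obtaining interface types,
// which TypeOf cannot report directly from a value (it would return the
// value's dynamic type instead):
//
//	_ = TypeFor[int]()   // same as TypeOf(0)
//	_ = TypeFor[error]() // the interface type error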