// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//
//	loadFactor  %overflow  bytes/entry  hitprobe  missprobe
//	      4.00       2.13        20.77      3.00       4.00
//	      4.50       4.05        17.30      3.25       4.50
//	      5.00       6.85        14.77      3.50       5.00
//	      5.50      10.55        12.94      3.75       5.50
//	      6.00      15.27        11.67      4.00       6.00
//	      6.50      20.90        10.79      4.25       6.50
//	      7.00      27.14        10.15      4.50       7.00
//	      7.50      34.03         9.73      4.75       7.50
//	      8.00      41.10         9.40      5.00       8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/elem pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

// Note: the import list was lost in extraction; the paths below are the
// packages this file's identifiers require (abi, goarch, atomic, math,
// unsafe) and may differ slightly between Go versions.
import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.MapBucketCount]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
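// Illustrative sketch (not part of runtime/map.go). The bmap comment above
// describes the layout in prose; the arithmetic that mapaccess/mapassign use
// to reach slot i is dataOffset + i*keysize for the key and
// dataOffset + bucketCnt*keysize + i*elemsize for the elem. The standalone
// program below re-derives that addressing with plain integers; slotKey and
// slotElem are invented names for this sketch, not runtime functions.
//
//	package main
//
//	import "fmt"
//
//	const bucketCnt = 8 // key/elem pairs per bucket, as in the runtime
//
//	// slotKey returns the byte offset of slot i's key, counted from the
//	// start of the key area (dataOffset in the real bucket).
//	func slotKey(i, keySize uintptr) uintptr { return i * keySize }
//
//	// slotElem returns the byte offset of slot i's elem: all keys come
//	// first, so skip bucketCnt keys, then i elems.
//	func slotElem(i, keySize, elemSize uintptr) uintptr {
//		return bucketCnt*keySize + i*elemSize
//	}
//
//	func main() {
//		// map[int64]int8: 8-byte keys, 1-byte elems. Grouping all keys
//		// before all elems avoids padding between each key and its elem.
//		fmt.Println(slotKey(3, 8))     // 24
//		fmt.Println(slotElem(3, 8, 1)) // 67 = 8*8 + 3
//	}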
// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}
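// Illustrative sketch (not part of runtime/map.go). incrnoverflow above
// switches to probabilistic counting once h.B >= 16 so the uint16 noverflow
// field never needs to count past roughly 1<<15. The standalone program below
// demonstrates the same estimation; the names approx and trueCount are
// invented for the sketch, and math/rand/v2 stands in for the runtime's
// rand().
//
//	package main
//
//	import (
//		"fmt"
//		"math/rand/v2"
//	)
//
//	func main() {
//		const B = 18
//		mask := uint32(1)<<(B-15) - 1 // 7: count each event with probability 1/8
//		approx, trueCount := uint32(0), 0
//		for i := 0; i < 1<<18; i++ {
//			trueCount++
//			if rand.Uint32()&mask == 0 {
//				approx++
//			}
//		}
//		// approx*(mask+1) estimates the true number of increments (262144),
//		// which is why a small uint16 counter is enough for large tables.
//		fmt.Println(trueCount, approx*(mask+1))
//	}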
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap_small
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
//
// makemap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
// To keep the overhead of tracking these overflow buckets to a minimum, // we use the convention that if a preallocated overflow bucket's overflow // pointer is nil, then there are more available by bumping the pointer. // We need a safe non-nil pointer for the last overflow bucket; just use buckets. = (*bmap)(add(, *uintptr(.BucketSize))) := (*bmap)(add(, (-1)*uintptr(.BucketSize))) .setoverflow(, (*bmap)()) }return , }// mapaccess1 returns a pointer to h[key]. Never returns nil, instead// it will return a reference to the zero object for the elem type if// the key is not in the map.// NOTE: The returned pointer may keep the whole map live, so don't// hold onto it for very long.func mapaccess1( *maptype, *hmap, unsafe.Pointer) unsafe.Pointer {ifraceenabled && != nil { := getcallerpc() := abi.FuncPCABIInternal()racereadpc(unsafe.Pointer(), , )raceReadObjectPC(.Key, , , ) }ifmsanenabled && != nil {msanread(, .Key.Size_) }ifasanenabled && != nil {asanread(, .Key.Size_) }if == nil || .count == 0 {if := mapKeyError(, ); != nil {panic() // see issue 23734 }returnunsafe.Pointer(&zeroVal[0]) }if .flags&hashWriting != 0 {fatal("concurrent map read and map write") } := .Hasher(, uintptr(.hash0)) := bucketMask(.B) := (*bmap)(add(.buckets, (&)*uintptr(.BucketSize)))if := .oldbuckets; != nil {if !.sameSizeGrow() {// There used to be half as many buckets; mask down one more power of two. >>= 1 } := (*bmap)(add(, (&)*uintptr(.BucketSize)))if !evacuated() { = } } := tophash():for ; != nil; = .overflow() {for := uintptr(0); < abi.MapBucketCount; ++ {if .tophash[] != {if .tophash[] == emptyRest {break }continue } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) }if .Key.Equal(, ) { := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { = *((*unsafe.Pointer)()) }return } } }returnunsafe.Pointer(&zeroVal[0])}// mapaccess2 should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/ugorji/go/codec//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapaccess2func mapaccess2( *maptype, *hmap, unsafe.Pointer) (unsafe.Pointer, bool) {ifraceenabled && != nil { := getcallerpc() := abi.FuncPCABIInternal()racereadpc(unsafe.Pointer(), , )raceReadObjectPC(.Key, , , ) }ifmsanenabled && != nil {msanread(, .Key.Size_) }ifasanenabled && != nil {asanread(, .Key.Size_) }if == nil || .count == 0 {if := mapKeyError(, ); != nil {panic() // see issue 23734 }returnunsafe.Pointer(&zeroVal[0]), false }if .flags&hashWriting != 0 {fatal("concurrent map read and map write") } := .Hasher(, uintptr(.hash0)) := bucketMask(.B) := (*bmap)(add(.buckets, (&)*uintptr(.BucketSize)))if := .oldbuckets; != nil {if !.sameSizeGrow() {// There used to be half as many buckets; mask down one more power of two. >>= 1 } := (*bmap)(add(, (&)*uintptr(.BucketSize)))if !evacuated() { = } } := tophash():for ; != nil; = .overflow() {for := uintptr(0); < abi.MapBucketCount; ++ {if .tophash[] != {if .tophash[] == emptyRest {break }continue } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) }if .Key.Equal(, ) { := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { = *((*unsafe.Pointer)()) }return , true } } }returnunsafe.Pointer(&zeroVal[0]), false}// returns both key and elem. 
Used by map iterator.func mapaccessK( *maptype, *hmap, unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {if == nil || .count == 0 {returnnil, nil } := .Hasher(, uintptr(.hash0)) := bucketMask(.B) := (*bmap)(add(.buckets, (&)*uintptr(.BucketSize)))if := .oldbuckets; != nil {if !.sameSizeGrow() {// There used to be half as many buckets; mask down one more power of two. >>= 1 } := (*bmap)(add(, (&)*uintptr(.BucketSize)))if !evacuated() { = } } := tophash():for ; != nil; = .overflow() {for := uintptr(0); < abi.MapBucketCount; ++ {if .tophash[] != {if .tophash[] == emptyRest {break }continue } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) }if .Key.Equal(, ) { := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { = *((*unsafe.Pointer)()) }return , } } }returnnil, nil}func mapaccess1_fat( *maptype, *hmap, , unsafe.Pointer) unsafe.Pointer { := mapaccess1(, , )if == unsafe.Pointer(&zeroVal[0]) {return }return}func mapaccess2_fat( *maptype, *hmap, , unsafe.Pointer) (unsafe.Pointer, bool) { := mapaccess1(, , )if == unsafe.Pointer(&zeroVal[0]) {return , false }return , true}// Like mapaccess, but allocates a slot for the key if it is not present in the map.//// mapassign should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/bytedance/sonic// - github.com/cloudwego/frugal// - github.com/RomiChan/protobuf// - github.com/segmentio/encoding// - github.com/ugorji/go/codec//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapassignfunc mapassign( *maptype, *hmap, unsafe.Pointer) unsafe.Pointer {if == nil {panic(plainError("assignment to entry in nil map")) }ifraceenabled { := getcallerpc() := abi.FuncPCABIInternal()racewritepc(unsafe.Pointer(), , )raceReadObjectPC(.Key, , , ) }ifmsanenabled {msanread(, .Key.Size_) }ifasanenabled {asanread(, .Key.Size_) }if .flags&hashWriting != 0 {fatal("concurrent map writes") } := .Hasher(, uintptr(.hash0))// Set hashWriting after calling t.hasher, since t.hasher may panic, // in which case we have not actually done a write. .flags ^= hashWritingif .buckets == nil { .buckets = newobject(.Bucket) // newarray(t.Bucket, 1) }: := & bucketMask(.B)if .growing() {growWork(, , ) } := (*bmap)(add(.buckets, *uintptr(.BucketSize))) := tophash()var *uint8varunsafe.Pointervarunsafe.Pointer:for {for := uintptr(0); < abi.MapBucketCount; ++ {if .tophash[] != {ifisEmpty(.tophash[]) && == nil { = &.tophash[] = add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize)) = add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize)) }if .tophash[] == emptyRest {break }continue } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) }if !.Key.Equal(, ) {continue }// already have a mapping for key. Update it.if .NeedKeyUpdate() {typedmemmove(.Key, , ) } = add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))goto } := .overflow()if == nil {break } = }// Did not find mapping for key. 
Allocate new cell & add entry.// If we hit the max load factor or we have too many overflow buckets, // and we're not already in the middle of growing, start growing.if !.growing() && (overLoadFactor(.count+1, .B) || tooManyOverflowBuckets(.noverflow, .B)) {hashGrow(, )goto// Growing the table invalidates everything, so try again }if == nil {// The current bucket and all the overflow buckets connected to it are full, allocate a new one. := .newoverflow(, ) = &.tophash[0] = add(unsafe.Pointer(), dataOffset) = add(, abi.MapBucketCount*uintptr(.KeySize)) }// store new key/elem at insert positionif .IndirectKey() { := newobject(.Key) *(*unsafe.Pointer)() = = }if .IndirectElem() { := newobject(.Elem) *(*unsafe.Pointer)() = }typedmemmove(.Key, , ) * = .count++:if .flags&hashWriting == 0 {fatal("concurrent map writes") } .flags &^= hashWritingif .IndirectElem() { = *((*unsafe.Pointer)()) }return}// mapdelete should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/ugorji/go/codec//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapdeletefunc mapdelete( *maptype, *hmap, unsafe.Pointer) {ifraceenabled && != nil { := getcallerpc() := abi.FuncPCABIInternal()racewritepc(unsafe.Pointer(), , )raceReadObjectPC(.Key, , , ) }ifmsanenabled && != nil {msanread(, .Key.Size_) }ifasanenabled && != nil {asanread(, .Key.Size_) }if == nil || .count == 0 {if := mapKeyError(, ); != nil {panic() // see issue 23734 }return }if .flags&hashWriting != 0 {fatal("concurrent map writes") } := .Hasher(, uintptr(.hash0))// Set hashWriting after calling t.hasher, since t.hasher may panic, // in which case we have not actually done a write (delete). .flags ^= hashWriting := & bucketMask(.B)if .growing() {growWork(, , ) } := (*bmap)(add(.buckets, *uintptr(.BucketSize))) := := tophash():for ; != nil; = .overflow() {for := uintptr(0); < abi.MapBucketCount; ++ {if .tophash[] != {if .tophash[] == emptyRest {break }continue } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize)) := if .IndirectKey() { = *((*unsafe.Pointer)()) }if !.Key.Equal(, ) {continue }// Only clear key if there are pointers in it.if .IndirectKey() { *(*unsafe.Pointer)() = nil } elseif .Key.Pointers() {memclrHasPointers(, .Key.Size_) } := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { *(*unsafe.Pointer)() = nil } elseif .Elem.Pointers() {memclrHasPointers(, .Elem.Size_) } else {memclrNoHeapPointers(, .Elem.Size_) } .tophash[] = emptyOne// If the bucket now ends in a bunch of emptyOne states, // change those to emptyRest states. // It would be nice to make this a separate function, but // for loops are not currently inlineable.if == abi.MapBucketCount-1 {if .overflow() != nil && .overflow().tophash[0] != emptyRest {goto } } else {if .tophash[+1] != emptyRest {goto } }for { .tophash[] = emptyRestif == 0 {if == {break// beginning of initial bucket, we're done. }// Find previous bucket, continue at its last entry. := for = ; .overflow() != ; = .overflow() { } = abi.MapBucketCount - 1 } else { -- }if .tophash[] != emptyOne {break } } : .count--// Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. 
See issue 25237.if .count == 0 { .hash0 = uint32(rand()) }break } }if .flags&hashWriting == 0 {fatal("concurrent map writes") } .flags &^= hashWriting}// mapiterinit initializes the hiter struct used for ranging over maps.// The hiter struct pointed to by 'it' is allocated on the stack// by the compilers order pass or on the heap by reflect_mapiterinit.// Both need to have zeroed hiter since the struct contains pointers.//// mapiterinit should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/bytedance/sonic// - github.com/cloudwego/frugal// - github.com/goccy/go-json// - github.com/RomiChan/protobuf// - github.com/segmentio/encoding// - github.com/ugorji/go/codec// - github.com/wI2L/jettison//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapiterinitfunc mapiterinit( *maptype, *hmap, *hiter) {ifraceenabled && != nil { := getcallerpc()racereadpc(unsafe.Pointer(), , abi.FuncPCABIInternal()) } .t = if == nil || .count == 0 {return }ifunsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go } .h = // grab snapshot of bucket state .B = .B .buckets = .bucketsif !.Bucket.Pointers() {// Allocate the current slice and remember pointers to both current and old. // This preserves all relevant overflow buckets alive even if // the table grows and/or overflow buckets are added to the table // while we are iterating. .createOverflow() .overflow = .extra.overflow .oldoverflow = .extra.oldoverflow }// decide where to start := uintptr(rand()) .startBucket = & bucketMask(.B) .offset = uint8( >> .B & (abi.MapBucketCount - 1))// iterator state .bucket = .startBucket// Remember we have an iterator. // Can run concurrently with another mapiterinit().if := .flags; &(iterator|oldIterator) != iterator|oldIterator {atomic.Or8(&.flags, iterator|oldIterator) }mapiternext()}// mapiternext should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/bytedance/sonic// - github.com/cloudwego/frugal// - github.com/RomiChan/protobuf// - github.com/segmentio/encoding// - github.com/ugorji/go/codec// - gonum.org/v1/gonum//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapiternextfunc mapiternext( *hiter) { := .hifraceenabled { := getcallerpc()racereadpc(unsafe.Pointer(), , abi.FuncPCABIInternal()) }if .flags&hashWriting != 0 {fatal("concurrent map iteration and map write") } := .t := .bucket := .bptr := .i := .checkBucket:if == nil {if == .startBucket && .wrapped {// end of iteration .key = nil .elem = nilreturn }if .growing() && .B == .B {// Iterator was started in the middle of a grow, and the grow isn't done yet. // If the bucket we're looking at hasn't been filled in yet (i.e. the old // bucket hasn't been evacuated) then we need to iterate through the old // bucket and only return the ones that will be migrated to this bucket. 
:= & .h.oldbucketmask() = (*bmap)(add(.oldbuckets, *uintptr(.BucketSize)))if !evacuated() { = } else { = (*bmap)(add(.buckets, *uintptr(.BucketSize))) = noCheck } } else { = (*bmap)(add(.buckets, *uintptr(.BucketSize))) = noCheck } ++if == bucketShift(.B) { = 0 .wrapped = true } = 0 }for ; < abi.MapBucketCount; ++ { := ( + .offset) & (abi.MapBucketCount - 1)ifisEmpty(.tophash[]) || .tophash[] == evacuatedEmpty {// TODO: emptyRest is hard to use here, as we start iterating // in the middle of a bucket. It's feasible, just tricky.continue } := add(unsafe.Pointer(), dataOffset+uintptr()*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) } := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+uintptr()*uintptr(.ValueSize))if != noCheck && !.sameSizeGrow() {// Special case: iterator was started during a grow to a larger size // and the grow is not done yet. We're working on a bucket whose // oldbucket has not been evacuated yet. Or at least, it wasn't // evacuated when we started the bucket. So we're iterating // through the oldbucket, skipping any keys that will go // to the other new bucket (each oldbucket expands to two // buckets during a grow).if .ReflexiveKey() || .Key.Equal(, ) {// If the item in the oldbucket is not destined for // the current new bucket in the iteration, skip it. := .Hasher(, uintptr(.hash0))if &bucketMask(.B) != {continue } } else {// Hash isn't repeatable if k != k (NaNs). We need a // repeatable and randomish choice of which direction // to send NaNs during evacuation. We'll use the low // bit of tophash to decide which way NaNs go. // NOTE: this case is why we need two evacuate tophash // values, evacuatedX and evacuatedY, that differ in // their low bit.if >>(.B-1) != uintptr(.tophash[]&1) {continue } } }if (.tophash[] != evacuatedX && .tophash[] != evacuatedY) || !(.ReflexiveKey() || .Key.Equal(, )) {// This is the golden data, we can return it. // OR // key!=key, so the entry can't be deleted or updated, so we can just return it. // That's lucky for us because when key!=key we can't look it up successfully. .key = if .IndirectElem() { = *((*unsafe.Pointer)()) } .elem = } else {// The hash table has grown since the iterator was started. // The golden data for this key is now somewhere else. // Check the current hash table for the data. // This code handles the case where the key // has been deleted, updated, or deleted and reinserted. // NOTE: we need to regrab the key as it has potentially been // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). , := mapaccessK(, , )if == nil {continue// key has been deleted } .key = .elem = } .bucket = if .bptr != { // avoid unnecessary write barrier; see issue 14921 .bptr = } .i = + 1 .checkBucket = return } = .overflow() = 0goto}// mapclear deletes all keys from a map.// It is called by the compiler.//// mapclear should be an internal detail,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/cloudwego/frugal//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname mapclearfunc mapclear( *maptype, *hmap) {ifraceenabled && != nil { := getcallerpc() := abi.FuncPCABIInternal()racewritepc(unsafe.Pointer(), , ) }if == nil || .count == 0 {return }if .flags&hashWriting != 0 {fatal("concurrent map writes") } .flags ^= hashWriting// Mark buckets empty, so existing iterators can be terminated, see issue #59411. 
:= func( unsafe.Pointer, uintptr) {for := uintptr(0); <= ; ++ { := (*bmap)(add(, *uintptr(.BucketSize)))for ; != nil; = .overflow() {for := uintptr(0); < abi.MapBucketCount; ++ { .tophash[] = emptyRest } } } } (.buckets, bucketMask(.B))if := .oldbuckets; != nil { (, .oldbucketmask()) } .flags &^= sameSizeGrow .oldbuckets = nil .nevacuate = 0 .noverflow = 0 .count = 0// Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. .hash0 = uint32(rand())// Keep the mapextra allocation but clear any extra information.if .extra != nil { *.extra = mapextra{} }// makeBucketArray clears the memory pointed to by h.buckets // and recovers any overflow buckets by generating them // as if h.buckets was newly alloced. , := makeBucketArray(, .B, .buckets)if != nil {// If overflow buckets are created then h.extra // will have been allocated during initial bucket creation. .extra.nextOverflow = }if .flags&hashWriting == 0 {fatal("concurrent map writes") } .flags &^= hashWriting}func hashGrow( *maptype, *hmap) {// If we've hit the load factor, get bigger. // Otherwise, there are too many overflow buckets, // so keep the same number of buckets and "grow" laterally. := uint8(1)if !overLoadFactor(.count+1, .B) { = 0 .flags |= sameSizeGrow } := .buckets , := makeBucketArray(, .B+, nil) := .flags &^ (iterator | oldIterator)if .flags&iterator != 0 { |= oldIterator }// commit the grow (atomic wrt gc) .B += .flags = .oldbuckets = .buckets = .nevacuate = 0 .noverflow = 0if .extra != nil && .extra.overflow != nil {// Promote current overflow buckets to the old generation.if .extra.oldoverflow != nil {throw("oldoverflow is not nil") } .extra.oldoverflow = .extra.overflow .extra.overflow = nil }if != nil {if .extra == nil { .extra = new(mapextra) } .extra.nextOverflow = }// the actual copying of the hash table data is done incrementally // by growWork() and evacuate().}// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.func overLoadFactor( int, uint8) bool {return > abi.MapBucketCount && uintptr() > loadFactorNum*(bucketShift()/loadFactorDen)}// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.// Note that most of these overflow buckets must be in sparse use;// if use was dense, then we'd have already triggered regular map growth.func tooManyOverflowBuckets( uint16, uint8) bool {// If the threshold is too low, we do extraneous work. // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory. // "too many" means (approximately) as many overflow buckets as regular buckets. // See incrnoverflow for more details.if > 15 { = 15 }// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.return >= uint16(1)<<(&15)}// growing reports whether h is growing. 
The growth may be to the same size or bigger.func ( *hmap) () bool {return .oldbuckets != nil}// sameSizeGrow reports whether the current growth is to a map of the same size.func ( *hmap) () bool {return .flags&sameSizeGrow != 0}// noldbuckets calculates the number of buckets prior to the current map growth.func ( *hmap) () uintptr { := .Bif !.sameSizeGrow() { -- }returnbucketShift()}// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().func ( *hmap) () uintptr {return .noldbuckets() - 1}func growWork( *maptype, *hmap, uintptr) {// make sure we evacuate the oldbucket corresponding // to the bucket we're about to useevacuate(, , &.oldbucketmask())// evacuate one more oldbucket to make progress on growingif .growing() {evacuate(, , .nevacuate) }}func bucketEvacuated( *maptype, *hmap, uintptr) bool { := (*bmap)(add(.oldbuckets, *uintptr(.BucketSize)))returnevacuated()}// evacDst is an evacuation destination.type evacDst struct { b *bmap// current destination bucket i int// key/elem index into b k unsafe.Pointer// pointer to current key storage e unsafe.Pointer// pointer to current elem storage}func evacuate( *maptype, *hmap, uintptr) { := (*bmap)(add(.oldbuckets, *uintptr(.BucketSize))) := .noldbuckets()if !evacuated() {// TODO: reuse overflow buckets instead of using new ones, if there // is no iterator using the old buckets. (If !oldIterator.)// xy contains the x and y (low and high) evacuation destinations.var [2]evacDst := &[0] .b = (*bmap)(add(.buckets, *uintptr(.BucketSize))) .k = add(unsafe.Pointer(.b), dataOffset) .e = add(.k, abi.MapBucketCount*uintptr(.KeySize))if !.sameSizeGrow() {// Only calculate y pointers if we're growing bigger. // Otherwise GC can see bad pointers. := &[1] .b = (*bmap)(add(.buckets, (+)*uintptr(.BucketSize))) .k = add(unsafe.Pointer(.b), dataOffset) .e = add(.k, abi.MapBucketCount*uintptr(.KeySize)) }for ; != nil; = .overflow() { := add(unsafe.Pointer(), dataOffset) := add(, abi.MapBucketCount*uintptr(.KeySize))for := 0; < abi.MapBucketCount; , , = +1, add(, uintptr(.KeySize)), add(, uintptr(.ValueSize)) { := .tophash[]ifisEmpty() { .tophash[] = evacuatedEmptycontinue }if < minTopHash {throw("bad map state") } := if .IndirectKey() { = *((*unsafe.Pointer)()) }varuint8if !.sameSizeGrow() {// Compute hash to make our evacuation decision (whether we need // to send this key/elem to bucket x or bucket y). := .Hasher(, uintptr(.hash0))if .flags&iterator != 0 && !.ReflexiveKey() && !.Key.Equal(, ) {// If key != key (NaNs), then the hash could be (and probably // will be) entirely different from the old hash. Moreover, // it isn't reproducible. Reproducibility is required in the // presence of iterators, as our evacuation decision must // match whatever decision the iterator made. // Fortunately, we have the freedom to send these keys either // way. Also, tophash is meaningless for these kinds of keys. // We let the low bit of tophash drive the evacuation decision. // We recompute a new random tophash for the next level so // these keys will get evenly distributed across all buckets // after multiple grows. 
= & 1 = tophash() } else {if & != 0 { = 1 } } }ifevacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {throw("bad evacuatedN") } .tophash[] = evacuatedX + // evacuatedX + 1 == evacuatedY := &[] // evacuation destinationif .i == abi.MapBucketCount { .b = .newoverflow(, .b) .i = 0 .k = add(unsafe.Pointer(.b), dataOffset) .e = add(.k, abi.MapBucketCount*uintptr(.KeySize)) } .b.tophash[.i&(abi.MapBucketCount-1)] = // mask dst.i as an optimization, to avoid a bounds checkif .IndirectKey() { *(*unsafe.Pointer)(.k) = // copy pointer } else {typedmemmove(.Key, .k, ) // copy elem }if .IndirectElem() { *(*unsafe.Pointer)(.e) = *(*unsafe.Pointer)() } else {typedmemmove(.Elem, .e, ) } .i++// These updates might push these pointers past the end of the // key or elem arrays. That's ok, as we have the overflow pointer // at the end of the bucket to protect against pointing past the // end of the bucket. .k = add(.k, uintptr(.KeySize)) .e = add(.e, uintptr(.ValueSize)) } }// Unlink the overflow buckets & clear key/elem to help GC.if .flags&oldIterator == 0 && .Bucket.Pointers() { := add(.oldbuckets, *uintptr(.BucketSize))// Preserve b.tophash because the evacuation // state is maintained there. := add(, dataOffset) := uintptr(.BucketSize) - dataOffsetmemclrHasPointers(, ) } }if == .nevacuate {advanceEvacuationMark(, , ) }}func advanceEvacuationMark( *hmap, *maptype, uintptr) { .nevacuate++// Experiments suggest that 1024 is overkill by at least an order of magnitude. // Put it in there as a safeguard anyway, to ensure O(1) behavior. := .nevacuate + 1024if > { = }for .nevacuate != && bucketEvacuated(, , .nevacuate) { .nevacuate++ }if .nevacuate == { // newbit == # of oldbuckets// Growing is all done. Free old main bucket array. .oldbuckets = nil// Can discard old overflow buckets as well. // If they are still referenced by an iterator, // then the iterator holds a pointers to the slice.if .extra != nil { .extra.oldoverflow = nil } .flags &^= sameSizeGrow }}// Reflect stubs. 
Called from ../reflect/asm_*.s// reflect_makemap is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/modern-go/reflect2// - github.com/goccy/go-json// - github.com/RomiChan/protobuf// - github.com/segmentio/encoding// - github.com/v2pro/plz//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_makemap reflect.makemapfunc reflect_makemap( *maptype, int) *hmap {// Check invariants and reflects math.if .Key.Equal == nil {throw("runtime.reflect_makemap: unsupported map key type") }if .Key.Size_ > abi.MapMaxKeyBytes && (!.IndirectKey() || .KeySize != uint8(goarch.PtrSize)) || .Key.Size_ <= abi.MapMaxKeyBytes && (.IndirectKey() || .KeySize != uint8(.Key.Size_)) {throw("key size wrong") }if .Elem.Size_ > abi.MapMaxElemBytes && (!.IndirectElem() || .ValueSize != uint8(goarch.PtrSize)) || .Elem.Size_ <= abi.MapMaxElemBytes && (.IndirectElem() || .ValueSize != uint8(.Elem.Size_)) {throw("elem size wrong") }if .Key.Align_ > abi.MapBucketCount {throw("key align too big") }if .Elem.Align_ > abi.MapBucketCount {throw("elem align too big") }if .Key.Size_%uintptr(.Key.Align_) != 0 {throw("key size not a multiple of key align") }if .Elem.Size_%uintptr(.Elem.Align_) != 0 {throw("elem size not a multiple of elem align") }ifabi.MapBucketCount < 8 {throw("bucketsize too small for proper alignment") }ifdataOffset%uintptr(.Key.Align_) != 0 {throw("need padding in bucket (key)") }ifdataOffset%uintptr(.Elem.Align_) != 0 {throw("need padding in bucket (elem)") }returnmakemap(, , nil)}// reflect_mapaccess is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/modern-go/reflect2// - github.com/v2pro/plz//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_mapaccess reflect.mapaccessfunc reflect_mapaccess( *maptype, *hmap, unsafe.Pointer) unsafe.Pointer { , := mapaccess2(, , )if ! {// reflect wants nil for a missing element = nil }return}//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststrfunc reflect_mapaccess_faststr( *maptype, *hmap, string) unsafe.Pointer { , := mapaccess2_faststr(, , )if ! 
{// reflect wants nil for a missing element = nil }return}// reflect_mapassign is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/v2pro/plz//// Do not remove or change the type signature.////go:linkname reflect_mapassign reflect.mapassign0func reflect_mapassign( *maptype, *hmap, unsafe.Pointer, unsafe.Pointer) { := mapassign(, , )typedmemmove(.Elem, , )}//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0func reflect_mapassign_faststr( *maptype, *hmap, string, unsafe.Pointer) { := mapassign_faststr(, , )typedmemmove(.Elem, , )}//go:linkname reflect_mapdelete reflect.mapdeletefunc reflect_mapdelete( *maptype, *hmap, unsafe.Pointer) {mapdelete(, , )}//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststrfunc reflect_mapdelete_faststr( *maptype, *hmap, string) {mapdelete_faststr(, , )}// reflect_mapiterinit is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/modern-go/reflect2// - gitee.com/quant1x/gox// - github.com/v2pro/plz// - github.com/wI2L/jettison//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_mapiterinit reflect.mapiterinitfunc reflect_mapiterinit( *maptype, *hmap, *hiter) {mapiterinit(, , )}// reflect_mapiternext is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - gitee.com/quant1x/gox// - github.com/modern-go/reflect2// - github.com/goccy/go-json// - github.com/v2pro/plz// - github.com/wI2L/jettison//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_mapiternext reflect.mapiternextfunc reflect_mapiternext( *hiter) {mapiternext()}// reflect_mapiterkey is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/goccy/go-json// - gonum.org/v1/gonum//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_mapiterkey reflect.mapiterkeyfunc reflect_mapiterkey( *hiter) unsafe.Pointer {return .key}// reflect_mapiterelem is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/goccy/go-json// - gonum.org/v1/gonum//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_mapiterelem reflect.mapiterelemfunc reflect_mapiterelem( *hiter) unsafe.Pointer {return .elem}// reflect_maplen is for package reflect,// but widely used packages access it using linkname.// Notable members of the hall of shame include:// - github.com/goccy/go-json// - github.com/wI2L/jettison//// Do not remove or change the type signature.// See go.dev/issue/67401.////go:linkname reflect_maplen reflect.maplenfunc reflect_maplen( *hmap) int {if == nil {return0 }ifraceenabled { := getcallerpc()racereadpc(unsafe.Pointer(), , abi.FuncPCABIInternal()) }return .count}//go:linkname reflect_mapclear reflect.mapclearfunc reflect_mapclear( *maptype, *hmap) {mapclear(, )}//go:linkname reflectlite_maplen internal/reflectlite.maplenfunc reflectlite_maplen( *hmap) int {if == nil {return0 }ifraceenabled { := getcallerpc()racereadpc(unsafe.Pointer(), , abi.FuncPCABIInternal(reflect_maplen)) }return .count}// mapinitnoop is a no-op function known the Go linker; if a given global// map (of the right size) is 
determined to be dead, the linker will// rewrite the relocation (from the package init func) from the outlined// map init function to this symbol. Defined in assembly so as to avoid// complications with instrumentation (coverage, etc).func mapinitnoop()// mapclone for implementing maps.Clone////go:linkname mapclone maps.clonefunc mapclone( any) any { := efaceOf(&) .data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(._type)), (*hmap)(.data)))return}// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.func moveToBmap( *maptype, *hmap, *bmap, int, *bmap) (*bmap, int) {for := 0; < abi.MapBucketCount; ++ {ifisEmpty(.tophash[]) {continue }for ; < abi.MapBucketCount; ++ {ifisEmpty(.tophash[]) {break } }if == abi.MapBucketCount { = .newoverflow(, ) = 0 } := add(unsafe.Pointer(), dataOffset+uintptr()*uintptr(.KeySize)) := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+uintptr()*uintptr(.ValueSize)) := add(unsafe.Pointer(), dataOffset+uintptr()*uintptr(.KeySize)) := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+uintptr()*uintptr(.ValueSize)) .tophash[] = .tophash[]if .IndirectKey() { = *(*unsafe.Pointer)()if .NeedKeyUpdate() { := newobject(.Key)typedmemmove(.Key, , ) = }// Note: if NeedKeyUpdate is false, then the memory // used to store the key is immutable, so we can share // it between the original map and its clone. *(*unsafe.Pointer)() = } else {typedmemmove(.Key, , ) }if .IndirectElem() { = *(*unsafe.Pointer)() := newobject(.Elem)typedmemmove(.Elem, , ) *(*unsafe.Pointer)() = } else {typedmemmove(.Elem, , ) } ++ .count++ }return , }func mapclone2( *maptype, *hmap) *hmap { := makemap(, .count, nil) .hash0 = .hash0 .nevacuate = 0// flags do not need to be copied here, just like a new map has no flags.if .count == 0 {return }if .flags&hashWriting != 0 {fatal("concurrent map clone and map write") }if .B == 0 && !(.IndirectKey() && .NeedKeyUpdate()) && !.IndirectElem() {// Quick copy for small maps. .buckets = newobject(.Bucket) .count = .counttypedmemmove(.Bucket, .buckets, .buckets)return }if .B == 0 { .buckets = newobject(.Bucket) } := int(bucketShift(.B)) := int(bucketShift(.B))for := 0; < ; ++ { := (*bmap)(add(.buckets, uintptr(*int(.BucketSize)))) := 0for := 0; < ; += { := (*bmap)(add(.buckets, uintptr((+)*int(.BucketSize))))for != nil { , = moveToBmap(, , , , ) = .overflow() } } }if .oldbuckets == nil {return } := .B := .oldbucketsif !.sameSizeGrow() { -- } := int(bucketShift())for := 0; < ; ++ { := (*bmap)(add(, uintptr(*int(.BucketSize))))ifevacuated() {continue }if >= .B { // main bucket bits in dst is less than oldB bits in src := (*bmap)(add(.buckets, (uintptr()&bucketMask(.B))*uintptr(.BucketSize)))for .overflow() != nil { = .overflow() } := 0for != nil { , = moveToBmap(, , , , ) = .overflow() }continue }// oldB < dst.B, so a single source bucket may go to multiple destination buckets. 
// Process entries one at a time.for != nil {// move from oldBlucket to new bucketfor := uintptr(0); < abi.MapBucketCount; ++ {ifisEmpty(.tophash[]) {continue }if .flags&hashWriting != 0 {fatal("concurrent map clone and map write") } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) } := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { = *((*unsafe.Pointer)()) } := mapassign(, , )typedmemmove(.Elem, , ) } = .overflow() } }return}// keys for implementing maps.keys////go:linkname keys maps.keysfunc keys( any, unsafe.Pointer) { := efaceOf(&) := (*maptype)(unsafe.Pointer(._type)) := (*hmap)(.data)if == nil || .count == 0 {return } := (*slice)() := int(rand()) := uint8( >> .B & (abi.MapBucketCount - 1))if .B == 0 {copyKeys(, , (*bmap)(.buckets), , )return } := int(bucketShift(.B)) := .bucketsfor := 0; < ; ++ { := ( + ) & ( - 1) := (*bmap)(add(, uintptr()*uintptr(.BucketSize)))copyKeys(, , , , ) }if .growing() { := int(.noldbuckets())for := 0; < ; ++ { := ( + ) & ( - 1) := (*bmap)(add(.oldbuckets, uintptr()*uintptr(.BucketSize)))ifevacuated() {continue }copyKeys(, , , , ) } }return}func copyKeys( *maptype, *hmap, *bmap, *slice, uint8) {for != nil {for := uintptr(0); < abi.MapBucketCount; ++ { := ( + uintptr()) & (abi.MapBucketCount - 1)ifisEmpty(.tophash[]) {continue }if .flags&hashWriting != 0 {fatal("concurrent map read and map write") } := add(unsafe.Pointer(), dataOffset+*uintptr(.KeySize))if .IndirectKey() { = *((*unsafe.Pointer)()) }if .len >= .cap {fatal("concurrent map read and map write") }typedmemmove(.Key, add(.array, uintptr(.len)*uintptr(.Key.Size())), ) .len++ } = .overflow() }}// values for implementing maps.values////go:linkname values maps.valuesfunc values( any, unsafe.Pointer) { := efaceOf(&) := (*maptype)(unsafe.Pointer(._type)) := (*hmap)(.data)if == nil || .count == 0 {return } := (*slice)() := int(rand()) := uint8( >> .B & (abi.MapBucketCount - 1))if .B == 0 {copyValues(, , (*bmap)(.buckets), , )return } := int(bucketShift(.B)) := .bucketsfor := 0; < ; ++ { := ( + ) & ( - 1) := (*bmap)(add(, uintptr()*uintptr(.BucketSize)))copyValues(, , , , ) }if .growing() { := int(.noldbuckets())for := 0; < ; ++ { := ( + ) & ( - 1) := (*bmap)(add(.oldbuckets, uintptr()*uintptr(.BucketSize)))ifevacuated() {continue }copyValues(, , , , ) } }return}func copyValues( *maptype, *hmap, *bmap, *slice, uint8) {for != nil {for := uintptr(0); < abi.MapBucketCount; ++ { := ( + uintptr()) & (abi.MapBucketCount - 1)ifisEmpty(.tophash[]) {continue }if .flags&hashWriting != 0 {fatal("concurrent map read and map write") } := add(unsafe.Pointer(), dataOffset+abi.MapBucketCount*uintptr(.KeySize)+*uintptr(.ValueSize))if .IndirectElem() { = *((*unsafe.Pointer)()) }if .len >= .cap {fatal("concurrent map read and map write") }typedmemmove(.Elem, add(.array, uintptr(.len)*uintptr(.Elem.Size())), ) .len++ } = .overflow() }}
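// ---------------------------------------------------------------------------
// Illustrative sketch (not part of runtime/map.go). The header comment and
// overLoadFactor above describe how a hash is split into a bucket index
// (low-order bits) and a tophash (top byte, shifted past the reserved
// markers), and how growth triggers at an average load of 8*13/16 = 6.5
// entries per bucket. The standalone program below re-derives those three
// calculations with plain integers; bucketIndex, topHash and overLoad are
// invented names for the sketch, and it assumes a 64-bit uintptr.
//
//	package main
//
//	import "fmt"
//
//	const (
//		bucketCnt  = 8 // key/elem pairs per bucket
//		minTopHash = 5 // tophash values below this are reserved markers
//	)
//
//	// bucketIndex selects a bucket from the low-order B bits of the hash,
//	// as mapaccess/mapassign do with hash & bucketMask(h.B).
//	func bucketIndex(hash uintptr, B uint8) uintptr {
//		return hash & (uintptr(1)<<B - 1)
//	}
//
//	// topHash takes the top byte of the hash and shifts it past the
//	// reserved marker values (emptyRest, emptyOne, evacuated*).
//	func topHash(hash uintptr) uint8 {
//		top := uint8(hash >> (64 - 8))
//		if top < minTopHash {
//			top += minTopHash
//		}
//		return top
//	}
//
//	// overLoad mirrors overLoadFactor: with loadFactorNum/loadFactorDen =
//	// 13/2, growth triggers once count > 13*(2^B/2), i.e. 6.5 per bucket.
//	func overLoad(count int, B uint8) bool {
//		return count > bucketCnt && uintptr(count) > 13*((uintptr(1)<<B)/2)
//	}
//
//	func main() {
//		const B = 5 // 2^5 = 32 buckets
//		hash := uintptr(0xdeadbeefcafe1234)
//		fmt.Printf("bucket %d, tophash %#02x\n", bucketIndex(hash, B), topHash(hash))
//		fmt.Println(overLoad(208, B), overLoad(209, B)) // false true: threshold is 32*6.5 = 208
//	}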