// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

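// mapaccess1_fast64 is the specialized lookup path for maps whose keys are
// 64-bit scalar types such as map[uint64]V or map[int64]V. A plain read like
//
//	v := m[k]
//
// is expected to be lowered by the compiler to a call of this function. It
// returns a pointer to the element, or a pointer to the zero value if the key
// is absent; it never returns nil.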
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

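// mapaccess2_fast64 is the comma-ok variant of mapaccess1_fast64. A lookup
// written as
//
//	v, ok := m[k]
//
// is expected to compile to a call of this function, which additionally
// reports whether the key was present.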
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

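// mapassign_fast64 returns a pointer to the element slot for key, inserting
// a new slot if the key is not already present. An assignment such as
//
//	m[k] = v
//
// is expected to compile to a call of this function followed by a store of v
// through the returned pointer; the function itself writes only the key and
// tophash, not the value.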
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

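// mapassign_fast64ptr is the pointer-key counterpart of mapassign_fast64,
// used when the 64-bit key is itself a pointer (for example map[*T]V on
// 64-bit platforms). Storing the key through an *unsafe.Pointer rather than a
// *uint64 lets the compiler emit the write barrier that a pointer store into
// the heap requires.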
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

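// mapdelete_fast64 removes the entry for key, if present. A
//
//	delete(m, k)
//
// on a 64-bit-keyed map is expected to compile to a call of this function.
// Besides clearing the key and element slots, it converts a trailing run of
// emptyOne tophash marks into emptyRest so that later lookups can stop early.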
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.PtrBytes != 0 {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32-bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

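// growWork_fast64 does a bounded amount of incremental evacuation while the
// map is growing: it evacuates the old bucket that the bucket about to be
// used maps onto, plus at most one more, so the cost of growing is spread
// across writes instead of being paid all at once.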
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use.
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// Evacuate one more oldbucket to make progress on growing.
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

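// evacuate_fast64 copies the contents of old bucket oldbucket (including its
// overflow chain) into the new bucket array. When the table doubles in size,
// each old bucket splits into two destinations, x (same index) and y
// (index + newbit); the choice per key is made by the hash bit that the
// larger bucket mask newly exposes.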
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.PtrBytes != 0 && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32-bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}