// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime
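
// Note: the functions in this file are the fast-path specializations of the
// generic map routines in map.go for maps with string keys. The bucket layout
// is the one described in map.go: a tophash array of bucketCnt bytes, then
// bucketCnt key slots, then bucketCnt element slots. Each key slot holds a
// two-word string header (data pointer and length), which is why key offsets
// below are multiples of 2*goarch.PtrSize.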

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

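// mapaccess1_faststr is the string-key specialization of mapaccess1. The
// compiler emits calls to it for expressions like v := m["k"] on maps with
// string keys. It returns a pointer to the element for ky, or a pointer to
// the zero object if ky is not in the map; it never returns nil.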
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

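// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether the
// key was present, backing the two-result form v, ok := m["k"].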
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

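// mapassign_faststr implements m[s] = v for maps with string keys. It returns
// a pointer to the element slot for s, creating the slot (and growing the map
// if necessary) when s is not already present; the caller stores the value
// through the returned pointer.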
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

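// mapdelete_faststr implements delete(m, ky) for maps with string keys.
// Besides clearing the key and element slots, it rewrites trailing emptyOne
// marks to emptyRest so that future lookups can stop scanning a bucket early.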
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

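// growWork_faststr does a small amount of incremental evacuation work whenever
// an assignment or deletion touches bucket, so that a grow in progress
// eventually completes without a single large pause.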
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

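// evacuate_faststr moves the entries of oldbucket (including its overflow
// chain) into the new bucket array, splitting them between the x (low) and
// y (high) destinations when the table is doubling; for a same-size grow
// everything goes to x.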
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*goarch.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}