// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.swissmap

package maps

import (
	"internal/abi"
	"internal/goarch"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

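// getWithoutKeySmallFastStr returns a pointer to the element for key, or nil
// if key is not present, in a small map (a single group, no directory). For
// long keys it may skip hashing entirely: a cheap prefix/suffix comparison
// can rule out every slot without computing the hash.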
func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	ctrls := *g.ctrls()
	slotKey := g.key(typ, 0)
	slotSize := typ.SlotSize

	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight,
	// where there are 8 keys to check, none of which quick-matches the lookup key.
	// In that case, we can save hashing the lookup key. That savings is worth this extra code
	// for strings that are long enough that hashing is expensive.
	if len(key) > 64 {
		// String hashing and equality might be expensive. Do a quick check first.
		j := abi.SwissMapGroupSlots
		for i := range abi.SwissMapGroupSlots {
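			// Control bytes for full slots have the high bit clear;
			// empty and deleted slots have it set, so they can never
			// pass this check.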
			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
				if j < abi.SwissMapGroupSlots {
					// 2 strings both passed the quick equality test.
					// Break out of this loop and do it the slow way.
					goto dohash
				}
				j = i
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			ctrls >>= 8
		}
		if j == abi.SwissMapGroupSlots {
			// No slot passed the quick test.
			return nil
		}
		// There's exactly one slot that passed the quick test. Do the single expensive comparison.
		slotKey = g.key(typ, uintptr(j))
		if key == *(*string)(slotKey) {
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		return nil
	}

dohash:
	// This path will cost 1 hash and 1+ε comparisons.
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
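	// h2 is the low 7 bits of the hash, the fragment stored in each
	// control byte for a full slot. The local shadows the h2 helper,
	// which is not needed again in this function.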
	h2 := uint8(h2(hash))
	ctrls = *g.ctrls()
	slotKey = g.key(typ, 0)

	for range abi.SwissMapGroupSlots {
		if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
		ctrls >>= 8
	}
	return nil
}

// Returns true if a and b might be equal.
// Returns false if a and b are definitely not equal.
// Requires len(a)>=8.
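// For example, two 65-byte strings that differ only in their final byte pass
// the length and first-8-byte checks, but the last-8-byte comparison reports
// them as definitely not equal.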
func longStringQuickEqualityTest(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	ap, bp := stringPtr(a), stringPtr(b)
	// Check first 8 bytes.
	if *(*[8]byte)(ap) != *(*[8]byte)(bp) {
		return false
	}
	// Check last 8 bytes.
	ap = unsafe.Pointer(uintptr(ap) + uintptr(len(a)) - 8)
	bp = unsafe.Pointer(uintptr(bp) + uintptr(len(a)) - 8)
	if *(*[8]byte)(ap) != *(*[8]byte)(bp) {
		return false
	}
	return true
}
}
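// stringPtr returns a pointer to the backing bytes of s.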
func stringPtr(s string) unsafe.Pointer {
	type stringStruct struct {
		ptr unsafe.Pointer
		len int
	}
	return (*stringStruct)(unsafe.Pointer(&s)).ptr
}

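// Note: v := m["k"] for a map[string]V compiles to a call of
// runtime.mapaccess1_faststr, which the linkname below binds to this
// function. It never returns nil; missing keys yield a pointer to the
// zero value.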
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

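// Note: v, ok := m["k"] compiles to a call of runtime.mapaccess2_faststr,
// bound to this function by the linkname below.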
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

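// putSlotSmallFastStr returns a pointer to the element slot for key,
// inserting key into the small map (a single group, no directory) if it is
// not already present. The caller must have verified that a slot is
// available.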
func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

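// Note: m["k"] = v for a map[string]V compiles to a call of
// runtime.mapassign_faststr (linknamed below); the compiler then stores v
// through the returned element pointer.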
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

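// Note: delete(m, "k") compiles to a call of runtime.mapdelete_faststr,
// bound to this function by the linkname below. Deletion itself is handled
// by the generic Map.Delete.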
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}