// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"internal/abi"
	"internal/goarch"
	"sync/atomic"
	"unsafe"
)

// HashTrieMap is an implementation of a concurrent hash-trie. The implementation
// is designed around frequent loads, but offers decent performance for stores
// and deletes as well, especially if the map is larger. Its primary use case is
// the unique package, but it can be used elsewhere as well.
//
// The zero HashTrieMap is empty and ready to use.
// It must not be copied after first use.
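//
// A minimal usage sketch (the zero value needs no initialization):
//
//	var m HashTrieMap[string, int]
//	m.Store("a", 1)
//	if v, ok := m.Load("a"); ok {
//		_ = v // v == 1
//	}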
type HashTrieMap[K comparable, V any] struct {
	inited   atomic.Uint32
	initMu   Mutex
	root     atomic.Pointer[indirect[K, V]]
	keyHash  hashFunc
	valEqual equalFunc
	seed     uintptr
}

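// init performs lazy initialization of the map on first use. The fast path
// is a single atomic load, so every exported method calls it cheaply.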
func (ht *HashTrieMap[K, V]) init() {
	if ht.inited.Load() == 0 {
		ht.initSlow()
	}
}

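// initSlow is the out-of-line slow path of init. It sets up the root node,
// the key hash and value equality functions, and the seed, exactly once
// under initMu.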
//go:noinline
func (ht *HashTrieMap[K, V]) initSlow() {
	ht.initMu.Lock()
	defer ht.initMu.Unlock()

	if ht.inited.Load() != 0 {
		// Someone got to it while we were waiting.
		return
	}

	// Set up root node, derive the hash function for the key, and the
	// equal function for the value, if any.
	var m map[K]V
	mapType := abi.TypeOf(m).MapType()
	ht.root.Store(newIndirectNode[K, V](nil))
	ht.keyHash = mapType.Hasher
	ht.valEqual = mapType.Elem.Equal
	ht.seed = uintptr(runtime_rand())

	ht.inited.Store(1)
}

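// hashFunc and equalFunc mirror the runtime map type's hash and equality
// functions; both operate on unsafe pointers to the underlying values.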
type hashFunc func(unsafe.Pointer, uintptr) uintptr
type equalFunc func(unsafe.Pointer, unsafe.Pointer) bool

// Load returns the value stored in the map for a key, or the zero value if no
// value is present.
// The ok result indicates whether value was found in the map.
func (ht *HashTrieMap[K, V]) Load(key K) (value V, ok bool) {
	ht.init()
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)

	i := ht.root.Load()
	hashShift := 8 * goarch.PtrSize
	for hashShift != 0 {
		hashShift -= nChildrenLog2

		n := i.children[(hash>>hashShift)&nChildrenMask].Load()
		if n == nil {
			return *new(V), false
		}
		if n.isEntry {
			return n.entry().lookup(key)
		}
		i = n.indirect()
	}
	}
	panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (ht *HashTrieMap[K, V]) LoadOrStore(key K, value V) (result V, loaded bool) {
	ht.init()
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
	var i *indirect[K, V]
	var hashShift uint
	var slot *atomic.Pointer[node[K, V]]
	var n *node[K, V]
	for {
		// Find the key or a candidate location for insertion.
		i = ht.root.Load()
		hashShift = 8 * goarch.PtrSize
		haveInsertPoint := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// We found a nil slot which is a candidate for insertion.
				haveInsertPoint = true
				break
			}
			if n.isEntry {
				// We found an existing entry, which is as far as we can go.
				// If it stays this way, we'll have to replace it with an
				// indirect node.
				if v, ok := n.entry().lookup(key); ok {
					return v, true
				}
				haveInsertPoint = true
				break
			}
			i = n.indirect()
		}
		if !haveInsertPoint {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}

		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if (n == nil || n.isEntry) && !i.dead.Load() {
			// What we saw is still true, so we can continue with the insert.
			break
		}
		// We have to start over.
		i.mu.Unlock()
	}
	// N.B. This lock is held from when we broke out of the outer loop above.
	// We specifically break this out so that we can use defer here safely.
	// One option is to break this out into a new function instead, but
	// there's so much local iteration state used below that this turns out
	// to be cleaner.
	defer i.mu.Unlock()

	var oldEntry *entry[K, V]
	if n != nil {
		oldEntry = n.entry()
		if v, ok := oldEntry.lookup(key); ok {
			// Easy case: by loading again, it turns out exactly what we wanted is here!
			return v, true
		}
	}
	newEntry := newEntryNode(key, value)
	if oldEntry == nil {
		// Easy case: create a new entry and store it.
		slot.Store(&newEntry.node)
	} else {
		// We possibly need to expand the entry already there into one or more new nodes.
		//
		// Publish the node last, which will make both oldEntry and newEntry visible. We
		// don't want readers to be able to observe that oldEntry isn't in the tree.
		slot.Store(ht.expand(oldEntry, newEntry, hash, hashShift, i))
	}
	return value, false
}

// expand takes oldEntry and newEntry whose hashes conflict from bit 64 down to hashShift and
// produces a subtree of indirect nodes to hold the two new entries.
func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uintptr, hashShift uint, parent *indirect[K, V]) *node[K, V] {
	// Check for a hash collision.
	oldHash := ht.keyHash(unsafe.Pointer(&oldEntry.key), ht.seed)
	if oldHash == newHash {
		// Store the old entry in the new entry's overflow list, then store
		// the new entry.
		newEntry.overflow.Store(oldEntry)
		return &newEntry.node
	}
	// We have to add an indirect node. Worse still, we may need to add more than one.
	newIndirect := newIndirectNode(parent)
	top := newIndirect
	for {
		if hashShift == 0 {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while inserting")
		}
		hashShift -= nChildrenLog2 // hashShift is for the level parent is at. We need to go deeper.
		oi := (oldHash >> hashShift) & nChildrenMask
		ni := (newHash >> hashShift) & nChildrenMask
		if oi != ni {
			newIndirect.children[oi].Store(&oldEntry.node)
			newIndirect.children[ni].Store(&newEntry.node)
			break
		}
		nextIndirect := newIndirectNode(newIndirect)
		newIndirect.children[oi].Store(&nextIndirect.node)
		newIndirect = nextIndirect
	}
	return &top.node
}

// Store sets the value for a key.
func (ht *HashTrieMap[K, V]) Store(key K, value V) {
	_, _ = ht.Swap(key, value)
}

// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (ht *HashTrieMap[K, V]) Swap(key K, new V) (previous V, loaded bool) {
	ht.init()
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
	var i *indirect[K, V]
	var hashShift uint
	var slot *atomic.Pointer[node[K, V]]
	var n *node[K, V]
	for {
		// Find the key or a candidate location for insertion.
		i = ht.root.Load()
		hashShift = 8 * goarch.PtrSize
		haveInsertPoint := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// We found a nil slot which is a candidate for insertion,
				// or an existing entry that we'll replace.
				haveInsertPoint = true
				break
			}
			if n.isEntry {
				// Swap if the keys compare.
				old, swapped := n.entry().swap(key, new)
				if swapped {
					return old, true
				}
				// If we fail, that means we should try to insert.
				haveInsertPoint = true
				break
			}
			i = n.indirect()
		}
		if !haveInsertPoint {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}

		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if (n == nil || n.isEntry) && !i.dead.Load() {
			// What we saw is still true, so we can continue with the insert.
			break
		}
		// We have to start over.
		i.mu.Unlock()
	}
	// N.B. This lock is held from when we broke out of the outer loop above.
	// We specifically break this out so that we can use defer here safely.
	// One option is to break this out into a new function instead, but
	// there's so much local iteration state used below that this turns out
	// to be cleaner.
	defer i.mu.Unlock()

	var zero V
	var oldEntry *entry[K, V]
	if n != nil {
		// Between before and now, something got inserted. Swap if the keys compare.
		oldEntry = n.entry()
		old, swapped := oldEntry.swap(key, new)
		if swapped {
			return old, true
		}
	}
	// The keys didn't compare, so we're doing an insertion.
	newEntry := newEntryNode(key, new)
	if oldEntry == nil {
		// Easy case: create a new entry and store it.
		slot.Store(&newEntry.node)
	} else {
		// We possibly need to expand the entry already there into one or more new nodes.
		//
		// Publish the node last, which will make both oldEntry and newEntry visible. We
		// don't want readers to be able to observe that oldEntry isn't in the tree.
		slot.Store(ht.expand(oldEntry, newEntry, hash, hashShift, i))
	}
	return zero, false
}

// CompareAndSwap swaps the old and new values for key
// if the value stored in the map is equal to old.
// The value type must be comparable, otherwise CompareAndSwap will panic.
func (ht *HashTrieMap[K, V]) CompareAndSwap(key K, old, new V) (swapped bool) {
	ht.init()
	if ht.valEqual == nil {
		panic("called CompareAndSwap when value is not of comparable type")
	}
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
	for {
		// Find the key or return if it's not there.
		i := ht.root.Load()
		hashShift := 8 * goarch.PtrSize
		found := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot := &i.children[(hash>>hashShift)&nChildrenMask]
			n := slot.Load()
			if n == nil {
				// Nothing to compare with. Give up.
				return false
			}
			if n.isEntry {
				// We found an entry. Try to compare and swap directly.
				return n.entry().compareAndSwap(key, old, new, ht.valEqual)
			}
			i = n.indirect()
		}
		if !found {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}
	}
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (ht *HashTrieMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	ht.init()
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)

	// Find a node with the key and compare with it. n != nil if we found the node.
	i, hashShift, slot, n := ht.find(key, hash, nil, *new(V))
	if n == nil {
		if i != nil {
			i.mu.Unlock()
		}
		return *new(V), false
	}

	// Try to delete the entry.
	v, e, loaded := n.entry().loadAndDelete(key)
	if !loaded {
		// Nothing was actually deleted, which means the node is no longer there.
		i.mu.Unlock()
		return *new(V), false
	}
	if e != nil {
		// We didn't actually delete the whole entry, just one entry in the chain.
		// Nothing else to do, since the parent is definitely not empty.
		slot.Store(&e.node)
		i.mu.Unlock()
		return v, true
	}
	// Delete the entry.
	slot.Store(nil)

	// Check if the node is now empty (and isn't the root), and delete it if able.
	for i.parent != nil && i.empty() {
		if hashShift == 8*goarch.PtrSize {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}
		hashShift += nChildrenLog2

		// Delete the current node in the parent.
		parent := i.parent
		parent.mu.Lock()
		i.dead.Store(true)
		parent.children[(hash>>hashShift)&nChildrenMask].Store(nil)
		parent.mu.Unlock()
		i = parent
	}
	i.mu.Unlock()
	return v, true
}

// Delete deletes the value for a key.
func (ht *HashTrieMap[K, V]) Delete(key K) {
	_, _ = ht.LoadAndDelete(key)
}

// CompareAndDelete deletes the entry for key if its value is equal to old.
// The value type must be comparable, otherwise this CompareAndDelete will panic.
//
// If there is no current value for key in the map, CompareAndDelete returns false
// (even if the old value is the nil interface value).
func (ht *HashTrieMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
	ht.init()
	if ht.valEqual == nil {
		panic("called CompareAndDelete when value is not of comparable type")
	}
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)

	// Find a node with the key. n != nil if we found the node.
	i, hashShift, slot, n := ht.find(key, hash, nil, *new(V))
	if n == nil {
		if i != nil {
			i.mu.Unlock()
		}
		return false
	}

	// Try to delete the entry.
	e, deleted := n.entry().compareAndDelete(key, old, ht.valEqual)
	if !deleted {
		// Nothing was actually deleted, which means the node is no longer there.
		i.mu.Unlock()
		return false
	}
	if e != nil {
		// We didn't actually delete the whole entry, just one entry in the chain.
		// Nothing else to do, since the parent is definitely not empty.
		slot.Store(&e.node)
		i.mu.Unlock()
		return true
	}
	// Delete the entry.
	slot.Store(nil)

	// Check if the node is now empty (and isn't the root), and delete it if able.
	for i.parent != nil && i.empty() {
		if hashShift == 8*goarch.PtrSize {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}
		hashShift += nChildrenLog2

		// Delete the current node in the parent.
		parent := i.parent
		parent.mu.Lock()
		i.dead.Store(true)
		parent.children[(hash>>hashShift)&nChildrenMask].Store(nil)
		parent.mu.Unlock()
		i = parent
	}
	i.mu.Unlock()
	return true
}

// find searches the tree for a node that contains key (hash must be the hash of key).
// If valEqual != nil, then it will also enforce that the values are equal.
//
// Returns a non-nil node, which will always be an entry, if found.
//
// If i != nil then i.mu is locked, and it is the caller's responsibility to unlock it.
func (ht *HashTrieMap[K, V]) find(key K, hash uintptr, valEqual equalFunc, value V) (i *indirect[K, V], hashShift uint, slot *atomic.Pointer[node[K, V]], n *node[K, V]) {
	for {
		// Find the key or return if it's not there.
		i = ht.root.Load()
		hashShift = 8 * goarch.PtrSize
		found := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// Nothing to compare with. Give up.
				i = nil
				return
			}
			if n.isEntry {
				// We found an entry. Check if it matches.
				if _, ok := n.entry().lookupWithValue(key, value, valEqual); !ok {
					// No match, comparison failed.
					i = nil
					n = nil
					return
				}
				// We've got a match. Prepare to perform an operation on the key.
				found = true
				break
			}
			i = n.indirect()
		}
		if !found {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}

		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if !i.dead.Load() && (n == nil || n.isEntry) {
			// Either we've got a valid node or the node is now nil under the lock.
			// In either case, we're done here.
			return
		}
		// We have to start over.
		i.mu.Unlock()
	}
}

// All returns an iterator over each key and value present in the map.
//
// The iterator does not necessarily correspond to any consistent snapshot of the
// HashTrieMap's contents: no key will be visited more than once, but if the value
// for any key is stored or deleted concurrently (including by yield), the iterator
// may reflect any mapping for that key from any point during iteration. The iterator
// does not block other methods on the receiver; even yield itself may call any
// method on the HashTrieMap.
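//
// With Go 1.23+ range-over-func syntax, the iterator can be consumed as,
// for example:
//
//	for key, value := range m.All() {
//		// ... use key and value ...
//	}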
func (ht *HashTrieMap[K, V]) All() func(yield func(K, V) bool) {
	ht.init()
	return func(yield func(key K, value V) bool) {
		ht.iter(ht.root.Load(), yield)
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// This exists for compatibility with sync.Map; All should be preferred.
// It provides the same guarantees as sync.Map's Range, and as All.
func (ht *HashTrieMap[K, V]) Range(yield func(K, V) bool) {
	ht.init()
	ht.iter(ht.root.Load(), yield)
}

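// iter recursively walks the subtree rooted at i in child order, calling
// yield for every entry (including overflow-chain entries) until yield
// returns false.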
func (ht *HashTrieMap[K, V]) iter(i *indirect[K, V], yield func(key K, value V) bool) bool {
	for j := range i.children {
		n := i.children[j].Load()
		if n == nil {
			continue
		}
		if !n.isEntry {
			if !ht.iter(n.indirect(), yield) {
				return false
			}
			continue
		}
		e := n.entry()
		for e != nil {
			if !yield(e.key, *e.value.Load()) {
				return false
			}
			e = e.overflow.Load()
		}
	}
	return true
}

// Clear deletes all the entries, resulting in an empty HashTrieMap.
func (ht *HashTrieMap[K, V]) Clear() {
	ht.init()

	// It's sufficient to just drop the root on the floor, but the root
	// must always be non-nil.
	ht.root.Store(newIndirectNode[K, V](nil))
}

const (
	// 16 children. This seems to be the sweet spot for
	// load performance: any smaller and we lose out on
	// 50% or more in CPU performance. Any larger and the
	// returns are minuscule (~1% improvement for 32 children).
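	// With nChildrenLog2 = 4, each level of the trie consumes 4 bits of
	// the hash, so a 64-bit hash supports at most 64/4 = 16 levels.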
	nChildrenLog2 = 4
	nChildren     = 1 << nChildrenLog2
	nChildrenMask = nChildren - 1
)

// indirect is an internal node in the hash-trie.
type indirect[K comparable, V any] struct {
	node[K, V]
	dead     atomic.Bool
	mu       Mutex // Protects mutation to children and any children that are entry nodes.
	parent   *indirect[K, V]
	children [nChildren]atomic.Pointer[node[K, V]]
}

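// newIndirectNode returns a new, empty indirect node with the given parent.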
func newIndirectNode[K comparable, V any](parent *indirect[K, V]) *indirect[K, V] {
	return &indirect[K, V]{node: node[K, V]{isEntry: false}, parent: parent}
}

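// empty reports whether the indirect node has no children.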
func (i *indirect[K, V]) empty() bool {
	nc := 0
	for j := range i.children {
		if i.children[j].Load() != nil {
			nc++
		}
	}
	return nc == 0
}

// entry is a leaf node in the hash-trie.
type entry[K comparable, V any] struct {
	node[K, V]
	overflow atomic.Pointer[entry[K, V]] // Overflow for hash collisions.
	key      K
	value    atomic.Pointer[V]
}

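// newEntryNode returns a new entry node holding the given key and value.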
func newEntryNode[K comparable, V any](key K, value V) *entry[K, V] {
	e := &entry[K, V]{
		node: node[K, V]{isEntry: true},
		key:  key,
	}
	e.value.Store(&value)
	return e
}

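// lookup returns the value for key in e's overflow chain, and whether it was found.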
func (e *entry[K, V]) lookup(key K) (V, bool) {
	for e != nil {
		if e.key == key {
			return *e.value.Load(), true
		}
		e = e.overflow.Load()
	}
	return *new(V), false
}

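// lookupWithValue is like lookup, but if valEqual != nil it also requires the
// stored value to compare equal to value.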
func (e *entry[K, V]) lookupWithValue(key K, value V, valEqual equalFunc) (V, bool) {
	for e != nil {
		oldp := e.value.Load()
		if e.key == key && (valEqual == nil || valEqual(unsafe.Pointer(oldp), abi.NoEscape(unsafe.Pointer(&value)))) {
			return *oldp, true
		}
		e = e.overflow.Load()
	}
	return *new(V), false
}

// swap replaces a value in the overflow chain if keys compare equal.
// Returns the old value, and whether or not anything was swapped.
//
// swap must be called under the mutex of the indirect node which e is a child of.
func (e *entry[K, V]) swap(key K, newv V) (V, bool) {
	if e.key == key {
		vp := new(V)
		*vp = newv
		oldp := e.value.Swap(vp)
		return *oldp, true
	}
	slot := &e.overflow
	curr := slot.Load()
	for curr != nil {
		if curr.key == key {
			vp := new(V)
			*vp = newv
			oldp := curr.value.Swap(vp)
			return *oldp, true
		}
		slot = &curr.overflow
		curr = curr.overflow.Load()
	}
	var zero V
	return zero, false
}

// compareAndSwap replaces a value for a matching key and existing value in the overflow chain.
// Returns whether or not anything was swapped.
//
// compareAndSwap must be called under the mutex of the indirect node which e is a child of.
func (e *entry[K, V]) compareAndSwap(key K, oldv, newv V, valEqual equalFunc) bool {
	var vp *V
outerLoop:
	for {
		oldvp := e.value.Load()
		if e.key == key && valEqual(unsafe.Pointer(oldvp), abi.NoEscape(unsafe.Pointer(&oldv))) {
			// The key matches at the head of the chain. Try to swap in the new value.
			if vp == nil {
				// Delay explicit creation of a new value to hold newv. If we just pass &newv
				// to CompareAndSwap, then newv will unconditionally escape, even if the CAS fails.
				vp = new(V)
				*vp = newv
			}
			if e.value.CompareAndSwap(oldvp, vp) {
				return true
			}
			// We need to restart from the head of the overflow list in case, due to a removal, a node
			// is moved up the list and we miss it.
			continue outerLoop
		}
		slot := &e.overflow
		curr := slot.Load()
		for curr != nil {
			oldvp := curr.value.Load()
			if curr.key == key && valEqual(unsafe.Pointer(oldvp), abi.NoEscape(unsafe.Pointer(&oldv))) {
				if vp == nil {
					// Delay explicit creation of a new value to hold newv. If we just pass &newv
					// to CompareAndSwap, then newv will unconditionally escape, even if the CAS fails.
					vp = new(V)
					*vp = newv
				}
				if curr.value.CompareAndSwap(oldvp, vp) {
					return true
				}
				continue outerLoop
			}
			slot = &curr.overflow
			curr = curr.overflow.Load()
		}
		return false
	}
}

// loadAndDelete deletes an entry in the overflow chain by key. Returns the value for the key, the new
// entry chain and whether or not anything was loaded (and deleted).
//
// loadAndDelete must be called under the mutex of the indirect node which e is a child of.
func (e *entry[K, V]) loadAndDelete(key K) (V, *entry[K, V], bool) {
	if e.key == key {
		// Drop the head of the list.
		return *e.value.Load(), e.overflow.Load(), true
	}
	slot := &e.overflow
	curr := slot.Load()
	for curr != nil {
		if curr.key == key {
			slot.Store(curr.overflow.Load())
			return *curr.value.Load(), e, true
		}
		slot = &curr.overflow
		curr = curr.overflow.Load()
	}
	return *new(V), e, false
}

// compareAndDelete deletes an entry in the overflow chain if both the key and value compare
// equal. Returns the new entry chain and whether or not anything was deleted.
//
// compareAndDelete must be called under the mutex of the indirect node which e is a child of.
func (e *entry[K, V]) compareAndDelete(key K, value V, valEqual equalFunc) (*entry[K, V], bool) {
	if e.key == key && valEqual(unsafe.Pointer(e.value.Load()), abi.NoEscape(unsafe.Pointer(&value))) {
		// Drop the head of the list.
		return e.overflow.Load(), true
	}
	slot := &e.overflow
	curr := slot.Load()
	for curr != nil {
		if curr.key == key && valEqual(unsafe.Pointer(curr.value.Load()), abi.NoEscape(unsafe.Pointer(&value))) {
			slot.Store(curr.overflow.Load())
			return e, true
		}
		slot = &curr.overflow
		curr = curr.overflow.Load()
	}
	return e, false
}

// node is the header for a node. It's polymorphic and
// is actually either an entry or an indirect.
type node[K comparable, V any] struct {
	isEntry bool
}

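// entry reinterprets the node as an entry node. It panics if n is not an entry.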
func (n *node[K, V]) entry() *entry[K, V] {
	if !n.isEntry {
		panic("called entry on non-entry node")
	}
	return (*entry[K, V])(unsafe.Pointer(n))
}

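// indirect reinterprets the node as an indirect node. It panics if n is an entry.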
func (n *node[K, V]) indirect() *indirect[K, V] {
	if n.isEntry {
		panic("called indirect on entry node")
	}
	return (*indirect[K, V])(unsafe.Pointer(n))
}

// Pull in runtime.rand so that we don't need to take a dependency
// on math/rand/v2.
//
//go:linkname runtime_rand runtime.rand
func runtime_rand() uint64