// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package unique

import (
	"internal/abi"
	"internal/goarch"
	"runtime"
	"sync"
	"sync/atomic"
	"unsafe"
	"weak"
)

// canonMap is a map of T -> *T. The map controls the creation
// of a canonical *T, and elements of the map are automatically
// deleted when the canonical *T is no longer referenced.
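//
// A rough usage sketch (illustrative only):
//
//	m := newCanonMap[string]()
//	p := m.LoadOrStore("gopher") // p is the canonical *string for "gopher".
//	q := m.Load("gopher")        // q == p as long as p is still referenced.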
type canonMap[T comparable] struct {
	root atomic.Pointer[indirect[T]]
	hash func(unsafe.Pointer, uintptr) uintptr
	seed uintptr
}

func newCanonMap[T comparable]() *canonMap[T] {
	cm := new(canonMap[T])
	cm.root.Store(newIndirectNode[T](nil))

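	// Use the hash function the compiler generated for map[T]struct{}; a nil
	// map value is enough for abi.TypeOf to recover the map's type descriptor.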
	var m map[T]struct{}
	mapType := abi.TypeOf(m).MapType()
	cm.hash = mapType.Hasher
	cm.seed = uintptr(runtime_rand())
	return cm
}

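// Load returns the canonical *T for key if it is present in the map and its
// canonical pointer is still live, and nil otherwise.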
func (m *canonMap[T]) Load(key T) *T {
	hash := m.hash(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	i := m.root.Load()
	hashShift := 8 * goarch.PtrSize
	for hashShift != 0 {
		hashShift -= nChildrenLog2

		n := i.children[(hash>>hashShift)&nChildrenMask].Load()
		if n == nil {
			return nil
		}
		if n.isEntry {
			v, _ := n.entry().lookup(key)
			return v
		}
		i = n.indirect()
	}
	panic("unique.canonMap: ran out of hash bits while iterating")
}

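// LoadOrStore returns the canonical *T for key, inserting a new canonical
// pointer if one is not already present. A cleanup registered on the returned
// pointer removes the entry once that pointer is no longer referenced.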
func (m *canonMap[T]) LoadOrStore(key T) *T {
	hash := m.hash(abi.NoEscape(unsafe.Pointer(&key)), m.seed)

	var i *indirect[T]
	var hashShift uint
	var slot *atomic.Pointer[node[T]]
	var n *node[T]
	for {
		// Find the key or a candidate location for insertion.
		i = m.root.Load()
		hashShift = 8 * goarch.PtrSize
		haveInsertPoint := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// We found a nil slot which is a candidate for insertion.
				haveInsertPoint = true
				break
			}
			if n.isEntry {
				// We found an existing entry, which is as far as we can go.
				// If it stays this way, we'll have to replace it with an
				// indirect node.
				if v, _ := n.entry().lookup(key); v != nil {
					return v
				}
				haveInsertPoint = true
				break
			}
			i = n.indirect()
		}
		if !haveInsertPoint {
			panic("unique.canonMap: ran out of hash bits while iterating")
		}

		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if (n == nil || n.isEntry) && !i.dead.Load() {
			// What we saw is still true, so we can continue with the insert.
			break
		}
		// We have to start over.
		i.mu.Unlock()
	}
	// N.B. This lock is held from when we broke out of the outer loop above.
	// We specifically break this out so that we can use defer here safely.
	// One option is to break this out into a new function instead, but
	// there's so much local iteration state used below that this turns out
	// to be cleaner.
	defer i.mu.Unlock()

	var oldEntry *entry[T]
	if n != nil {
		oldEntry = n.entry()
		if v, _ := oldEntry.lookup(key); v != nil {
			// Easy case: by loading again, it turns out exactly what we wanted is here!
			return v
		}
	}
	newEntry, canon, wp := newEntryNode(key, hash)
	// Prune dead pointers. This is to avoid O(n) lookups when we store the exact same
	// value in the set but the cleanup hasn't run yet because it got delayed for some
	// reason.
	oldEntry = oldEntry.prune()
	if oldEntry == nil {
		// Easy case: create a new entry and store it.
		slot.Store(&newEntry.node)
	} else {
		// We possibly need to expand the entry already there into one or more new nodes.
		//
		// Publish the node last, which will make both oldEntry and newEntry visible. We
		// don't want readers to be able to observe that oldEntry isn't in the tree.
		slot.Store(m.expand(oldEntry, newEntry, hash, hashShift, i))
	}
	runtime.AddCleanup(canon, func(_ struct{}) {
		m.cleanup(hash, wp)
	}, struct{}{})
	return canon
}

// expand takes oldEntry and newEntry whose hashes conflict from bit 64 down to hashShift and
// produces a subtree of indirect nodes to hold the two new entries. newHash is the hash of
// the value in the new entry.
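// If the two hashes first differ in the nChildrenLog2-bit group just below
// hashShift, a single indirect node suffices; for each additional group they
// agree on, another indirect node is chained one level deeper.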
func (m *canonMap[T]) expand(oldEntry, newEntry *entry[T], newHash uintptr, hashShift uint, parent *indirect[T]) *node[T] {
	// Check for a hash collision.
	oldHash := oldEntry.hash
	if oldHash == newHash {
		// Store the old entry in the new entry's overflow list, then store
		// the new entry.
		newEntry.overflow.Store(oldEntry)
		return &newEntry.node
	}
	// We have to add an indirect node. Worse still, we may need to add more than one.
	newIndirect := newIndirectNode(parent)
	top := newIndirect
	for {
		if hashShift == 0 {
			panic("unique.canonMap: ran out of hash bits while inserting")
		}
		hashShift -= nChildrenLog2 // hashShift is for the level parent is at. We need to go deeper.
		oldIdx := (oldHash >> hashShift) & nChildrenMask
		newIdx := (newHash >> hashShift) & nChildrenMask
		if oldIdx != newIdx {
			newIndirect.children[oldIdx].Store(&oldEntry.node)
			newIndirect.children[newIdx].Store(&newEntry.node)
			break
		}
		nextIndirect := newIndirectNode(newIndirect)
		newIndirect.children[oldIdx].Store(&nextIndirect.node)
		newIndirect = nextIndirect
	}
	return &top.node
}

// cleanup deletes the entry corresponding to wp in the canon map, if it's
// still in the map. wp must have a Value method that returns nil by the
// time this function is called. hash must be the hash of the value that
// wp once pointed to (that is, the hash of *wp.Value()).
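//
// cleanup is registered via runtime.AddCleanup in LoadOrStore, attached to the
// canonical pointer, so it runs once that pointer is no longer referenced.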
func (m *canonMap[T]) cleanup(hash uintptr, wp weak.Pointer[T]) {
	var i *indirect[T]
	var hashShift uint
	var slot *atomic.Pointer[node[T]]
	var n *node[T]
	for {
		// Find wp in the map by following hash.
		i = m.root.Load()
		hashShift = 8 * goarch.PtrSize
		found := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2

			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// We found a nil slot, already deleted.
				return
			}
			if n.isEntry {
				if !n.entry().hasWeakPointer(wp) {
					// The weak pointer was already pruned.
					return
				}
				found = true
				break
			}
			i = n.indirect()
		}
		if !found {
			panic("unique.canonMap: ran out of hash bits while iterating")
		}

		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if n != nil && n.isEntry {
			// Prune the entry node without thinking too hard. If we do
			// somebody else's work, such as someone trying to insert an
			// entry with the same hash (probably the same value) then
			// great, they'll back out without taking the lock.
			newEntry := n.entry().prune()
			if newEntry == nil {
				slot.Store(nil)
			} else {
				slot.Store(&newEntry.node)
			}

			// Delete interior nodes that are empty, up the tree.
			//
			// We'll hand-over-hand lock our way up the tree as we do this,
			// since we need to delete each empty node's link in its parent,
			// which requires the parents' lock.
			for i.parent != nil && i.empty() {
				if hashShift == 8*goarch.PtrSize {
					panic("unique.canonMap: ran out of hash bits while iterating")
				}
				hashShift += nChildrenLog2

				// Delete the current node in the parent.
				parent := i.parent
				parent.mu.Lock()
				i.dead.Store(true) // Could be done outside of parent's lock.
				parent.children[(hash>>hashShift)&nChildrenMask].Store(nil)
				parent.mu.Unlock()
				i = parent
			}
			i.mu.Unlock()
			return
		}
		// We have to start over.
		i.mu.Unlock()
	}
}

// node is the header for a node. It's polymorphic and
// is actually either an entry or an indirect.
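// Both entry and indirect embed node as their first field, so a *node can be
// reinterpreted as *entry or *indirect once isEntry has been consulted.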
type node[T comparable] struct {
	isEntry bool
}

func (n *node[T]) entry() *entry[T] {
	if !n.isEntry {
		panic("called entry on non-entry node")
	}
	return (*entry[T])(unsafe.Pointer(n))
}

func (n *node[T]) indirect() *indirect[T] {
	if n.isEntry {
		panic("called indirect on entry node")
	}
	return (*indirect[T])(unsafe.Pointer(n))
}

const (
	// 16 children. This seems to be the sweet spot for
	// load performance: any smaller and we lose out on
	// 50% or more in CPU performance. Any larger and the
	// returns are minuscule (~1% improvement for 32 children).
	nChildrenLog2 = 4
	nChildren     = 1 << nChildrenLog2
	nChildrenMask = nChildren - 1
)
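// With nChildrenLog2 = 4, each trie level consumes 4 bits of the hash, so an
// 8*goarch.PtrSize-bit hash (64 bits on 64-bit platforms) supports 16 levels
// before the "ran out of hash bits" panics can trigger.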

// indirect is an internal node in the hash-trie.
type indirect[T comparable] struct {
	node[T]
	dead     atomic.Bool
	parent   *indirect[T]
	mu       sync.Mutex // Protects mutation to children and any children that are entry nodes.
	children [nChildren]atomic.Pointer[node[T]]
}

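// newIndirectNode creates an empty indirect node with the given parent
// (nil for the root).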
func newIndirectNode[T comparable](parent *indirect[T]) *indirect[T] {
	return &indirect[T]{node: node[T]{isEntry: false}, parent: parent}
}

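// empty reports whether the indirect node has no children.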
func (i *indirect[T]) empty() bool {
	for j := range i.children {
		if i.children[j].Load() != nil {
			return false
		}
	}
	return true
}

// entry is a leaf node in the hash-trie.
type entry[T comparable] struct {
	node[T]
	overflow atomic.Pointer[entry[T]] // Overflow for hash collisions.
	key      weak.Pointer[T]
	hash     uintptr
}

func newEntryNode[T comparable](key T, hash uintptr) (*entry[T], *T, weak.Pointer[T]) {
	k := new(T)
	*k = key
	wp := weak.Make(k)
	return &entry[T]{
		node: node[T]{isEntry: true},
		key:  wp,
		hash: hash,
	}, k, wp
}

// lookup finds the entry in the overflow chain that has the provided key.
//
// Returns the key's canonical pointer and the weak pointer for that canonical pointer.
func (e *entry[T]) lookup(key T) (*T, weak.Pointer[T]) {
	for e != nil {
		s := e.key.Value()
		if s != nil && *s == key {
			return s, e.key
		}
		e = e.overflow.Load()
	}
	return nil, weak.Pointer[T]{}
}

// hasWeakPointer returns true if the provided weak pointer can be found in the overflow chain.
func (e *entry[T]) hasWeakPointer(wp weak.Pointer[T]) bool {
	for e != nil {
		if e.key == wp {
			return true
		}
		e = e.overflow.Load()
	}
	return false
}

// prune removes all entries in the overflow chain whose keys are nil.
//
// The caller must hold the lock on e's parent node.
func (e *entry[T]) prune() *entry[T] {
	// Prune the head of the list.
	for e != nil {
		if e.key.Value() != nil {
			break
		}
		e = e.overflow.Load()
	}
	if e == nil {
		return nil
	}

	// Prune individual nodes in the list.
	newHead := e
	i := &e.overflow
	e = i.Load()
	for e != nil {
		if e.key.Value() != nil {
			i = &e.overflow
		} else {
			i.Store(e.overflow.Load())
		}
		e = e.overflow.Load()
	}
	return newHead
}

// Pull in runtime.rand so that we don't need to take a dependency
// on math/rand/v2.
//
//go:linkname runtime_rand runtime.rand
func runtime_rand() uint64