// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.exectracer2

// Simple hash table for tracing. Provides a mapping
// between variable-length data and a unique ID. Subsequent
// puts of the same data will return the same ID.
//
// Uses a region-based allocation scheme and assumes that the
// table doesn't ever grow very big.
//
// This is definitely not a general-purpose hash table! It avoids
// doing any high-level Go operations so it's safe to use even in
// sensitive contexts.
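//
// Rough usage sketch (m and buf are illustrative names, not real call sites):
//
//	var m traceMap
//	id, added := m.put(unsafe.Pointer(&buf[0]), uintptr(len(buf)))
//	if added {
//		// First time these bytes were seen; id was freshly allocated.
//	}
//	// Putting the same bytes again returns the same id with added == false.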

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

type traceMap struct {
	lock mutex // Must be acquired on the system stack
	seq  atomic.Uint64
	mem  traceRegionAlloc
	tab  [1 << 13]atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
}

type traceMapNode struct {
	_    sys.NotInHeap
	link atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	hash uintptr
	id   uint64
	data []byte
}

// next is a type-safe wrapper around link.
func (n *traceMapNode) next() *traceMapNode {
	return (*traceMapNode)(n.link.Load())
}

// stealID steals an ID from the table, ensuring that it will not
// appear in the table anymore.
func (tab *traceMap) stealID() uint64 {
	return tab.seq.Add(1)
}

// put inserts the data into the table.
//
// It's always safe to noescape data because its bytes are always copied.
//
// Returns a unique ID for the data and whether this is the first time
// the data has been added to the map.
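//
// For example, a caller with a stack-allocated buffer can pass it without
// forcing the buffer to escape (illustrative sketch; buf is not a name from
// this package):
//
//	id, added := tab.put(noescape(unsafe.Pointer(&buf[0])), uintptr(len(buf)))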
func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool) {
	if size == 0 {
		return 0, false
	}
	hash := memhash(data, 0, size)
	// First, search the hashtable w/o the mutex.
	if id := tab.find(data, size, hash); id != 0 {
		return id, false
	}
	// Now, double check under the mutex.
	// Switch to the system stack so we can acquire tab.lock
	var id uint64
	var added bool
	systemstack(func() {
		lock(&tab.lock)
		if id = tab.find(data, size, hash); id != 0 {
			unlock(&tab.lock)
			return
		}
		// Create new record.
		id = tab.seq.Add(1)
		vd := tab.newTraceMapNode(data, size, hash, id)

		// Insert it into the table.
		//
		// Update the link first, since the node isn't published yet.
		// Then, store the node in the table as the new first node
		// for the bucket.
		part := int(hash % uintptr(len(tab.tab)))
		vd.link.StoreNoWB(tab.tab[part].Load())
		tab.tab[part].StoreNoWB(unsafe.Pointer(vd))
		unlock(&tab.lock)

		added = true
	})
	return id, added
}

// find looks up data in the table, assuming hash is a hash of data.
//
// Returns 0 if the data is not found, and the unique ID for it if it is.
func (tab *traceMap) find(data unsafe.Pointer, size, hash uintptr) uint64 {
	part := int(hash % uintptr(len(tab.tab)))
	for vd := tab.bucket(part); vd != nil; vd = vd.next() {
		// Synchronization not necessary. Once published to the table, these
		// values are immutable.
		if vd.hash == hash && uintptr(len(vd.data)) == size {
			if memequal(unsafe.Pointer(&vd.data[0]), data, size) {
				return vd.id
			}
		}
	}
	return 0
}

// bucket is a type-safe wrapper for looking up a value in tab.tab.
func (tab *traceMap) bucket(part int) *traceMapNode {
	return (*traceMapNode)(tab.tab[part].Load())
}

// newTraceMapNode allocates a new traceMapNode and copies data into memory
// owned by the table's region allocator, so the caller's buffer need not
// outlive the node.
func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size, hash uintptr, id uint64) *traceMapNode {
	// Create data array.
	sl := notInHeapSlice{
		array: tab.mem.alloc(size),
		len:   int(size),
		cap:   int(size),
	}
	memmove(unsafe.Pointer(sl.array), data, size)

	// Create metadata structure.
	meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
	*(*notInHeapSlice)(unsafe.Pointer(&meta.data)) = sl
	meta.id = id
	meta.hash = hash
	return meta
}

// reset drops all allocated memory from the table and resets it.
//
// tab.lock must be held. Must run on the system stack because of this.
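//
// A caller that owns tab might do, roughly:
//
//	systemstack(func() {
//		lock(&tab.lock)
//		tab.reset()
//		unlock(&tab.lock)
//	})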
//
//go:systemstack
func (tab *traceMap) reset() {
	assertLockHeld(&tab.lock)
	tab.mem.drop()
	tab.seq.Store(0)
	// Clear table without write barriers. The table consists entirely
	// of notinheap pointers, so this is fine.
	//
	// Write barriers may theoretically call into the tracer and acquire
	// the lock again, and this lock ordering is expressed in the static
	// lock ranking checker.
	memclrNoHeapPointers(unsafe.Pointer(&tab.tab), unsafe.Sizeof(tab.tab))
}