// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simple not-in-heap bump-pointer traceRegion allocator.

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
type traceRegionAlloc struct {
	lock     mutex
	dropping atomic.Bool          // For checking invariants.
	current  atomic.UnsafePointer // *traceRegionAllocBlock
	full     *traceRegionAllocBlock
}
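
// A rough sketch of the intended usage pattern (illustration only):
//
//	var a traceRegionAlloc
//	p := a.alloc(24) // safe to call concurrently; size is rounded up to 8
//	...              // p stays valid until drop is called
//	a.drop()         // caller must guarantee no concurrent alloc or drop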

// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	data [traceRegionAllocBlockData]byte
}
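
// A block's header and data together occupy exactly 64 KiB; alloc passes
// unsafe.Sizeof(traceRegionAllocBlock{}) to sysAlloc, so that is also the
// granularity at which memory is mapped and freed.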

type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	off  atomic.Uintptr
}

const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates an n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}

	// Try to bump-pointer allocate into the current block.
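	// Note that off is only ever bumped, never rolled back: if the Add below
	// overshoots the end of the block, we simply fall through to the slow
	// path and the overfull block is eventually retired to the full list.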
	block := (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}
	}

	// Try to install a new block.
	var x *notInHeap
	systemstack(func() {
		// Acquire a.lock on the systemstack to avoid stack growth
		// and accidentally entering the tracer again.
		lock(&a.lock)

		// Check block again under the lock. Someone may
		// have gotten here first.
		block = (*traceRegionAllocBlock)(a.current.Load())
		if block != nil {
			r := block.off.Add(n)
			if r <= uintptr(len(block.data)) {
				unlock(&a.lock)
				x = (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
				return
			}

			// Add the existing block to the full list.
			block.next = a.full
			a.full = block
		}

		// Allocate a new block.
		block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys, "trace arena alloc"))
		if block == nil {
			throw("traceRegion: out of memory")
		}

		// Allocate space for our current request, so we always make
		// progress.
		block.off.Store(n)
		x = (*notInHeap)(unsafe.Pointer(&block.data[0]))

		// Publish the new block.
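		// off was set before the block becomes reachable through a.current,
		// so a fast-path allocator that sees the new block also sees an off
		// that already accounts for this request.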
		a.current.Store(unsafe.Pointer(block))
		unlock(&a.lock)
	})
	return x
}

// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop() {
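	// Setting dropping is a best-effort guard: a racing alloc that observes
	// it will throw rather than hand out memory that is about to be freed.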
	a.dropping.Store(true)
	for a.full != nil {
		block := a.full
		a.full = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
	}
	if current := a.current.Load(); current != nil {
		sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
		a.current.Store(nil)
	}
	a.dropping.Store(false)
}