// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"unsafe"
)

// AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
// reachable, the runtime will call cleanup(arg) in a separate goroutine.
//
// A typical use is that ptr is an object wrapping an underlying resource (e.g.,
// a File object wrapping an OS file descriptor), arg is the underlying resource
// (e.g., the OS file descriptor), and the cleanup function releases the underlying
// resource (e.g., by calling the close system call).
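//
// For example, a minimal sketch of that pattern, assuming a Unix-like system
// where syscall.Open returns an int descriptor (the File wrapper type is
// illustrative, not part of any real package):
//
//	type File struct{ fd int }
//
//	func Open(name string) (*File, error) {
//		fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &File{fd: fd}
//		runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
//		return f, nil
//	}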
//
// There are few constraints on ptr. In particular, multiple cleanups may be
// attached to the same pointer, or to different pointers within the same
// allocation.
//
// If ptr is reachable from cleanup or arg, ptr will never be collected
// and the cleanup will never run. As a protection against simple cases of this,
// AddCleanup panics if arg is equal to ptr.
//
// There is no specified order in which cleanups will run.
// In particular, if several objects point to each other and all become
// unreachable at the same time, their cleanups all become eligible to run
// and can run in any order. This is true even if the objects form a cycle.
//
// A single goroutine runs all cleanup calls for a program, sequentially. If a
// cleanup function must run for a long time, it should create a new goroutine.
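//
// A short sketch of that advice (obj, resource, and work are illustrative
// names, not part of any real package):
//
//	runtime.AddCleanup(obj, func(res *resource) {
//		// Hand long-running work off so other cleanups are not delayed.
//		go work(res)
//	}, obj.res)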
//
// If ptr has both a cleanup and a finalizer, the cleanup will only run once
// ptr has been finalized and becomes unreachable without an associated finalizer.
//
// The cleanup(arg) call is not always guaranteed to run; in particular it is not
// guaranteed to run before program exit.
//
// Cleanups are not guaranteed to run if the size of T is zero bytes, because
// it may share the same address as other zero-size objects in memory. See
// https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a cleanup will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because cleanups may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The cleanup for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A cleanup may run as soon as an object becomes unreachable.
// In order to use cleanups correctly, the program must ensure that
// the object is reachable until it is safe to run its cleanup.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
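//
// A sketch of that pattern, reusing the illustrative File wrapper from the
// example above:
//
//	func (f *File) Read(p []byte) (int, error) {
//		n, err := syscall.Read(f.fd, p)
//		// Without this, f could become unreachable (and its cleanup could
//		// close f.fd) while the read above is still in flight.
//		runtime.KeepAlive(f)
//		return n, err
//	}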
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
	// Explicitly force ptr to escape to the heap.
	ptr = abi.Escape(ptr)

	// The pointer to the object must be valid.
	if ptr == nil {
		throw("runtime.AddCleanup: ptr is nil")
	}
	usptr := uintptr(unsafe.Pointer(ptr))

	// Check that arg is not equal to ptr.
	// TODO(67535) this does not cover the case where T and *S are the same
	// type and ptr and arg are equal.
	if unsafe.Pointer(&arg) == unsafe.Pointer(ptr) {
		throw("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
	}
	if inUserArenaChunk(usptr) {
		// Arena-allocated objects are not eligible for cleanup.
		throw("runtime.AddCleanup: ptr is arena-allocated")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no cleanup will ever run
		// (and we don't have the data structures to record them).
		// Return a noop cleanup.
		return Cleanup{}
	}

	fn := func() {
		cleanup(arg)
	}
	// Closure must escape.
	fv := *(**funcval)(unsafe.Pointer(&fn))
	fv = abi.Escape(fv)

	// Find the containing object.
	base, _, _ := findObject(usptr, 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
			// Cleanup is a noop.
			return Cleanup{}
		}
		throw("runtime.AddCleanup: ptr not in allocated block")
	}

	// Ensure we have a finalizer processing goroutine running.
	createfing()

	id := addCleanup(unsafe.Pointer(ptr), fv)
	return Cleanup{
		id:  id,
		ptr: usptr,
	}
}

// Cleanup is a handle to a cleanup call for a specific object.
type Cleanup struct {
	// id is the unique identifier for the cleanup within the arena.
	id uint64
	// ptr contains the pointer to the object.
	ptr uintptr
}

// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
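//
// A sketch of a typical use, continuing the illustrative File example from
// [AddCleanup] (the cleanup field holding the returned Cleanup handle is
// assumed, not shown above):
//
//	func (f *File) Close() error {
//		// Cancel the cleanup so the descriptor is not closed twice.
//		f.cleanup.Stop()
//		err := syscall.Close(f.fd)
//		// Keep f reachable so the cleanup cannot be queued before Stop runs.
//		runtime.KeepAlive(f)
//		return err
//	}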
func (c Cleanup) Stop() {
	if c.id == 0 {
		// id is set to zero when the cleanup is a noop.
		return
	}

	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(uintptr(unsafe.Pointer(c.ptr)))
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(unsafe.Pointer(c.ptr)) - span.base()

	var found *special
	lock(&span.speciallock)

	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	lock(&mheap_.speciallock)
	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)
}