// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime -> tracer API for memory events.

package runtime

import (
	"internal/abi"
	"internal/runtime/sys"
)

// Batch type values for the alloc/free experiment.
//
// These bytes prefix experimental batches so the trace parser knows how to
// interpret the batch payload that follows.
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)

// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory( uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be necessary to
	// interpret the events.
	var  bool
	 := unsafeTraceExpWriter(, nil, traceExperimentAllocFree)
	,  = .ensure(1 + 4*traceBytesPerNumber)
	if  {
		// Annotate the batch as containing additional info.
		.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info.
	.varint(uint64(trace.minPageHeapAddr))
	.varint(uint64(pageSize))
	.varint(uint64(minHeapAlign))
	.varint(uint64(fixedStack))

	// Finish writing the batch.
	.flush().end()

	// Start tracing.
	 := traceAcquire()
	if !.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for ,  := range mheap_.allspans {
		if .state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		.SpanExists()

		// Write out allocated objects if it's a heap span.
		if .state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects.
		 := .allocBitsForIndex(0)
		for  := uintptr(0);  < uintptr(.nelems); ++ {
			if .index < uintptr(.freeindex) || .isMarked() {
				 := .base() + *.elemsize
				.HeapObjectExists(, .typePointersOfUnchecked().typ)
			}
			.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func( *g) {
		.GoroutineStackExists(.stack.lo, .stack.hi-.stack.lo)
	})
	traceRelease()
}

func traceSpanTypeAndClass( *mspan) traceArg {
	if .state.get() == mSpanInUse {
		return traceArg(.spanclass) << 1
	}
	return traceArg(1)
}

// SpanExists records an event indicating that the span exists.
func ( traceLocker) ( *mspan) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpan, traceSpanID(), traceArg(.npages), traceSpanTypeAndClass())
}

// SpanAlloc records an event indicating that the span has just been allocated.
func ( traceLocker) ( *mspan) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(), traceArg(.npages), traceSpanTypeAndClass())
}

// SpanFree records an event indicating that the span is about to be freed.
func ( traceLocker) ( *mspan) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanFree, traceSpanID())
}

// traceSpanID creates a trace ID for the span s for the trace.
func traceSpanID( *mspan) traceArg {
	return traceArg(uint64(.base())-trace.minPageHeapAddr) / pageSize
}

// HeapObjectExists records that an object already exists at addr with the provided type.
// The type is optional, and the size of the slot occupied the object is inferred from the
// span containing it.
func ( traceLocker) ( uintptr,  *abi.Type) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObject, traceHeapObjectID(), .rtype())
}

// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
// The type is optional, and the size of the slot occupied the object is inferred from the
// span containing it.
func ( traceLocker) ( uintptr,  *abi.Type) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectAlloc, traceHeapObjectID(), .rtype())
}

// HeapObjectFree records that an object at addr is about to be freed.
func ( traceLocker) ( uintptr) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectFree, traceHeapObjectID())
}

// traceHeapObjectID creates a trace ID for a heap object at address addr.
//
// The ID is the object's offset from the minimum heap address in units of
// the minimum heap alignment, keeping IDs compact for varint encoding.
func traceHeapObjectID(addr uintptr) traceArg {
	return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
}

// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
func ( traceLocker) (,  uintptr) {
	 := traceCompressStackSize()
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStack, traceGoroutineStackID(), )
}

// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size..
func ( traceLocker) (,  uintptr) {
	 := traceCompressStackSize()
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackAlloc, traceGoroutineStackID(), )
}

// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
func ( traceLocker) ( uintptr) {
	.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackFree, traceGoroutineStackID())
}

// traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
//
// The ID is the stack's offset from the minimum heap address in units of the
// minimum (fixed) stack size, keeping IDs compact for varint encoding.
func traceGoroutineStackID(base uintptr) traceArg {
	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
}

// traceCompressStackSize assumes size is a power of 2 and returns log2(size).
//
// Throws if size is not a power of 2 (size&(size-1) is nonzero exactly for
// non-powers-of-two); goroutine stack sizes are always powers of 2.
func traceCompressStackSize(size uintptr) traceArg {
	if size&(size-1) != 0 {
		throw("goroutine stack size is not a power of 2")
	}
	return traceArg(sys.Len64(uint64(size)))
}