// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.exectracer2

// Trace stack table and acquisition.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

const (
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128

	// logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
	// pcBuf[1:] holds a logical stack requiring no further processing. Any other
	// value at pcBuf[0] represents a skip value to apply to the physical stack in
	// pcBuf[1:] after inline expansion.
	logicalStackSentinel = ^uintptr(0)
)
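
// For illustration, the two pcBuf encodings described above look like:
//
//	[logicalStackSentinel, pc0, pc1, ...]  // logical stack; used as-is
//	[skip, pc0, pc1, ...]                  // physical stack; inline-expand, then apply skip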

// traceStack captures a stack trace and registers it in the trace stack table.
// It then returns its unique ID.
//
// skip controls the number of leaf frames to omit in order to hide tracer internals
// from stack traces, see CL 5523.
//
// Avoid calling this function directly. gen needs to be the current generation
// that this stack trace is being written out for, which needs to be synchronized with
// generations moving forward. Prefer traceEventWriter.stack.
func traceStack(skip int, mp *m, gen uintptr) uint64 {
	var pcBuf [traceStackSize]uintptr

	gp := getg()
	curgp := gp.m.curg
	nstk := 1
	if tracefpunwindoff() || mp.hasCgoOnStack() {
		// Slow path: Unwind using default unwinder. Used when frame pointer
		// unwinding is unavailable or disabled (tracefpunwindoff), or might
		// produce incomplete results or crashes (hasCgoOnStack). Note that no
		// cgo callback related crashes have been observed yet. The main
		// motivation is to take advantage of a potentially registered cgo
		// symbolizer.
		pcBuf[0] = logicalStackSentinel
		if curgp == gp {
			nstk += callers(skip+1, pcBuf[1:])
		} else if curgp != nil {
			nstk += gcallers(curgp, skip, pcBuf[1:])
		}
	} else {
		// Fast path: Unwind using frame pointers.
		pcBuf[0] = uintptr(skip)
		if curgp == gp {
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
		} else if curgp != nil {
			// We're called on the g0 stack through mcall(fn) or systemstack(fn). To
			// behave like gcallers above, we start unwinding from sched.bp, which
			// points to the caller frame of the leaf frame on g's stack. The return
			// address of the leaf frame is stored in sched.pc, which we manually
			// capture here.
			pcBuf[1] = curgp.sched.pc
			nstk += 1 + fpTracebackPCs(unsafe.Pointer(curgp.sched.bp), pcBuf[2:])
		}
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && curgp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab[gen%2].put(pcBuf[:nstk])
	return id
}

// traceStackTable maps stack traces (arrays of PCs) to unique uint64 ids.
// It is lock-free for reading.
type traceStackTable struct {
	tab traceMap
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
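// An id of 0 denotes the empty stack, hence the early return below.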
func (t *traceStackTable) put(pcs []uintptr) uint64 {
	if len(pcs) == 0 {
		return 0
	}
	id, _ := t.tab.put(noescape(unsafe.Pointer(&pcs[0])), uintptr(len(pcs))*unsafe.Sizeof(uintptr(0)))
	return id
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state. It must only be called once the caller
// can guarantee that there are no more writers to the table.
//
// This must run on the system stack because it flushes buffers and thus
// may acquire trace.lock.
//
//go:systemstack
func (t *traceStackTable) dump(gen uintptr) {
	w := unsafeTraceWriter(gen, nil)

	// Iterate over the table.
	//
	// Do not acquire t.tab.lock. There's a conceptual lock cycle between acquiring this lock
	// here and allocation-related locks. Specifically, this lock may be acquired when an event
	// is emitted in allocation paths. Simultaneously, we might allocate here with the lock held,
	// creating a cycle. In practice, this cycle is never exercised. Because the table is only
	// dumped once there are no more writers, it's not possible for the cycle to occur. However
	// the lockrank mode is not sophisticated enough to identify this, and if it's not possible
	// for that cycle to happen, then it's also not possible for this to race with writers to
	// the table.
	for i := range t.tab.tab {
		stk := t.tab.bucket(i)
		for ; stk != nil; stk = stk.next() {
			stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&stk.data[0])), uintptr(len(stk.data))/unsafe.Sizeof(uintptr(0)))

			// N.B. This might allocate, but that's OK because we're not writing to the M's buffer,
			// but one we're about to create (with ensure).
			frames := makeTraceFrames(gen, fpunwindExpand(stack))

			// maxBytes is the maximum number of bytes required to hold the encoded
			// stack: 1 byte for the event type, plus varints for the stack id and
			// the frame count, plus 4 varints (PC, funcID, fileID, line) per frame,
			// each encoding to at most traceBytesPerNumber bytes.
			maxBytes := 1 + (2+4*len(frames))*traceBytesPerNumber

			// Estimate the size of this record. This
			// bound is pretty loose, but avoids counting
			// lots of varint sizes.
			//
			// Add 1 because we might also write traceEvStacks.
			var flushed bool
			w, flushed = w.ensure(1 + maxBytes)
			if flushed {
				w.byte(byte(traceEvStacks))
			}

			// Emit stack event.
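			// The record written below has the layout
			//
			//	traceEvStack id len(frames) {PC funcID fileID line}*
			//
			// where the event type is a single byte and all other fields are varints.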
			w.byte(byte(traceEvStack))
			w.varint(uint64(stk.id))
			w.varint(uint64(len(frames)))
			for _, frame := range frames {
				w.varint(uint64(frame.PC))
				w.varint(frame.funcID)
				w.varint(frame.fileID)
				w.varint(frame.line)
			}
		}
	}
	// Still, hold the lock over reset. The callee expects it, even though it's
	// not strictly necessary.
	lock(&t.tab.lock)
	t.tab.reset()
	unlock(&t.tab.lock)

	w.flush().end()
}

// makeTraceFrames returns the frames corresponding to pcs. It may
// allocate and may emit trace events.
func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame {
	frames := make([]traceFrame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, makeTraceFrame(gen, f))
		if !more {
			return frames
		}
	}
}

type traceFrame struct {
	PC     uintptr
	funcID uint64
	fileID uint64
	line   uint64
}

// makeTraceFrame sets up a traceFrame for a frame.
func makeTraceFrame(gen uintptr, f Frame) traceFrame {
	var frame traceFrame
	frame.PC = f.PC

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		// Keep the suffix of overlong names; the tail is the most informative part.
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID = trace.stringTab[gen%2].put(gen, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID = trace.stringTab[gen%2].put(gen, file)
	return frame
}

// tracefpunwindoff returns true if frame pointer unwinding for the tracer is
// disabled via GODEBUG or not supported by the architecture.
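// For example, running a program with GODEBUG=tracefpunwindoff=1 forces the
// tracer onto the default unwinder on all architectures.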
func tracefpunwindoff() bool {
	return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64)
}

// fpTracebackPCs populates pcBuf with the return addresses for each frame and
// returns the number of PCs written to pcBuf. The returned PCs correspond to
// "physical frames" rather than "logical frames"; that is if A is inlined into
// B, this will return a PC for only B.
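//
// fpTracebackPCs assumes the frame layout maintained by the AMD64 and ARM64
// ports (the only architectures for which tracefpunwindoff allows this path):
// at each frame, the word at fp holds the caller's frame pointer and the word
// at fp+PtrSize holds the return address.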
func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
	for i = 0; i < len(pcBuf) && fp != nil; i++ {
		// return addr sits one word above the frame pointer
		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		// follow the frame pointer to the next one
		fp = unsafe.Pointer(*(*uintptr)(fp))
	}
	return i
}

// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
// frames) or physical frames (produced by frame pointer unwinding) using a
// sentinel value in pcBuf[0]. Logical frames are simply returned without the
// sentinel. Physical frames are turned into logical frames via inline unwinding
// and by applying the skip value that's stored in pcBuf[0].
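//
// For example (a hypothetical stack): if g's call to f was inlined into g,
// frame pointer unwinding records only g's physical PC; inline expansion
// recovers the logical frames f, g (innermost first) from that single PC.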
func fpunwindExpand(pcBuf []uintptr) []uintptr {
	if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
		// pcBuf contains logical frames; skip has already been applied, so just
		// return it without the sentinel value in pcBuf[0].
		return pcBuf[1:]
	}

	var (
		lastFuncID = abi.FuncIDNormal
		newPCBuf   = make([]uintptr, 0, traceStackSize)
		skip       = pcBuf[0]
		// skipOrAdd skips or appends retPC to newPCBuf and returns true if more
		// pcs can be added.
		skipOrAdd = func(retPC uintptr) bool {
			if skip > 0 {
				skip--
			} else {
				newPCBuf = append(newPCBuf, retPC)
			}
			return len(newPCBuf) < cap(newPCBuf)
		}
	)

outer:
	for _, retPC := range pcBuf[1:] {
		callPC := retPC - 1
		fi := findfunc(callPC)
		if !fi.valid() {
			// There is no funcInfo if callPC belongs to a C function. In this case
			// we still keep the pc, but don't attempt to expand inlined frames.
			if more := skipOrAdd(retPC); !more {
				break outer
			}
			continue
		}

		u, uf := newInlineUnwinder(fi, callPC)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
				// ignore wrappers
			} else if more := skipOrAdd(uf.pc + 1); !more {
				break outer
			}
			lastFuncID = sf.funcID
		}
	}
	return newPCBuf
}

// startPCForTrace returns the start PC of a goroutine for tracing purposes.
// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
// returns pc.
func startPCForTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	w := funcdata(f, abi.FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	return f.datap.textAddr(*(*uint32)(w))
}