// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Trace buffer management.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Maximum number of bytes required to encode uint64 in base-128
// (varint): ceil(64/7) = 10 bytes.
const traceBytesPerNumber = 10

// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
	traceLocker // carries the generation (gen) and M this writer operates on behalf of
	*traceBuf   // current destination buffer; may be nil until refilled
}

// write returns an a traceWriter that writes into the current M's stream.
func ( traceLocker) () traceWriter {
	return traceWriter{traceLocker: , traceBuf: .mp.trace.buf[.gen%2]}
}

// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// buf may be nil.
func unsafeTraceWriter( uintptr,  *traceBuf) traceWriter {
	return traceWriter{traceLocker: traceLocker{gen: }, traceBuf: }
}

// end writes the buffer back into the m.
func ( traceWriter) () {
	if .mp == nil {
		// Tolerate a nil mp. It makes code that creates traceWriters directly
		// less error-prone.
		return
	}
	.mp.trace.buf[.gen%2] = .traceBuf
}

// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
func ( traceWriter) ( int) (traceWriter, bool) {
	 := .traceBuf == nil || !.available()
	if  {
		 = .refill(traceNoExperiment)
	}
	return , 
}

// flush puts w.traceBuf on the queue of full buffers.
func ( traceWriter) () traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if .traceBuf != nil {
			traceBufFlush(.traceBuf, .gen)
		}
		unlock(&trace.lock)
	})
	.traceBuf = nil
	return 
}

// refill puts w.traceBuf on the queue of full buffers and refresh's w's buffer.
//
// exp indicates whether the refilled batch should be EvExperimentalBatch.
func ( traceWriter) ( traceExperiment) traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if .traceBuf != nil {
			traceBufFlush(.traceBuf, .gen)
		}
		if trace.empty != nil {
			.traceBuf = trace.empty
			trace.empty = .traceBuf.link
			unlock(&trace.lock)
		} else {
			unlock(&trace.lock)
			.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
			if .traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})
	// Initialize the buffer.
	 := traceClockNow()
	if  <= .traceBuf.lastTime {
		 = .traceBuf.lastTime + 1
	}
	.traceBuf.lastTime = 
	.traceBuf.link = nil
	.traceBuf.pos = 0

	// Tolerate a nil mp.
	 := ^uint64(0)
	if .mp != nil {
		 = uint64(.mp.procid)
	}

	// Write the buffer's header.
	if  == traceNoExperiment {
		.byte(byte(traceEvEventBatch))
	} else {
		.byte(byte(traceEvExperimentalBatch))
		.byte(byte())
	}
	.varint(uint64(.gen))
	.varint(uint64())
	.varint(uint64())
	.traceBuf.lenPos = .varintReserve()
	return 
}

// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
	head, tail *traceBuf // singly-linked via traceBuf.link; both nil when empty
}

// push queues buf into queue of buffers.
func ( *traceBufQueue) ( *traceBuf) {
	.link = nil
	if .head == nil {
		.head = 
	} else {
		.tail.link = 
	}
	.tail = 
}

// pop dequeues from the queue of buffers.
func ( *traceBufQueue) () *traceBuf {
	 := .head
	if  == nil {
		return nil
	}
	.head = .link
	if .head == nil {
		.tail = nil
	}
	.link = nil
	return 
}

func ( *traceBufQueue) () bool {
	return .head == nil
}

// traceBufHeader is the bookkeeping header of a tracing buffer.
// (NOTE(review): the original comment said "per-P", but traceBuf below
// is documented as per-M — the per-P wording appears stale.)
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
}

// traceBuf is per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	_ sys.NotInHeap // allocated via sysAlloc, never on the Go heap
	traceBufHeader
	// arr sizes the whole traceBuf to exactly 64 KiB, header included.
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// byte appends v to buf.
func ( *traceBuf) ( byte) {
	.arr[.pos] = 
	.pos++
}

// varint appends v to buf in little-endian-base-128 encoding.
func ( *traceBuf) ( uint64) {
	 := .pos
	 := .arr[ : +traceBytesPerNumber]
	for  := range  {
		if  < 0x80 {
			 +=  + 1
			[] = byte()
			break
		}
		[] = 0x80 | byte()
		 >>= 7
	}
	.pos = 
}

// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
func ( *traceBuf) () int {
	 := .pos
	.pos += traceBytesPerNumber
	return 
}

// stringData appends s's data directly to buf.
func ( *traceBuf) ( string) {
	.pos += copy(.arr[.pos:], )
}

func ( *traceBuf) ( int) bool {
	return len(.arr)-.pos >= 
}

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
func ( *traceBuf) ( int,  uint64) {
	for  := 0;  < traceBytesPerNumber; ++ {
		if  < traceBytesPerNumber-1 {
			.arr[] = 0x80 | byte()
		} else {
			.arr[] = byte()
		}
		 >>= 7
		++
	}
	if  != 0 {
		throw("v could not fit in traceBytesPerNumber")
	}
}

// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush( *traceBuf,  uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// padding.
	.varintAt(.lenPos, uint64(.pos-(.lenPos+traceBytesPerNumber)))
	trace.full[%2].push()

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}