// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace

import (
	"fmt"
	"slices"
	"time"

	_ "unsafe" // added for go linkname usage
)

// A recorder receives bytes from the runtime tracer and processes them
// into the attached FlightRecorder's ring of generations.
type recorder struct {
	r *FlightRecorder

	// headerReceived indicates whether the 16-byte trace header has
	// already been consumed from the byte stream (it arrives only in
	// the first write).
	headerReceived bool
}

func ( *recorder) ( []byte) ( int,  error) {
	 := .r

	defer func() {
		if  != nil {
			// Propagate errors to the flightrecorder.
			if .err == nil {
				.err = 
			}
		}
	}()

	if !.headerReceived {
		if len() < len(.header) {
			return 0, fmt.Errorf("expected at least %d bytes in the first write", len(.header))
		}
		.header = ([16]byte)([:16])
		 += 16
		.headerReceived = true
	}
	if len() ==  {
		return 0, nil
	}
	, , ,  := readBatch([:]) // Every write from the runtime is guaranteed to be a complete batch.
	if  != nil {
		return len() - int() - , 
	}
	 += int()

	// Append the batch to the current generation.
	if .active.gen == 0 {
		.active.gen = 
	}
	if .active.minTime == 0 || .active.minTime > .freq.mul(.time) {
		.active.minTime = .freq.mul(.time)
	}
	.active.size += len(.data)
	.active.batches = append(.active.batches, )

	return len(), nil
}

func ( *recorder) () {
	 := .r

	// Check if we're entering a new generation.
	.ringMu.Lock()

	// Get the current trace clock time.
	 := traceTimeNow(.freq)

	// Add the current generation to the ring. Make sure we always have at least one
	// complete generation by putting the active generation onto the new list, regardless
	// of whatever our settings are.
	//
	// N.B. Let's completely replace the ring here, so that WriteTo can just make a copy
	// and not worry about aliasing. This creates allocations, but at a very low rate.
	 := []rawGeneration{.active}
	 := .active.size
	for  := len(.ring) - 1;  >= 0; -- {
		// Stop adding older generations if the new ring already exceeds the thresholds.
		// This ensures we keep generations that cross a threshold, but not any that lie
		// entirely outside it.
		if uint64() > .wantSize || .Sub([len()-1].minTime) > .wantDur {
			break
		}
		 += .ring[].size
		 = append(, .ring[])
	}
	slices.Reverse()
	.ring = 
	.ringMu.Unlock()

	// Start a new active generation.
	.active = rawGeneration{}
}

// rawGeneration is a single, unparsed generation of trace data: the raw
// batches received from the runtime plus enough metadata to manage the ring.
type rawGeneration struct {
	gen     uint64    // generation number reported by the runtime
	size    int       // total bytes of batch data in this generation
	minTime eventTime // earliest batch timestamp seen, in nanoseconds
	batches []batch
}

func traceTimeNow( frequency) eventTime {
	return .mul(timestamp(runtime_traceClockNow()))
}

// runtime_traceClockNow returns the current raw reading of the runtime's
// trace clock. Implemented in the runtime; wired up via go:linkname.
//
//go:linkname runtime_traceClockNow runtime.traceClockNow
func runtime_traceClockNow() uint64

// frequency is nanoseconds per timestamp unit.
type frequency float64

// mul multiplies an unprocessed to produce a time in nanoseconds.
func ( frequency) ( timestamp) eventTime {
	return eventTime(float64() * float64())
}

// eventTime is a timestamp in nanoseconds.
//
// It corresponds to the monotonic clock on the platform that the
// trace was taken, and so is possible to correlate with timestamps
// for other traces taken on the same machine using the same clock
// (i.e. no reboots in between).
//
// The actual absolute value of the timestamp is only meaningful in
// relation to other timestamps from the same clock.
//
// BUG: Timestamps coming from traces on Windows platforms are
// only comparable with timestamps from the same trace. Timestamps
// across traces cannot be compared, because the system clock is
// not used as of Go 1.22.
//
// BUG: Traces produced by Go versions 1.21 and earlier cannot be
// compared with timestamps from other traces taken on the same
// machine. This is because the system clock was not used at all
// to collect those timestamps.
type eventTime int64

// Sub subtracts t0 from t, returning the duration in nanoseconds.
func (t eventTime) Sub(t0 eventTime) time.Duration {
	return time.Duration(int64(t) - int64(t0))
}