Source File: tracebuf.go
Package: runtime
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace buffer management.
package runtime
import (
	"internal/runtime/sys"
	"unsafe"
)
// Maximum number of bytes required to encode uint64 in base-128.
const traceBytesPerNumber = 10
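// A quick check of that bound: each encoded byte carries 7 payload bits,
// so a 64-bit value needs at most ceil(64/7) = 10 bytes.
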
// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
	traceLocker
	exp traceExperiment
	*traceBuf
}
// writer returns a traceWriter that writes into the current M's stream.
//
// Once this is called, the caller must guard against stack growth until
// end is called on it. Therefore, it's highly recommended to use this
// API in a "fluent" style, for example tl.writer().event(...).end().
// Better yet, callers just looking to write events should use eventWriter
// when possible, which is a much safer wrapper around this function.
//
// nosplit to allow for safe reentrant tracing from stack growth paths.
//
//go:nosplit
func (tl traceLocker) writer() traceWriter {
	if debugTraceReentrancy {
		// Checks that the invariants of this function are being upheld.
		gp := getg()
		if gp == gp.m.curg {
			tl.mp.trace.oldthrowsplit = gp.throwsplit
			gp.throwsplit = true
		}
	}
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][traceNoExperiment]}
}
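// A minimal usage sketch, assuming a valid traceLocker tl (for example one
// returned by traceAcquire) and event values ev/args prepared elsewhere;
// real callers should normally go through eventWriter instead:
//
//	tl.writer().event(ev, args...).end()
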
// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// This does not have the same stack growth restrictions as traceLocker.writer.
//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}
// event writes out the bytes of an event into the event stream.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter {
	// N.B. Everything in this call must be nosplit to maintain
	// the stack growth related invariants for writing events.

	// Make sure we have room.
	w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)

	// Compute the timestamp diff that we'll put in the trace.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	tsDiff := uint64(ts - w.traceBuf.lastTime)
	w.traceBuf.lastTime = ts

	// Write out event.
	w.byte(byte(ev))
	w.varint(tsDiff)
	for _, arg := range args {
		w.varint(uint64(arg))
	}
	return w
}
// end writes the buffer back into the m.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) end() {
	if w.mp == nil {
		// Tolerate a nil mp. It makes code that creates traceWriters directly
		// less error-prone.
		return
	}
	w.mp.trace.buf[w.gen%2][w.exp] = w.traceBuf
	if debugTraceReentrancy {
		// The writer is no longer live, we can drop throwsplit (if it wasn't
		// already set upon entry).
		gp := getg()
		if gp == gp.m.curg {
			gp.throwsplit = w.mp.trace.oldthrowsplit
		}
	}
}
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
	refill := w.traceBuf == nil || !w.available(maxSize)
	if refill {
		w = w.refill()
	}
	return w, refill
}
// flush puts w.traceBuf on the queue of full buffers.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) flush() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		unlock(&trace.lock)
	})
	w.traceBuf = nil
	return w
}
// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
func (w traceWriter) refill() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		if trace.empty != nil {
			w.traceBuf = trace.empty
			trace.empty = w.traceBuf.link
			unlock(&trace.lock)
		} else {
			unlock(&trace.lock)
			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
			if w.traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})

	// Initialize the buffer.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	w.traceBuf.pos = 0

	// Tolerate a nil mp.
	mID := ^uint64(0)
	if w.mp != nil {
		mID = uint64(w.mp.procid)
	}

	// Write the buffer's header.
	if w.exp == traceNoExperiment {
		w.byte(byte(traceEvEventBatch))
	} else {
		w.byte(byte(traceEvExperimentalBatch))
		w.byte(byte(w.exp))
	}
	w.varint(uint64(w.gen))
	w.varint(uint64(mID))
	w.varint(uint64(ts))
	w.traceBuf.lenPos = w.varintReserve()
	return w
}
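// The batch header that refill writes therefore has this layout on the wire
// (experimental batches carry one extra byte identifying the experiment):
//
//	[1 byte  traceEvEventBatch or traceEvExperimentalBatch]
//	[1 byte  experiment ID]                  (experimental batches only)
//	[varint  generation]
//	[varint  M ID, or ^uint64(0) if there is no M]
//	[varint  batch timestamp]
//	[10 bytes reserved for the batch length, filled in by traceBufFlush]
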
// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
	head, tail *traceBuf
}
// push queues buf into queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf) {
	buf.link = nil
	if q.head == nil {
		q.head = buf
	} else {
		q.tail.link = buf
	}
	q.tail = buf
}
// pop dequeues from the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf {
	buf := q.head
	if buf == nil {
		return nil
	}
	q.head = buf.link
	if q.head == nil {
		q.tail = nil
	}
	buf.link = nil
	return buf
}
// empty reports whether the queue contains no buffers.
func (q *traceBufQueue) empty() bool {
	return q.head == nil
}
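// A small usage sketch of the queue; buf1 and buf2 are hypothetical,
// already-allocated buffers, not a real call site in the runtime:
//
//	var q traceBufQueue
//	q.push(buf1)
//	q.push(buf2)
//	first := q.pop() // returns buf1: FIFO order, linked through the link field
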
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
}
// traceBuf is per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
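// Note that arr is sized so that the header and the array together occupy
// exactly 64 KiB per buffer.
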
// byte appends v to buf.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// varint appends v to buf in little-endian-base-128 encoding.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	arr := buf.arr[pos : pos+traceBytesPerNumber]
	for i := range arr {
		if v < 0x80 {
			pos += i + 1
			arr[i] = byte(v)
			break
		}
		arr[i] = 0x80 | byte(v)
		v >>= 7
	}
	buf.pos = pos
}
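// A worked example of the encoding: 300 (0b100101100) is split into 7-bit
// groups, least-significant group first, with the high bit of each byte
// marking "more bytes follow":
//
//	300 & 0x7f        = 0x2c -> written as 0x80|0x2c = 0xac
//	(300 >> 7) & 0x7f = 0x02 -> written as 0x02 (final byte, high bit clear)
//
// so varint(300) appends the two bytes 0xac 0x02 and advances pos by 2.
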
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintReserve() int {
	p := buf.pos
	buf.pos += traceBytesPerNumber
	return p
}
// stringData appends s's data directly to buf.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) stringData(s string) {
	buf.pos += copy(buf.arr[buf.pos:], s)
}
// available reports whether the buffer has at least size bytes of space left.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) available(size int) bool {
	return len(buf.arr)-buf.pos >= size
}
// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
	if v != 0 {
		throw("v could not fit in traceBytesPerNumber")
	}
}
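// Sketch of the reserve-then-fill pattern varintReserve and varintAt form
// together; this mirrors how refill and traceBufFlush handle the batch length:
//
//	lenPos := buf.varintReserve() // hold 10 bytes for a value not yet known
//	// ... append the rest of the batch ...
//	buf.varintAt(lenPos, uint64(buf.pos-(lenPos+traceBytesPerNumber)))
//
// Because varintAt always emits a full, zero-padded 10-byte varint, filling
// the value in later never shifts data written after the reservation.
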
// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// padding.
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}
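// The Load-before-Store on workAvailable above is a small optimization:
// once the flag is already set, subsequent flushes skip the atomic store
// and avoid repeatedly dirtying the shared cache line.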