package runtime
import (
"internal/abi"
"runtime/internal/sys"
)
// Batch-type values for the alloc/free trace experiment. The first byte
// of an experimental batch identifies its kind (see traceSnapshotMemory,
// which tags its info batch with traceAllocFreeInfoBatch).
const (
traceAllocFreeTypesBatch = iota // presumably: batch contains type data — TODO confirm against the trace parser
traceAllocFreeInfoBatch // batch contains info needed to interpret alloc/free events
)
// traceSnapshotMemory writes out a snapshot of all existing memory the
// alloc/free trace experiment cares about: heap spans, live heap objects,
// and goroutine stacks.
//
// Must be called with the world stopped (asserted below) and with tracing
// enabled for generation gen.
func traceSnapshotMemory(gen uintptr ) {
assertWorldStopped ()
// First, emit an info batch carrying the constants the trace consumer
// needs to decode span/object/stack IDs (base address and granularities).
var flushed bool
w := unsafeTraceExpWriter (gen , nil , traceExperimentAllocFree )
w , flushed = w .ensure (1 + 4 *traceBytesPerNumber )
if flushed {
// A fresh batch was started: tag it as an info batch.
w .byte (byte (traceAllocFreeInfoBatch ))
}
// The four values: lowest page-heap address, then the units in which
// span IDs (pageSize), object IDs (minHeapAlign), and stack IDs
// (fixedStack) are expressed.
w .varint (uint64 (trace .minPageHeapAddr ))
w .varint (uint64 (pageSize ))
w .varint (uint64 (minHeapAlign ))
w .varint (uint64 (fixedStack ))
// Finish writing the batch.
w .flush ().end ()
// Acquire the trace locker for event emission. NOTE: this local
// deliberately shadows the package-level trace variable from here on.
trace := traceAcquire ()
if !trace .ok () {
throw ("traceSnapshotMemory: tracing is not enabled" )
}
// Write out events for every span, and for live objects in in-use spans.
for _ , s := range mheap_ .allspans {
if s .state .get () == mSpanDead {
continue
}
// The span exists in some non-dead state; record it.
trace .SpanExists (s )
if s .state .get () != mSpanInUse {
continue
}
// Walk the span's allocation bits and emit one event per live object.
abits := s .allocBitsForIndex (0 )
for i := uintptr (0 ); i < uintptr (s .nelems ); i ++ {
// Slots below freeindex count as allocated regardless of the bit;
// at or beyond freeindex, only marked slots are allocated.
if abits .index < uintptr (s .freeindex ) || abits .isMarked () {
x := s .base () + i *s .elemsize
trace .HeapObjectExists (x , s .typePointersOfUnchecked (x ).typ )
}
abits .advance ()
}
}
// Record every goroutine's stack. Safe without extra locking because
// the world is stopped.
forEachGRace (func (gp *g ) {
trace .GoroutineStackExists (gp .stack .lo , gp .stack .hi -gp .stack .lo )
})
traceRelease (trace )
}
// traceSpanTypeAndClass encodes a span's state and class for a trace event:
// non-in-use spans encode as 1; in-use spans carry their span class shifted
// left by one bit (so the low bit distinguishes the two cases).
func traceSpanTypeAndClass(s *mspan) traceArg {
	if s.state.get() != mSpanInUse {
		return traceArg(1)
	}
	return traceArg(s.spanclass) << 1
}
// SpanExists records an event that span s exists at snapshot time.
func (tl traceLocker) SpanExists(s *mspan) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanAlloc records an event for the allocation of span s.
func (tl traceLocker) SpanAlloc(s *mspan) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanFree records an event for the release of span s.
func (tl traceLocker) SpanFree(s *mspan) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvSpanFree, traceSpanID(s))
}
// traceSpanID returns a trace ID for span s: its base address as an offset
// from the minimum page-heap address, counted in pages.
func traceSpanID(s *mspan) traceArg {
	offset := uint64(s.base()) - trace.minPageHeapAddr
	return traceArg(offset / pageSize)
}
// HeapObjectExists records an event that the heap object at addr, with the
// given type, exists at snapshot time.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectAlloc records an event for the allocation of a heap object at
// addr with the given type.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectFree records an event for the release of the heap object at addr.
func (tl traceLocker) HeapObjectFree(addr uintptr) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvHeapObjectFree, traceHeapObjectID(addr))
}
// traceHeapObjectID returns a trace ID for the heap object at addr: its
// offset from the minimum page-heap address in minHeapAlign units.
func traceHeapObjectID(addr uintptr) traceArg {
	offset := uint64(addr) - trace.minPageHeapAddr
	return traceArg(offset / minHeapAlign)
}
// GoroutineStackExists records an event that a goroutine stack of the given
// size exists at base at snapshot time.
func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvGoroutineStack, traceGoroutineStackID(base), traceCompressStackSize(size))
}
// GoroutineStackAlloc records an event for the allocation of a goroutine
// stack of the given size at base.
func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), traceCompressStackSize(size))
}
// GoroutineStackFree records an event for the release of the goroutine
// stack at base.
func (tl traceLocker) GoroutineStackFree(base uintptr) {
	ev := tl.eventWriter(traceGoRunning, traceProcRunning)
	ev.commit(traceEvGoroutineStackFree, traceGoroutineStackID(base))
}
// traceGoroutineStackID returns a trace ID for the goroutine stack at base:
// its offset from the minimum page-heap address in fixedStack units.
func traceGoroutineStackID(base uintptr) traceArg {
	offset := uint64(base) - trace.minPageHeapAddr
	return traceArg(offset / fixedStack)
}
// traceCompressStackSize compresses a goroutine stack size for the trace by
// transmitting only its bit length. Throws if size is not a power of 2
// (which every goroutine stack size must be).
func traceCompressStackSize(size uintptr) traceArg {
	if isPow2 := size&(size-1) == 0; !isPow2 {
		throw("goroutine stack size is not a power of 2")
	}
	return traceArg(sys.Len64(uint64(size)))
}
The pages are generated with Golds v0.7.0-preview (GOOS=linux GOARCH=amd64).
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable via the QR code on the left) to get the latest news about Golds.