// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pprof

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"internal/abi"
	"io"
	"runtime"
	"strconv"
	"strings"
	"time"
	"unsafe"
)

// lostProfileEvent is the function to which lost profiling
// events are attributed.
// (The name shows up in the pprof graphs.)
func lostProfileEvent() { lostProfileEvent() }

// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
	start      time.Time
	end        time.Time
	havePeriod bool
	period     int64
	m          profMap

	// encoding state
	w         io.Writer
	zw        *gzip.Writer
	pb        protobuf
	strings   []string
	stringMap map[string]int
	locs      map[uintptr]locInfo // list of locInfo starting with the given PC.
	funcs     map[string]int      // Package path-qualified function name to Function.ID
	mem       []memMap
	deck      pcDeck
}

type memMap struct {
	// initialized when reading the memory mappings
	start   uintptr // Address at which the binary (or DLL) is loaded into memory.
	end     uintptr // The limit of the address range occupied by this mapping.
	offset  uint64  // Offset in the binary that corresponds to the first mapped address.
	file    string  // The object this entry is loaded from.
	buildID string  // A string that uniquely identifies a particular program version with high probability.

	funcs symbolizeFlag
	fake  bool // map entry was faked; /proc/self/maps wasn't available
}

// symbolizeFlag keeps track of symbolization result.
//
//	0                  : no symbol lookup was performed
//	1<<0 (lookupTried) : symbol lookup was performed
//	1<<1 (lookupFailed): symbol lookup was performed but failed
type symbolizeFlag uint8

const (
	lookupTried  symbolizeFlag = 1 << iota
	lookupFailed symbolizeFlag = 1 << iota
)
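
// For example, a mapping whose samples all symbolized successfully ends up
// with funcs == lookupTried, while any failed lookup also sets lookupFailed.
// build relies on exactly this distinction when emitting Mapping messages:
//
//	hasFunctions := m.funcs == lookupTried // tried, and never failed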

const (
	// message Profile
	tagProfile_SampleType        = 1  // repeated ValueType
	tagProfile_Sample            = 2  // repeated Sample
	tagProfile_Mapping           = 3  // repeated Mapping
	tagProfile_Location          = 4  // repeated Location
	tagProfile_Function          = 5  // repeated Function
	tagProfile_StringTable       = 6  // repeated string
	tagProfile_DropFrames        = 7  // int64 (string table index)
	tagProfile_KeepFrames        = 8  // int64 (string table index)
	tagProfile_TimeNanos         = 9  // int64
	tagProfile_DurationNanos     = 10 // int64
	tagProfile_PeriodType        = 11 // ValueType (really optional string???)
	tagProfile_Period            = 12 // int64
	tagProfile_Comment           = 13 // repeated int64
	tagProfile_DefaultSampleType = 14 // int64

	// message ValueType
	tagValueType_Type = 1 // int64 (string table index)
	tagValueType_Unit = 2 // int64 (string table index)

	// message Sample
	tagSample_Location = 1 // repeated uint64
	tagSample_Value    = 2 // repeated int64
	tagSample_Label    = 3 // repeated Label

	// message Label
	tagLabel_Key = 1 // int64 (string table index)
	tagLabel_Str = 2 // int64 (string table index)
	tagLabel_Num = 3 // int64

	// message Mapping
	tagMapping_ID              = 1  // uint64
	tagMapping_Start           = 2  // uint64
	tagMapping_Limit           = 3  // uint64
	tagMapping_Offset          = 4  // uint64
	tagMapping_Filename        = 5  // int64 (string table index)
	tagMapping_BuildID         = 6  // int64 (string table index)
	tagMapping_HasFunctions    = 7  // bool
	tagMapping_HasFilenames    = 8  // bool
	tagMapping_HasLineNumbers  = 9  // bool
	tagMapping_HasInlineFrames = 10 // bool

	// message Location
	tagLocation_ID        = 1 // uint64
	tagLocation_MappingID = 2 // uint64
	tagLocation_Address   = 3 // uint64
	tagLocation_Line      = 4 // repeated Line

	// message Line
	tagLine_FunctionID = 1 // uint64
	tagLine_Line       = 2 // int64

	// message Function
	tagFunction_ID         = 1 // uint64
	tagFunction_Name       = 2 // int64 (string table index)
	tagFunction_SystemName = 3 // int64 (string table index)
	tagFunction_Filename   = 4 // int64 (string table index)
	tagFunction_StartLine  = 5 // int64
)
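
// The pprof encoding is standard protobuf wire format: each field above is
// preceded by a varint key of (tag<<3)|wireType (the byte emission itself
// lives in the protobuf helper type, defined elsewhere in this package).
// As an illustration, tagProfile_Period (12) holding the value 10000 is
// written as key 12<<3|0 = 0x60 followed by the varint bytes 0x90 0x4e.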

// stringIndex adds s to the string table if not already present
// and returns the index of s in the string table.
func (b *profileBuilder) stringIndex(s string) int64 {
	id, ok := b.stringMap[s]
	if !ok {
		id = len(b.strings)
		b.strings = append(b.strings, s)
		b.stringMap[s] = id
	}
	return int64(id)
}
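
// For example, index 0 is reserved for the empty string (see
// newProfileBuilder), so the first distinct string interned gets index 1:
//
//	b.stringIndex("")        // == 0
//	b.stringIndex("samples") // == 1, appended to b.strings
//	b.stringIndex("samples") // == 1 again, served from b.stringMap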

func (b *profileBuilder) flush() {
	const dataFlush = 4096
	if b.pb.nest == 0 && len(b.pb.data) > dataFlush {
		b.zw.Write(b.pb.data)
		b.pb.data = b.pb.data[:0]
	}
}

// pbValueType encodes a ValueType message to b.pb.
func (b *profileBuilder) pbValueType(tag int, typ, unit string) {
	start := b.pb.startMessage()
	b.pb.int64(tagValueType_Type, b.stringIndex(typ))
	b.pb.int64(tagValueType_Unit, b.stringIndex(unit))
	b.pb.endMessage(tag, start)
}

// pbSample encodes a Sample message to b.pb.
func (b *profileBuilder) pbSample(values []int64, locs []uint64, labels func()) {
	start := b.pb.startMessage()
	b.pb.int64s(tagSample_Value, values)
	b.pb.uint64s(tagSample_Location, locs)
	if labels != nil {
		labels()
	}
	b.pb.endMessage(tagProfile_Sample, start)
	b.flush()
}

// pbLabel encodes a Label message to b.pb.
func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) {
	start := b.pb.startMessage()
	b.pb.int64Opt(tagLabel_Key, b.stringIndex(key))
	b.pb.int64Opt(tagLabel_Str, b.stringIndex(str))
	b.pb.int64Opt(tagLabel_Num, num)
	b.pb.endMessage(tag, start)
}

// pbLine encodes a Line message to b.pb.
func (b *profileBuilder) pbLine(tag int, funcID uint64, line int64) {
	start := b.pb.startMessage()
	b.pb.uint64Opt(tagLine_FunctionID, funcID)
	b.pb.int64Opt(tagLine_Line, line)
	b.pb.endMessage(tag, start)
}

// pbMapping encodes a Mapping message to b.pb.
func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file, buildID string, hasFuncs bool) {
	start := b.pb.startMessage()
	b.pb.uint64Opt(tagMapping_ID, id)
	b.pb.uint64Opt(tagMapping_Start, base)
	b.pb.uint64Opt(tagMapping_Limit, limit)
	b.pb.uint64Opt(tagMapping_Offset, offset)
	b.pb.int64Opt(tagMapping_Filename, b.stringIndex(file))
	b.pb.int64Opt(tagMapping_BuildID, b.stringIndex(buildID))
	// TODO: we set HasFunctions if all symbols from samples were symbolized (hasFuncs).
	// Decide what to do about HasInlineFrames and HasLineNumbers.
	// Also, another approach to handle the mapping entry with
	// incomplete symbolization results is to duplicate the mapping
	// entry (but with different Has* field values) and use
	// different entries for symbolized locations and unsymbolized locations.
	if hasFuncs {
		b.pb.bool(tagMapping_HasFunctions, true)
	}
	b.pb.endMessage(tag, start)
}

func allFrames(addr uintptr) ([]runtime.Frame, symbolizeFlag) {
	// Expand this one address using CallersFrames so we can cache
	// each expansion. In general, CallersFrames takes a whole
	// stack, but in this case we know there will be no skips in
	// the stack and we have return PCs anyway.
	frames := runtime.CallersFrames([]uintptr{addr})
	frame, more := frames.Next()
	if frame.Function == "runtime.goexit" {
		// Short-circuit if we see runtime.goexit so the loop
		// below doesn't allocate a useless empty location.
		return nil, 0
	}

	symbolizeResult := lookupTried
	if frame.PC == 0 || frame.Function == "" || frame.File == "" || frame.Line == 0 {
		symbolizeResult |= lookupFailed
	}

	if frame.PC == 0 {
		// If we failed to resolve the frame, at least make up
		// a reasonable call PC. This mostly happens in tests.
		frame.PC = addr - 1
	}
	ret := []runtime.Frame{frame}
	for frame.Function != "runtime.goexit" && more {
		frame, more = frames.Next()
		ret = append(ret, frame)
	}
	return ret, symbolizeResult
}
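
// For example (function names hypothetical), if addr is a PC inside main.a
// at a site where main.a was inlined into main.b, the expansion yields the
// inlined callee first:
//
//	frames, flags := allFrames(addr)
//	// frames[0].Function == "main.a" (inlined leaf)
//	// frames[1].Function == "main.b" (its caller at addr)
//	// flags == lookupTried if symbolization succeeded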

type locInfo struct {
	// location id assigned by the profileBuilder
	id uint64

	// sequence of PCs, including the fake PCs returned by the traceback
	// to represent inlined functions
	// https://github.com/golang/go/blob/d6f2f833c93a41ec1c68e49804b8387a06b131c5/src/runtime/traceback.go#L347-L368
	pcs []uintptr

	// firstPCFrames and firstPCSymbolizeResult hold the results of the
	// allFrames call for the first (leaf-most) PC this locInfo represents
	firstPCFrames          []runtime.Frame
	firstPCSymbolizeResult symbolizeFlag
}

// newProfileBuilder returns a new profileBuilder.
// CPU profiling data obtained from the runtime can be added
// by calling b.addCPUData, and then the eventual profile
// can be obtained by calling b.build.
func newProfileBuilder(w io.Writer) *profileBuilder {
	zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed)
	b := &profileBuilder{
		w:         w,
		zw:        zw,
		start:     time.Now(),
		strings:   []string{""},
		stringMap: map[string]int{"": 0},
		locs:      map[uintptr]locInfo{},
		funcs:     map[string]int{},
	}
	b.readMapping()
	return b
}
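
// A minimal sketch of the lifecycle described above (the loop shape is
// illustrative; the real driver lives with the CPU profiling code):
//
//	b := newProfileBuilder(w)
//	for moreData {
//		if err := b.addCPUData(data, tags); err != nil {
//			// handle malformed profile
//		}
//	}
//	b.build() // flushes the gzipped protobuf to w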

// addCPUData adds the CPU profiling data to the profile.
//
// The data must be a whole number of records, as delivered by the runtime.
// len(tags) must be equal to the number of records in data.
func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error {
	if !b.havePeriod {
		// first record is period
		if len(data) < 3 {
			return fmt.Errorf("truncated profile")
		}
		if data[0] != 3 || data[2] == 0 {
			return fmt.Errorf("malformed profile")
		}
		// data[2] is sampling rate in Hz. Convert to sampling
		// period in nanoseconds.
		b.period = 1e9 / int64(data[2])
		b.havePeriod = true
		data = data[3:]
		// Consume tag slot. Note that there isn't a meaningful tag
		// value for this record.
		tags = tags[1:]
	}

	// Parse CPU samples from the profile.
	// Each sample is 3+n uint64s:
	//	data[0] = 3+n
	//	data[1] = time stamp (ignored)
	//	data[2] = count
	//	data[3:3+n] = stack
	// If the count is 0 and the stack has length 1,
	// that's an overflow record inserted by the runtime
	// to indicate that stack[0] samples were lost.
	// Otherwise the count is usually 1,
	// but in a few special cases like lost non-Go samples
	// there can be larger counts.
	// Because many samples with the same stack arrive,
	// we want to deduplicate immediately, which we do
	// using the b.m profMap.
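	//
	// For example, with the default 100 Hz profiler the header record is
	// [3, <ignored>, 100], giving b.period = 1e9/100 ns. A sample with a
	// two-frame stack then arrives as [5, <timestamp>, 1, pc1, pc2], and an
	// overflow record reporting 7 lost samples as [4, <timestamp>, 0, 7].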
	for len(data) > 0 {
		if len(data) < 3 || data[0] > uint64(len(data)) {
			return fmt.Errorf("truncated profile")
		}
		if data[0] < 3 || tags != nil && len(tags) < 1 {
			return fmt.Errorf("malformed profile")
		}
		if len(tags) < 1 {
			return fmt.Errorf("mismatched profile records and tags")
		}
		count := data[2]
		stk := data[3:data[0]]
		data = data[data[0]:]
		tag := tags[0]
		tags = tags[1:]

		if count == 0 && len(stk) == 1 {
			// overflow record
			count = uint64(stk[0])
			stk = []uint64{
				// gentraceback guarantees that PCs in the
				// stack can be unconditionally decremented and
				// still be valid, so we must do the same.
				uint64(abi.FuncPCABIInternal(lostProfileEvent) + 1),
			}
		}
		b.m.lookup(stk, tag).count += int64(count)
	}

	if len(tags) != 0 {
		return fmt.Errorf("mismatched profile records and tags")
	}
	return nil
}

// build completes the profile and flushes it, gzip-compressed,
// to the underlying io.Writer.
func (b *profileBuilder) build() {
	b.end = time.Now()

	b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano())
	if b.havePeriod { // must be CPU profile
		b.pbValueType(tagProfile_SampleType, "samples", "count")
		b.pbValueType(tagProfile_SampleType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_DurationNanos, b.end.Sub(b.start).Nanoseconds())
		b.pbValueType(tagProfile_PeriodType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_Period, b.period)
	}

	values := []int64{0, 0}
	var locs []uint64

	for e := b.m.all; e != nil; e = e.nextAll {
		values[0] = e.count
		values[1] = e.count * b.period

		var labels func()
		if e.tag != nil {
			labels = func() {
				for _, lbl := range (*labelMap)(e.tag).list {
					b.pbLabel(tagSample_Label, lbl.key, lbl.value, 0)
				}
			}
		}

		locs = b.appendLocsForStack(locs[:0], e.stk)

		b.pbSample(values, locs, labels)
	}

	for i, m := range b.mem {
		hasFunctions := m.funcs == lookupTried // lookupTried but not lookupFailed
		b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions)
	}

	// TODO: Anything for tagProfile_DropFrames?
	// TODO: Anything for tagProfile_KeepFrames?

	b.pb.strings(tagProfile_StringTable, b.strings)
	b.zw.Write(b.pb.data)
	b.zw.Close()
}

// appendLocsForStack appends the location IDs for the given stack trace to the given
// location ID slice, locs. The addresses in the stack are return PCs or 1 + the PC of
// an inline marker, as returned by the runtime traceback function.
//
// It may return an empty slice even if stk is non-empty, for example if stk consists
// solely of runtime.goexit. We still count these empty stacks in profiles in order to
// get the right cumulative sample count.
//
// It may emit to b.pb, so there must be no message encoding in progress.
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
	b.deck.reset()

	// The last frame might be truncated. Recover lost inline frames.
	origStk := stk
	stk = runtime_expandFinalInlineFrame(stk)

	for len(stk) > 0 {
		addr := stk[0]
		if l, ok := b.locs[addr]; ok {
			// When generating code for an inlined function, the compiler adds
			// NOP instructions to the outermost function as a placeholder for
			// each layer of inlining. When the runtime generates tracebacks for
			// stacks that include inlined functions, it uses the addresses of
			// those NOPs as "fake" PCs on the stack as if they were regular
			// function call sites. But if a profiling signal arrives while the
			// CPU is executing one of those NOPs, its PC will show up as a leaf
			// in the profile with its own Location entry. So, always check
			// whether addr is a "fake" PC in the context of the current call
			// stack by trying to add it to the inlining deck before assuming
			// that the deck is complete.
			if len(b.deck.pcs) > 0 {
				if added := b.deck.tryAdd(addr, l.firstPCFrames, l.firstPCSymbolizeResult); added {
					stk = stk[1:]
					continue
				}
			}

			// first record the location if there is any pending accumulated info.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}

			// then, record the cached location.
			locs = append(locs, l.id)

			// Skip the matching pcs.
			//
			// Even if stk was truncated due to the stack depth
			// limit, expandFinalInlineFrame above has already
			// fixed the truncation, ensuring it is long enough.
			if len(l.pcs) > len(stk) {
				panic(fmt.Sprintf("stack too short to match cached location; stk = %#x, l.pcs = %#x, original stk = %#x", stk, l.pcs, origStk))
			}
			stk = stk[len(l.pcs):]
			continue
		}

		frames, symbolizeResult := allFrames(addr)
		if len(frames) == 0 { // runtime.goexit.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}
			stk = stk[1:]
			continue
		}

		if added := b.deck.tryAdd(addr, frames, symbolizeResult); added {
			stk = stk[1:]
			continue
		}
		// add failed because this addr is not inlined with the
		// existing PCs in the deck. Flush the deck and retry handling
		// this pc.
		if id := b.emitLocation(); id > 0 {
			locs = append(locs, id)
		}

		// check cache again - previous emitLocation added a new entry
		if l, ok := b.locs[addr]; ok {
			locs = append(locs, l.id)
			stk = stk[len(l.pcs):] // skip the matching pcs.
		} else {
			b.deck.tryAdd(addr, frames, symbolizeResult) // must succeed.
			stk = stk[1:]
		}
	}
	if id := b.emitLocation(); id > 0 { // emit remaining location.
		locs = append(locs, id)
	}
	return locs
}
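
// For example (PCs hypothetical, assuming no inlining among them): the first
// time a two-frame stack [pcA, pcB] is seen, it emits Locations 1 and 2 and
// caches them in b.locs; a later stack [pcA, pcB, pcC] reuses IDs 1 and 2
// from the cache and only emits a new Location 3 for pcC:
//
//	locs = b.appendLocsForStack(locs[:0], stk) // => [1, 2, 3]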

// Here's an example of how Go 1.17 writes out inlined functions, compiled for
// linux/amd64. The disassembly of main.main shows two levels of inlining: main
// calls b, b calls a, a does some work.
//
//   inline.go:9   0x4553ec  90              NOPL                 // func main()    { b(v) }
//   inline.go:6   0x4553ed  90              NOPL                 // func b(v *int) { a(v) }
//   inline.go:5   0x4553ee  48c7002a000000  MOVQ $0x2a, 0(AX)    // func a(v *int) { *v = 42 }
//
// If a profiling signal arrives while executing the MOVQ at 0x4553ee (for line
// 5), the runtime will report the stack as the MOVQ frame being called by the
// NOPL at 0x4553ed (for line 6) being called by the NOPL at 0x4553ec (for line
// 9).
//
// The role of pcDeck is to collapse those three frames back into a single
// location at 0x4553ee, with file/line/function symbolization info representing
// the three layers of calls. It does that via sequential calls to pcDeck.tryAdd
// starting with the leaf-most address. The fourth call to pcDeck.tryAdd will be
// for the caller of main.main. Because main.main was not inlined in its caller,
// the deck will reject the addition, and the fourth PC on the stack will get
// its own location.

// pcDeck is a helper to detect a sequence of inlined functions from
// a stack trace returned by the runtime.
//
// The stack traces returned by runtime's traceback functions are fully
// expanded (at least for Go functions) and include the fake pcs representing
// inlined functions. The profile proto expects the inlined functions to be
// encoded in one Location message.
// https://github.com/google/pprof/blob/5e965273ee43930341d897407202dd5e10e952cb/proto/profile.proto#L177-L184
//
// Runtime does not directly expose whether a frame is for an inlined function
// and looking up debug info is not ideal, so we use a heuristic to filter
// the fake pcs and restore the inlined and entry functions. Inlined functions
// have the following properties:
//
//	Frame's Func is nil (note: also true for non-Go functions), and
//	Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and
//	Frame's Name does not match its entry function frame's name (note: inlined functions cannot be directly recursive).
//
// As we read and process the pcs in a stack trace one by one (from leaf to root),
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
// until we observe the entry function frame.
type pcDeck struct {
	pcs             []uintptr
	frames          []runtime.Frame
	symbolizeResult symbolizeFlag

	// firstPCFrames indicates the number of frames associated with the first
	// (leaf-most) PC in the deck
	firstPCFrames int
	// firstPCSymbolizeResult holds the results of the allFrames call for the
	// first (leaf-most) PC in the deck
	firstPCSymbolizeResult symbolizeFlag
}

func (d *pcDeck) reset() {
	d.pcs = d.pcs[:0]
	d.frames = d.frames[:0]
	d.symbolizeResult = 0
	d.firstPCFrames = 0
	d.firstPCSymbolizeResult = 0
}

// tryAdd tries to add the pc and Frames expanded from it (most likely one,
// since the stack trace is already fully expanded) and the symbolizeResult
// to the deck. If it fails, the caller needs to flush the deck and retry.
func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
	if existing := len(d.frames); existing > 0 {
		// 'd.frames' are all expanded from one 'pc' and represent all
		// inlined functions so we check only the last one.
		newFrame := frames[0]
		last := d.frames[existing-1]
		if last.Func != nil { // the last frame can't be inlined. Flush.
			return false
		}
		if last.Entry == 0 || newFrame.Entry == 0 { // Possibly not a Go function. Don't try to merge.
			return false
		}

		if last.Entry != newFrame.Entry { // newFrame is for a different function.
			return false
		}
		if runtime_FrameSymbolName(&last) == runtime_FrameSymbolName(&newFrame) { // maybe recursion.
			return false
		}
	}
	d.pcs = append(d.pcs, pc)
	d.frames = append(d.frames, frames...)
	d.symbolizeResult |= symbolizeResult
	if len(d.pcs) == 1 {
		d.firstPCFrames = len(d.frames)
		d.firstPCSymbolizeResult = symbolizeResult
	}
	return true
}
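
// Illustrative tryAdd sequence (PCs and frame slices hypothetical), matching
// the Go 1.17 example above: walking leaf to root, the deck accepts PCs that
// look like inline layers of one physical frame and rejects the next frame:
//
//	d.tryAdd(pc1, framesOfA, res)      // true: deck was empty
//	d.tryAdd(pc2, framesOfB, res)      // true: last.Func == nil, same Entry, new name
//	d.tryAdd(pc3, framesOfMain, res)   // true: still the same physical frame
//	d.tryAdd(pc4, framesOfCaller, res) // false: different Entry; the caller
//	                                   // flushes via emitLocation and retries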

// emitLocation emits the new location and function information recorded in the deck
// and returns the location ID encoded in the profile protobuf.
// It emits to b.pb, so there must be no message encoding in progress.
// It resets the deck.
func (b *profileBuilder) emitLocation() uint64 {
	if len(b.deck.pcs) == 0 {
		return 0
	}
	defer b.deck.reset()

	addr := b.deck.pcs[0]
	firstFrame := b.deck.frames[0]

	// We can't write out functions while in the middle of the
	// Location message, so record new functions we encounter and
	// write them out after the Location.
	type newFunc struct {
		id         uint64
		name, file string
		startLine  int64
	}
	newFuncs := make([]newFunc, 0, 8)

	id := uint64(len(b.locs)) + 1
	b.locs[addr] = locInfo{
		id:                     id,
		pcs:                    append([]uintptr{}, b.deck.pcs...),
		firstPCSymbolizeResult: b.deck.firstPCSymbolizeResult,
		firstPCFrames:          append([]runtime.Frame{}, b.deck.frames[:b.deck.firstPCFrames]...),
	}

	start := b.pb.startMessage()
	b.pb.uint64Opt(tagLocation_ID, id)
	b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC))
	for _, frame := range b.deck.frames {
		// Write out each line in frame expansion.
		funcName := runtime_FrameSymbolName(&frame)
		funcID := uint64(b.funcs[funcName])
		if funcID == 0 {
			funcID = uint64(len(b.funcs)) + 1
			b.funcs[funcName] = int(funcID)
			newFuncs = append(newFuncs, newFunc{
				id:        funcID,
				name:      funcName,
				file:      frame.File,
				startLine: int64(runtime_FrameStartLine(&frame)),
			})
		}
		b.pbLine(tagLocation_Line, funcID, int64(frame.Line))
	}
	for i := range b.mem {
		if b.mem[i].start <= addr && addr < b.mem[i].end || b.mem[i].fake {
			b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1))

			m := b.mem[i]
			m.funcs |= b.deck.symbolizeResult
			b.mem[i] = m
			break
		}
	}
	b.pb.endMessage(tagProfile_Location, start)

	// Write out functions we found during frame expansion.
	for _, fn := range newFuncs {
		start := b.pb.startMessage()
		b.pb.uint64Opt(tagFunction_ID, fn.id)
		b.pb.int64Opt(tagFunction_Name, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_SystemName, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_Filename, b.stringIndex(fn.file))
		b.pb.int64Opt(tagFunction_StartLine, fn.startLine)
		b.pb.endMessage(tagProfile_Function, start)
	}

	b.flush()
	return id
}

var space = []byte(" ")
var newline = []byte("\n")

func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) {
	// $ cat /proc/self/maps
	// 00400000-0040b000 r-xp 00000000 fc:01 787766                             /bin/cat
	// 0060a000-0060b000 r--p 0000a000 fc:01 787766                             /bin/cat
	// 0060b000-0060c000 rw-p 0000b000 fc:01 787766                             /bin/cat
	// 014ab000-014cc000 rw-p 00000000 00:00 0                                  [heap]
	// 7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064                    /usr/lib/locale/locale-archive
	// 7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0
	// 7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0
	// 7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0
	// 7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0
	// 7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0                          [stack]
	// 7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0                          [vdso]
	// ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

	var line []byte
	// next removes and returns the next field in the line.
	// It also removes from line any spaces following the field.
	next := func() []byte {
		var f []byte
		f, line, _ = bytes.Cut(line, space)
		line = bytes.TrimLeft(line, " ")
		return f
	}

	for len(data) > 0 {
		line, data, _ = bytes.Cut(data, newline)
		addr := next()
		loStr, hiStr, ok := strings.Cut(string(addr), "-")
		if !ok {
			continue
		}
		lo, err := strconv.ParseUint(loStr, 16, 64)
		if err != nil {
			continue
		}
		hi, err := strconv.ParseUint(hiStr, 16, 64)
		if err != nil {
			continue
		}
		perm := next()
		if len(perm) < 4 || perm[2] != 'x' {
			// Only interested in executable mappings.
			continue
		}
		offset, err := strconv.ParseUint(string(next()), 16, 64)
		if err != nil {
			continue
		}
		next()          // dev
		inode := next() // inode
		if line == nil {
			continue
		}
		file := string(line)

		// Trim deleted file marker.
		 := " (deleted)"
		 := len()
		if len() >=  && [len()-:] ==  {
			 = [:len()-]
		}

		if len() == 1 && [0] == '0' &&  == "" {
			// Huge-page text mappings list the initial fragment of
			// mapped but unpopulated memory as being inode 0.
			// Don't report that part.
			// But [vdso] and [vsyscall] are inode 0, so let non-empty file names through.
			continue
		}

		// TODO: pprof's remapMappingIDs makes one adjustment:
		// 1. If there is an /anon_hugepage mapping first and it is
		// consecutive to a next mapping, drop the /anon_hugepage.
		// There's no indication why this is needed.
		// Let's try not doing this and see what breaks.
		// If we do need it, it would go here, before we
		// enter the mappings into b.mem in the first place.

		buildID, _ := elfBuildID(file)
		addMapping(lo, hi, offset, file, buildID)
	}
}
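
// A minimal sketch of how this parser is driven (the platform-specific
// readMapping is the real caller; reading the file directly here is
// illustrative):
//
//	data, _ := os.ReadFile("/proc/self/maps")
//	parseProcSelfMaps(data, b.addMapping)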

func (b *profileBuilder) addMapping(lo, hi, offset uint64, file, buildID string) {
	b.addMappingEntry(lo, hi, offset, file, buildID, false)
}

func (b *profileBuilder) addMappingEntry(lo, hi, offset uint64, file, buildID string, fake bool) {
	b.mem = append(b.mem, memMap{
		start:   uintptr(lo),
		end:     uintptr(hi),
		offset:  offset,
		file:    file,
		buildID: buildID,
		fake:    fake,
	})
}