// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump( uintptr) {
	 := stopTheWorld(stwWriteHeapDump)

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var  MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&)
		writeheapdump_m(, &)
	})

	startTheWorld()
}

// Tags and field kinds used in the heap dump format.
// fieldKind* values describe entries in a pointer-field list;
// tag* values identify the record types in the dump stream.
const (
	fieldKindEol       = 0
	fieldKindPtr       = 1
	fieldKindIface     = 2
	fieldKindEface     = 3
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer reused by makeheapobjbv; freed in writeheapdump_m.

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte // pending bytes not yet flushed to dumpfd
var nbuf uintptr      // number of valid bytes in buf

func dwrite( unsafe.Pointer,  uintptr) {
	if  == 0 {
		return
	}
	if nbuf+ <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)()[:])
		nbuf += 
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if  >= bufSize {
		write(dumpfd, , int32())
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)()[:])
		nbuf = 
	}
}

func dwritebyte( byte) {
	dwrite(unsafe.Pointer(&), 1)
}

// flush writes any buffered dump data to dumpfd and resets the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256 // must be a power of two (indexed by hash & (typeCacheBuckets-1))
	typeCacheAssoc   = 4   // entries per bucket, kept in MRU order
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint( uint64) {
	var  [10]byte
	var  int
	for  >= 0x80 {
		[] = byte( | 0x80)
		++
		 >>= 7
	}
	[] = byte()
	++
	dwrite(unsafe.Pointer(&), uintptr())
}

func dumpbool( bool) {
	if  {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents.
func dumpmemrange( unsafe.Pointer,  uintptr) {
	dumpint(uint64())
	dwrite(, )
}

func dumpslice( []byte) {
	dumpint(uint64(len()))
	if len() > 0 {
		dwrite(unsafe.Pointer(&[0]), uintptr(len()))
	}
}

// dumpstr writes a length-prefixed string to the dump.
func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

// dump information for a type.
func dumptype( *_type) {
	if  == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	 := &typecache[.Hash&(typeCacheBuckets-1)]
	if  == .t[0] {
		return
	}
	for  := 1;  < typeCacheAssoc; ++ {
		if  == .t[] {
			// Move-to-front
			for  := ;  > 0; -- {
				.t[] = .t[-1]
			}
			.t[0] = 
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for  := typeCacheAssoc - 1;  > 0; -- {
		.t[] = .t[-1]
	}
	.t[0] = 

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(.Size_))
	 := toRType()
	if  := .Uncommon();  == nil || .nameOff(.PkgPath).Name() == "" {
		dumpstr(.string())
	} else {
		 := .nameOff(.PkgPath).Name()
		 := .name()
		dumpint(uint64(uintptr(len()) + 1 + uintptr(len())))
		dwrite(unsafe.Pointer(unsafe.StringData()), uintptr(len()))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData()), uintptr(len()))
	}
	dumpbool(.Kind_&kindDirectIface == 0 || .PtrBytes != 0)
}

// dump an object.
func dumpobj( unsafe.Pointer,  uintptr,  bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr()))
	dumpmemrange(, )
	dumpfields()
}

// dumpotherroot records a miscellaneous root pointer with a description.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer( unsafe.Pointer,  *funcval,  *_type,  *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr()))
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(uintptr(unsafe.Pointer(.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(uintptr(unsafe.Pointer())))
}

// childInfo carries information from a callee stack frame up to its
// caller while walking a goroutine's stack in dumpgoroutine/dumpframe.
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv.
func dumpbv( *bitvector,  uintptr) {
	for  := uintptr(0);  < uintptr(.n); ++ {
		if .ptrbit() == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64( + *goarch.PtrSize))
		}
	}
}

func dumpframe( *stkframe,  *childInfo) {
	 := .fn

	// Figure out what we can about our stack map
	 := .pc
	 := int32(-1) // Use the entry map at function entry
	if  != .entry() {
		--
		 = pcdatavalue(, abi.PCDATA_StackMapIndex, )
	}
	if  == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		 = 0
	}
	 := (*stackmap)(funcdata(, abi.FUNCDATA_LocalsPointerMaps))

	var  bitvector
	if  != nil && .n > 0 {
		 = stackmapdata(, )
	} else {
		.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(.sp))                              // lowest address in frame
	dumpint(uint64(.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(.sp), .fp-.sp)      // frame contents
	dumpint(uint64(.entry()))
	dumpint(uint64(.pc))
	dumpint(uint64(.continpc))
	 := funcname()
	if  == "" {
		 = "unknown function"
	}
	dumpstr()

	// Dump fields in the outargs section
	if .args.n >= 0 {
		dumpbv(&.args, .argoff)
	} else {
		// conservative - everything might be a pointer
		for  := .argoff;  < .argoff+.arglen;  += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64())
		}
	}

	// Dump fields in the local vars section
	if  == nil {
		// No locals information, dump everything.
		for  := .arglen;  < .varp-.sp;  += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64())
		}
	} else if .n < 0 {
		// Locals size information, dump just the locals.
		 := uintptr(-.n)
		for  := .varp -  - .sp;  < .varp-.sp;  += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64())
		}
	} else if .n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&, .varp-uintptr(.n)*goarch.PtrSize-.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	.argoff = .argp - .fp
	.arglen = .argBytes()
	.sp = (*uint8)(unsafe.Pointer(.sp))
	.depth++
	 = (*stackmap)(funcdata(, abi.FUNCDATA_ArgsPointerMaps))
	if  != nil {
		.args = stackmapdata(, )
	} else {
		.args.n = -1
	}
	return
}

func dumpgoroutine( *g) {
	var , ,  uintptr
	if .syscallsp != 0 {
		 = .syscallsp
		 = .syscallpc
		 = 0
	} else {
		 = .sched.sp
		 = .sched.pc
		 = .sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64())
	dumpint(.goid)
	dumpint(uint64(.gopc))
	dumpint(uint64(readgstatus()))
	dumpbool(isSystemGoroutine(, false))
	dumpbool(false) // isbackground
	dumpint(uint64(.waitsince))
	dumpstr(.waitreason.String())
	dumpint(uint64(uintptr(.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(._panic))))

	// dump stack
	var  childInfo
	.args.n = -1
	.arglen = 0
	.sp = nil
	.depth = 0
	var  unwinder
	for .initAt(, , , , 0); .valid(); .next() {
		dumpframe(&.frame, &)
	}

	// dump defer & panic records
	for  := ._defer;  != nil;  = .link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer())))
		dumpint(uint64(uintptr(unsafe.Pointer())))
		dumpint(uint64(.sp))
		dumpint(uint64(.pc))
		 := *(**funcval)(unsafe.Pointer(&.fn))
		dumpint(uint64(uintptr(unsafe.Pointer())))
		if .fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(.link))))
	}
	for  := ._panic;  != nil;  = .link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer())))
		dumpint(uint64(uintptr(unsafe.Pointer())))
		 := efaceOf(&.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(._type))))
		dumpint(uint64(uintptr(.data)))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(.link))))
	}
}

func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func( *g) {
		 := readgstatus() // The world is stopped so gp will not be in a scan state.
		switch  {
		default:
			print("runtime: unexpected G.status ", hex(), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine()
		}
	})
}

func finq_callback( *funcval,  unsafe.Pointer,  uintptr,  *_type,  *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr()))
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(uintptr(unsafe.Pointer(.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(uintptr(unsafe.Pointer())))
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// mspan.types
	for ,  := range mheap_.allspans {
		if .state.get() == mSpanInUse {
			// Finalizers
			for  := .specials;  != nil;  = .next {
				if .kind != _KindSpecialFinalizer {
					continue
				}
				 := (*specialfinalizer)(unsafe.Pointer())
				 := unsafe.Pointer(.base() + uintptr(.special.offset))
				dumpfinalizer(, .fn, .fint, .ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for ,  := range mheap_.allspans {
		if .state.get() != mSpanInUse {
			continue
		}
		 := .base()
		 := .elemsize
		 := (.npages << _PageShift) / 
		if  > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		for  := uint16(0);  < .nelems; ++ {
			if .isFree(uintptr()) {
				freemark[] = true
			}
		}

		for  := uintptr(0);  < ; ,  = +1, + {
			if freemark[] {
				freemark[] = false
				continue
			}
			dumpobj(unsafe.Pointer(), , makeheapobjbv(, ))
		}
	}
}

func dumpparams() {
	dumpint(tagParams)
	 := uintptr(1)
	if *(*byte)(unsafe.Pointer(&)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	var ,  uintptr
	for  := range mheap_.arenas {
		if mheap_.arenas[] == nil {
			continue
		}
		for ,  := range mheap_.arenas[] {
			if  == nil {
				continue
			}
			 := arenaBase(arenaIdx()<<arenaL1Shift | arenaIdx())
			if  == 0 ||  <  {
				 = 
			}
			if +heapArenaBytes >  {
				 =  + heapArenaBytes
			}
		}
	}
	dumpint(uint64())
	dumpint(uint64())
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}

func itab_callback( *itab) {
	 := ._type
	dumptype()
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64(uintptr(unsafe.Pointer())))
}

// dumpitabs dumps every itab in the program via itab_callback.
func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for  := allm;  != nil;  = .alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer())))
		dumpint(uint64(.id))
		dumpint(.procid)
	}
}

//go:systemstack
func dumpmemstats( *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(.Alloc)
	dumpint(.TotalAlloc)
	dumpint(.Sys)
	dumpint(.Lookups)
	dumpint(.Mallocs)
	dumpint(.Frees)
	dumpint(.HeapAlloc)
	dumpint(.HeapSys)
	dumpint(.HeapIdle)
	dumpint(.HeapInuse)
	dumpint(.HeapReleased)
	dumpint(.HeapObjects)
	dumpint(.StackInuse)
	dumpint(.StackSys)
	dumpint(.MSpanInuse)
	dumpint(.MSpanSys)
	dumpint(.MCacheInuse)
	dumpint(.MCacheSys)
	dumpint(.BuckHashSys)
	dumpint(.GCSys)
	dumpint(.OtherSys)
	dumpint(.NextGC)
	dumpint(.LastGC)
	dumpint(.PauseTotalNs)
	for  := 0;  < 256; ++ {
		dumpint(.PauseNs[])
	}
	dumpint(uint64(.NumGC))
}

func dumpmemprof_callback( *bucket,  uintptr,  *uintptr, , ,  uintptr) {
	 := (*[100000]uintptr)(unsafe.Pointer())
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer())))
	dumpint(uint64())
	dumpint(uint64())
	for  := uintptr(0);  < ; ++ {
		 := []
		 := findfunc()
		if !.valid() {
			var  [64]byte
			 := len()
			--
			[] = ')'
			if  == 0 {
				--
				[] = '0'
			} else {
				for  > 0 {
					--
					[] = "0123456789abcdef"[&15]
					 >>= 4
				}
			}
			--
			[] = 'x'
			--
			[] = '0'
			--
			[] = '('
			dumpslice([:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname())
			if  > 0 &&  > .entry() {
				--
			}
			,  := funcline(, )
			dumpstr()
			dumpint(uint64())
		}
	}
	dumpint(uint64())
	dumpint(uint64())
}

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for ,  := range mheap_.allspans {
		if .state.get() != mSpanInUse {
			continue
		}
		for  := .specials;  != nil;  = .next {
			if .kind != _KindSpecialProfile {
				continue
			}
			 := (*specialprofile)(unsafe.Pointer())
			 := .base() + uintptr(.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64())
			dumpint(uint64(uintptr(unsafe.Pointer(.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")

func mdump( *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for ,  := range mheap_.allspans {
		if .state.get() == mSpanInUse {
			.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats()
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

func writeheapdump_m( uintptr,  *MemStats) {
	assertWorldStopped()

	 := getg()
	casGToWaiting(.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = 

	// Call dump routine.
	mdump()

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields( bitvector) {
	dumpbv(&, 0)
	dumpint(fieldKindEol)
}

func makeheapobjbv( uintptr,  uintptr) bitvector {
	// Extend the temp buffer if necessary.
	 :=  / goarch.PtrSize
	if uintptr(len(tmpbuf)) < /8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		 := /8 + 1
		 := sysAlloc(, &memstats.other_sys)
		if  == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)()[:]
	}
	// Convert heap bitmap to pointer bitmap.
	for  := uintptr(0);  < /8+1; ++ {
		tmpbuf[] = 0
	}
	if goexperiment.AllocHeaders {
		 := spanOf()
		 := .typePointersOf(, )
		for {
			var  uintptr
			if ,  = .next( + );  == 0 {
				break
			}
			 := ( - ) / goarch.PtrSize
			tmpbuf[/8] |= 1 << ( % 8)
		}
	} else {
		 := heapBitsForAddr(, )
		for {
			var  uintptr
			,  = .next()
			if  == 0 {
				break
			}
			 := ( - ) / goarch.PtrSize
			tmpbuf[/8] |= 1 << ( % 8)
		}
	}
	return bitvector{int32(), &tmpbuf[0]}
}