// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	_ sys.NotInHeap

	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample uintptr // trigger heap sample after allocating this many bytes
	scanAlloc  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
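	//
	// For example (illustrative only): with a 16-byte tiny block whose
	// tinyoffset is 4, an 8-byte request is rounded up to offset 8, the
	// address tiny+8 is returned, and tinyoffset becomes 16.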
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

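	// A spanClass packs a size class together with a noscan bit; for
	// example (see makeSpanClass in mheap.go), size class 5 with noscan
	// set maps to index 5<<1|1 = 11.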
	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen atomic.Uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
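
// For example, here is a sketch (not the exact code; see stackfree in
// stack.go) of how a freed stack block v of n bytes is pushed onto the
// cached free list for its size order in an mcache c:
//
//	x := gclinkptr(v)
//	x.ptr().next = c.stackcache[order].list
//	c.stackcache[order].list = x
//	c.stackcache[order].size += n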

// dummy mspan that contains no free objects.
var emptymspan mspan

func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen.Store(mheap_.sweepgen)
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}

// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// In some cases there is no way to simply release
// resources, such as statistics, so donate them to
// a different mcache (the recipient).
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if we don't have a P and we're not bootstrapping. The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache {
	// Grab the mcache, since that's where stats live.
	pp := mp.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}
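
// For example, a sketch of a typical non-preemptible caller (mallocgc in
// malloc.go follows this shape; the error message below is illustrative):
//
//	mp := acquirem()
//	c := getMCache(mp)
//	if c == nil {
//		throw("no mcache available")
//	}
//	// ... allocate using c ...
//	releasem(mp)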

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if s.allocCount != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)

		// Count up how many slots were used and record it.
		stats := memstats.heapStats.acquire()
		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)

		// Flush tinyAllocs.
		if spc == tinySpanClass {
			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
			c.tinyAllocs = 0
		}
		memstats.heapStats.release()

		// Count the allocs in inconsistent, internal stats.
		bytesAllocated := slotsUsed * int64(s.elemsize)
		gcController.totalAlloc.Add(bytesAllocated)

		// Clear the second allocCount just to be safe.
		s.allocCountBeforeCache = 0
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if s.allocCount == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Store the current alloc count for accounting later.
	s.allocCountBeforeCache = s.allocCount

	// Update heapLive and flush scanAlloc.
	//
	// We have not yet allocated anything new into the span, but we
	// assume that all of its slots will get used, so this makes
	// heapLive an overestimate.
	//
	// When the span gets uncached, we'll fix up this overestimate
	// if necessary (see releaseAll).
	//
	// We pick an overestimate here because an underestimate leads
	// the pacer to believe that it's in better shape than it is,
	// which appears to lead to more memory used. See #53738 for
	// more details.
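	//
	// For example (illustrative numbers): caching a fresh 8 KiB span of
	// 1 KiB slots that already has 2 slots allocated adds 8192-2048 = 6144
	// bytes to heapLive, even though nothing new has been allocated from it.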
	usedBytes := uintptr(s.allocCount) * s.elemsize
	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
	c.scanAlloc = 0

	c.alloc[spc] = s
}

// allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}
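	// For example (assuming the usual 8 KiB runtime page size): a
	// 70000-byte object needs 70000>>13 = 8 full pages plus a partial
	// page, so npages becomes 9.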

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc)
	if s == nil {
		throw("out of memory")
	}

	// Count the alloc in consistent, external stats.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
	atomic.Xadd64(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Count the alloc in inconsistent, internal stats.
	gcController.totalAlloc.Add(int64(npages * pageSize))

	// Update heapLive.
	gcController.update(int64(s.npages*pageSize), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
	s.limit = s.base() + size
	s.initHeapBits(false)
	return s
}

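// releaseAll returns all of c's cached spans to their mcentrals, flushes
// c's accumulated allocation statistics into the global stats, and resets
// the tiny allocator, undoing refill's heapLive overestimate as needed.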
func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
			s.allocCountBeforeCache = 0

			// Adjust smallAllocCount for whatever was allocated.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
			memstats.heapStats.release()

			// Adjust the actual allocs in inconsistent, internal stats.
			// We assumed earlier that the full span gets allocated.
			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))

			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in gcController.heapLive.
				// Undo this.
				//
				// If this span was cached before sweep, then gcController.heapLive was totally
				// recomputed since caching this span, so we don't do this for stale spans.
				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize)
			}

			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Update heapLive and heapScan.
	gcController.update(dHeapLive, scanAlloc)
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
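
	// mheap_.sweepgen advances by two each GC cycle, so flushGen == sg
	// below means c was already flushed for this cycle, flushGen == sg-2
	// means c was last flushed in the previous cycle and must be flushed
	// now, and any other value indicates a missed flush.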
	sg := mheap_.sweepgen
	flushGen := c.flushGen.Load()
	if flushGen == sg {
		return
	} else if flushGen != sg-2 {
		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
}