// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral holds two sets of mspans: those with free objects (c.partial)
// and those that are completely allocated (c.full).

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
)

// Central list of free objects of a given size.
type mcentral struct {
	_         sys.NotInHeap
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
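
// sweepSetIndices is an illustrative sketch, not part of the runtime: it
// spells out the index arithmetic shared by the four accessors above.
// sweepgen is always even between GC cycles and advances by 2 each cycle,
// so the computed indices flip every cycle and the two spanSets in each
// pair trade swept/unswept roles.
func sweepSetIndices(sweepgen uint32) (swept, unswept uint32) {
	swept = sweepgen / 2 % 2 // e.g. sweepgen == 4 -> swept sets at index 0
	unswept = 1 - swept      // e.g. sweepgen == 4 -> unswept sets at index 1
	return
}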

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceDone = true
		traceRelease(trace)
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if !traceDone {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCSweepDone()
			traceRelease(trace)
		}
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
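	// Worked example (illustrative): if s.freeindex == 70, then
	// freeByteBase == 70 &^ 63 == 64 and whichByte == 64/8 == 8, so the
	// cache is refilled starting at byte 8 of the allocation bitmap and
	// then shifted right by 70%64 == 6 bits, putting object 70 at bit 0.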
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
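	// (Sweepgen encoding, per the mspan.sweepgen comment in mheap.go:
	// sg-2 means the span needs sweeping, sg-1 that it is being swept,
	// sg that it is swept and ready to use, sg+1 that it was cached
	// before sweep began and still needs sweeping, and sg+3 that it was
	// swept and then cached.)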

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	s.initHeapBits(false)
	return s
}
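
// divideBy48 is an illustrative sketch, not the runtime's implementation:
// it shows the multiply-and-shift division trick that grow relies on via
// divideByElemSize, specialized to a hypothetical 48-byte size class. The
// runtime precomputes a per-size-class reciprocal when spans are
// initialized; here the magic constant is assumed to be ceil(2^32/size),
// which yields exact quotients for any dividend up to a span's byte size.
func divideBy48(x uint32) uint32 {
	const magic = (1<<32 + 48 - 1) / 48 // ceil(2^32 / 48)
	// High 32 bits of the 64-bit product give the quotient, e.g.
	// divideBy48(96) == 2 and divideBy48(47) == 0.
	return uint32((uint64(x) * magic) >> 32)
}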