// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x

package runtime

import (
	"unsafe"
)

const (
	// The number of levels in the radix tree.
	summaryLevels = 5

	// Constants for testing.
	pageAlloc32Bit = 0
	pageAlloc64Bit = 1

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 13
)
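
// For a concrete sketch of the split (assuming the amd64 constants,
// heapAddrBits = 48 and logPallocChunkBytes = 22): there are 1<<26
// possible chunks, so after the 13 L1 bits above, the remaining
//
//	26 - pallocChunksL1Bits = 13
//
// bits index the L2 of the chunks map.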

// levelBits is the number of bits in the radix for a given level in the super summary
// structure.
//
// The sum of all the entries of levelBits plus logPallocChunkBytes
// should equal heapAddrBits.
var levelBits = [summaryLevels]uint{
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}
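
// For instance, on amd64 (heapAddrBits = 48, summaryLevelBits = 3,
// logPallocChunkBytes = 22), summaryL0Bits works out to 14, so
//
//	levelBits = {14, 3, 3, 3, 3}
//
// which, together with the 22 chunk-offset bits, accounts for all 48
// address bits.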

// levelShift is the number of bits to shift to acquire the radix for a given level
// in the super summary structure.
//
// With levelShift, one can compute the index of the summary at level l related to a
// pointer p by doing:
//
//	p >> levelShift[l]
var levelShift = [summaryLevels]uint{
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
}
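
// Continuing the amd64 example above, the shifts work out to
//
//	levelShift = {34, 31, 28, 25, 22}
//
// so each L0 entry covers 1<<34 bytes (16 GiB) of address space, and the
// leaf shift equals logPallocChunkBytes: leaf summaries are indexed by
// chunk.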

// levelLogPages is log2 the maximum number of runtime pages in the address space
// a summary in the given level represents.
//
// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
var levelLogPages = [summaryLevels]uint{
	logPallocChunkPages + 4*summaryLevelBits,
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}
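
// In the same amd64 configuration (logPallocChunkPages = 9), each leaf
// summary covers 1<<9 = 512 runtime pages (one 4 MiB chunk of 8 KiB
// pages), and each root summary covers 1<<21 pages, i.e. the same 16 GiB
// an L0 entry spans.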

// sysInit performs architecture-dependent initialization of fields
// in pageAlloc. pageAlloc should be uninitialized except for sysStat
// if any runtime statistic should be updated.
func (p *pageAlloc) sysInit(test bool) {
	// Reserve memory for each level. This will get mapped in
	// as R/W by setArenas.
	for l, shift := range levelShift {
		entries := 1 << (heapAddrBits - shift)

		// Reserve b bytes of memory anywhere in the address space.
		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		r := sysReserve(nil, b)
		if r == nil {
			throw("failed to reserve page summary memory")
		}

		// Put this reservation into a slice.
		sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
	}
}
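
// To get a feel for the sizes involved: with the amd64 constants above and
// pallocSumBytes = 8, the per-level reservations made by sysInit are
//
//	level 0: 1<<14 entries = 128 KiB
//	level 1: 1<<17 entries = 1 MiB
//	level 2: 1<<20 entries = 8 MiB
//	level 3: 1<<23 entries = 64 MiB
//	level 4: 1<<26 entries = 512 MiB
//
// All of this is merely reserved address space here; it is committed
// later, as the heap grows.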

// sysGrow performs architecture-dependent operations on heap
// growth for the page allocator, such as mapping in new memory
// for summaries. It also updates the length of the slices in
// p.summary.
//
// base is the base of the newly-added heap memory and limit is
// the first address past the end of the newly-added heap memory.
// Both must be aligned to pallocChunkBytes.
//
// The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// addrRangeToSummaryRange converts a range of addresses into a range
	// of summary indices which must be mapped to support those addresses
	// in the summary range.
	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
	}

	// summaryRangeToSumAddrRange converts a range of indices in any
	// level of p.summary into page-aligned addresses which cover that
	// range of indices.
	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		base := unsafe.Pointer(&p.summary[level][0])
		return addrRange{
			offAddr{uintptr(add(base, baseOffset))},
			offAddr{uintptr(add(base, limitOffset))},
		}
	}

	// addrRangeToSumAddrRange is a convenience function that converts
	// an address range r to the address range of the given summary level
	// that stores the summaries for r.
	addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
		sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
		return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
	}

	// Find the first inUse index which is strictly greater than base.
	//
	// Because this function will never be asked to remap the same memory
	// twice, this index is effectively the index at which we would insert
	// this new growth, and base will never overlap/be contained within
	// any existing range.
	//
	// This will be used to look at what memory in the summary array is already
	// mapped before and after this new range.
	inUseIndex := p.inUse.findSucc(base)

	// Walk up the radix tree and map summaries in as needed.
	for l := range p.summary {
		// Figure out what part of the summary array this new address space needs.
		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))

		// Update the summary slices with a new upper-bound. This ensures
		// we get tight bounds checks on at least the top bound.
		//
		// We must do this regardless of whether we map new memory.
		if needIdxLimit > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:needIdxLimit]
		}

		// Compute the needed address range in the summary array for level l.
		need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)

		// Prune need down to what needs to be newly mapped. Some parts of it may
		// already be mapped by what inUse describes due to page alignment requirements
		// for mapping. Because this function will never be asked to remap the same
		// memory twice, it should never be possible to prune in such a way that causes
		// need to be split.
		if inUseIndex > 0 {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
		}
		if inUseIndex < len(p.inUse.ranges) {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
		}
		// It's possible that after our pruning above, there's nothing new to map.
		if need.size() == 0 {
			continue
		}

		// Map and commit need.
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
		p.summaryMappedReady += need.size()
	}

	// Update the scavenge index.
	p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat)
}
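
// A rough illustration of the pruning above: growing the heap by a single
// 4 MiB chunk needs only pallocSumBytes = 8 bytes of new leaf summary, but
// mapping happens in physPageSize units, so (assuming 4 KiB physical pages)
// one sysMap call covers
//
//	alignUp(8, 4096) / 8 = 512
//
// neighboring chunks' summaries. A later growth landing in that same page
// finds its need range already inside the mapped inUse bounds and prunes
// it to nothing.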

// sysGrow increases the index's backing store in response to a heap growth.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}
	scSize := unsafe.Sizeof(atomicScavChunkData{})
	// Map and commit the pieces of chunks that we need.
	//
	// We always map the full range of the minimum heap address to the
	// maximum heap address. We don't do this for the summary structure
	// because it's quite large and a discontiguous heap could cause a
	// lot of memory to be used. In this situation, the worst case overhead
	// is in the single-digit MiB if we map the whole thing.
	//
	// The base address of the backing store is always page-aligned,
	// because it comes from the OS, so it's sufficient to align the
	// index.
	haveMin := s.min.Load()
	haveMax := s.max.Load()
	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)

	// We need a contiguous range, so extend the range if there's no overlap.
	if needMax < haveMin {
		needMax = haveMin
	}
	if haveMax != 0 && needMin > haveMax {
		needMin = haveMax
	}

	// Avoid a panic from indexing one past the last element.
	chunksBase := uintptr(unsafe.Pointer(&s.chunks[0]))
	have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize)
	need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize)

	// Subtract any overlap from rounding. We can't re-map memory because
	// it'll be zeroed.
	need = need.subtract(have)

	// If we've got something to map, map it, and update the slice bounds.
	if need.size() != 0 {
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
		// Update the indices only after the new memory is valid.
		if haveMax == 0 || needMin < haveMin {
			s.min.Store(needMin)
		}
		if needMax > haveMax {
			s.max.Store(needMax)
		}
	}
	return need.size()
}
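
// The alignment quantum above, physPageSize/scSize, is the number of chunk
// entries that fit in one physical page. Assuming 4 KiB physical pages and
// an 8-byte atomicScavChunkData, that is
//
//	4096 / 8 = 512 entries per mapped page
//
// which is why needMin and needMax are rounded to 512-entry boundaries.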

// sysInit initializes the scavengeIndex' chunks array.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
	n := uintptr(1<<heapAddrBits) / pallocChunkBytes
	nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
	r := sysReserve(nil, nbytes)
	sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
	s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
	return 0 // All memory above is mapped Reserved.
}
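
// Sizing note: with heapAddrBits = 48 and 4 MiB (1<<22 byte) chunks, the
// reservation above works out to
//
//	(1<<48 / 1<<22) * 8 bytes = 1<<29 bytes = 512 MiB
//
// of address space for the chunks array, none of it committed up front;
// sysGrow maps pieces in as the heap actually grows.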