// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime representation of a Go slice header:
// a pointer to the backing array plus the current length and capacity.
// It must stay in sync with the compiler's and reflect's layout of slices.
type slice struct {
	array unsafe.Pointer // pointer to the backing array (nil for a nil slice)
	len   int            // number of elements currently in use
	cap   int            // total elements the backing array can hold
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap // backing storage outside the GC-managed heap
	len   int        // number of elements currently in use
	cap   int        // total elements the backing array can hold
}

// panicmakeslicelen reports that a requested slice length is out of range.
// It is split out of its callers so they stay small enough to inline.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

// panicmakeslicecap reports that a requested slice capacity is out of range.
// It is split out of its callers so they stay small enough to inline.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy( *_type,  int,  int,  unsafe.Pointer) unsafe.Pointer {
	var ,  uintptr
	if uintptr() > uintptr() {
		var  bool
		,  = math.MulUintptr(.Size_, uintptr())
		if  ||  > maxAlloc ||  < 0 {
			panicmakeslicelen()
		}
		 = .Size_ * uintptr()
	} else {
		// fromlen is a known good length providing and equal or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width.
		 = .Size_ * uintptr()
		 = 
	}

	var  unsafe.Pointer
	if !.Pointers() {
		 = mallocgc(, nil, false)
		if  <  {
			memclrNoHeapPointers(add(, ), -)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		 = mallocgc(, , true)
		if  > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice to
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(), uintptr(), , )
		}
	}

	if raceenabled {
		 := getcallerpc()
		 := abi.FuncPCABIInternal()
		racereadrangepc(, , , )
	}
	if msanenabled {
		msanread(, )
	}
	if asanenabled {
		asanread(, )
	}

	memmove(, , )

	return 
}

// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice( *_type, ,  int) unsafe.Pointer {
	,  := math.MulUintptr(.Size_, uintptr())
	if  ||  > maxAlloc ||  < 0 ||  >  {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		,  := math.MulUintptr(.Size_, uintptr())
		if  ||  > maxAlloc ||  < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(, , true)
}

func makeslice64( *_type, ,  int64) unsafe.Pointer {
	 := int()
	if int64() !=  {
		panicmakeslicelen()
	}

	 := int()
	if int64() !=  {
		panicmakeslicecap()
	}

	return makeslice(, , )
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity.
//	   num = number of elements being added
//	    et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/chenzhuoyu/iasm
//   - github.com/cloudwego/dynamicgo
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice( unsafe.Pointer, , ,  int,  *_type) slice {
	 :=  - 
	if raceenabled {
		 := getcallerpc()
		racereadrangepc(, uintptr(*int(.Size_)), , abi.FuncPCABIInternal())
	}
	if msanenabled {
		msanread(, uintptr(*int(.Size_)))
	}
	if asanenabled {
		asanread(, uintptr(*int(.Size_)))
	}

	if  < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if .Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), , }
	}

	 := nextslicecap(, )

	var  bool
	var , ,  uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	 := !.Pointers()
	switch {
	case .Size_ == 1:
		 = uintptr()
		 = uintptr()
		 = roundupsize(uintptr(), )
		 = uintptr() > maxAlloc
		 = int()
	case .Size_ == goarch.PtrSize:
		 = uintptr() * goarch.PtrSize
		 = uintptr() * goarch.PtrSize
		 = roundupsize(uintptr()*goarch.PtrSize, )
		 = uintptr() > maxAlloc/goarch.PtrSize
		 = int( / goarch.PtrSize)
	case isPowerOfTwo(.Size_):
		var  uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			 = uintptr(sys.TrailingZeros64(uint64(.Size_))) & 63
		} else {
			 = uintptr(sys.TrailingZeros32(uint32(.Size_))) & 31
		}
		 = uintptr() << 
		 = uintptr() << 
		 = roundupsize(uintptr()<<, )
		 = uintptr() > (maxAlloc >> )
		 = int( >> )
		 = uintptr() << 
	default:
		 = uintptr() * .Size_
		 = uintptr() * .Size_
		,  = math.MulUintptr(.Size_, uintptr())
		 = roundupsize(, )
		 = int( / .Size_)
		 = uintptr() * .Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if  ||  > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var  unsafe.Pointer
	if !.Pointers() {
		 = mallocgc(, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(, ), -)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		 = mallocgc(, , true)
		if  > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(), uintptr(), -.Size_+.PtrBytes, )
		}
	}
	memmove(, , )

	return slice{, , }
}

// nextslicecap computes the next appropriate slice length.
// It grows small slices by doubling and large slices by roughly 1.25x,
// always returning at least newLen (the required minimum length).
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		// Doubling isn't enough; jump straight to the requested length.
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence
		// when newcap overflows then `uint(newcap) > uint(newLen)`.
		// This allows to check for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}

// reflect_growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/dynamicgo
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice( *_type,  slice,  int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	 -= .cap - .len // preserve memory of old[old.len:old.cap]
	 := growslice(.array, .cap+, .cap, , )
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if !.Pointers() {
		 := uintptr(.cap) * .Size_
		 := uintptr(.len) * .Size_
		memclrNoHeapPointers(add(.array, ), -)
	}
	.len = .len // preserve the old length
	return 
}

// isPowerOfTwo reports whether x is a power of two.
// Note: it returns true for x == 0, which is fine for its callers here
// (element sizes are always non-zero by the time this is consulted).
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy( unsafe.Pointer,  int,  unsafe.Pointer,  int,  uintptr) int {
	if  == 0 ||  == 0 {
		return 0
	}

	 := 
	if  <  {
		 = 
	}

	if  == 0 {
		return 
	}

	 := uintptr() * 
	if raceenabled {
		 := getcallerpc()
		 := abi.FuncPCABIInternal()
		racereadrangepc(, , , )
		racewriterangepc(, , , )
	}
	if msanenabled {
		msanread(, )
		msanwrite(, )
	}
	if asanenabled {
		asanread(, )
		asanwrite(, )
	}

	if  == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)() = *(*byte)() // known to be a byte pointer
	} else {
		memmove(, , )
	}
	return 
}

//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero( int) []byte {
	if uintptr() > maxAlloc {
		panicmakeslicelen()
	}
	 := roundupsize(uintptr(), true)
	return unsafe.Slice((*byte)(mallocgc(uintptr(), nil, false)), )[:]
}