// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by internal/runtime/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length providing and equal or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if !et.Pointers() {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice to
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}
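
// For illustration: a heap-allocated make is lowered by the compiler into a
// call to makeslice plus a slice header built from its result. Roughly (a
// sketch, not the exact generated code; typeOfInt64 stands in for the *_type
// descriptor of int64):
//
//	s := make([]int64, 3, 10)
//
// becomes approximately:
//
//	p := makeslice(typeOfInt64, 3, 10)
//	s := slice{array: p, len: 3, cap: 10}
//
// makeslice only validates len/cap and allocates; zeroing of the new memory
// is done by mallocgc. A make whose result provably does not escape may be
// stack-allocated instead and never reach makeslice.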

func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity.
//	num = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/chenzhuoyu/iasm
//   - github.com/cloudwego/dynamicgo
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}
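
// For illustration: an append that exceeds the current capacity is compiled
// into a call to growslice using the calling convention described above.
// Roughly (a sketch, not the exact generated code; elemType stands in for the
// element's *_type descriptor):
//
//	// s = append(s, x)
//	newLen := s.len + 1
//	if uint(newLen) > uint(s.cap) {
//		s = growslice(s.array, newLen, s.cap, 1, elemType)
//	} else {
//		s.len = newLen
//	}
//	// then store x at index newLen-1
//
// Passing and returning the new length (rather than the old one) means the
// caller does not need to keep the old length live across the call.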

// growsliceNoAlias is like growslice but only for the case where
// we know that oldPtr is not aliased.
//
// In other words, the caller must know that there are no other references
// to the backing memory of the slice being grown aside from the slice header
// that will be updated with new backing memory when growsliceNoAlias
// returns, and therefore oldPtr must be the only pointer to its referent
// aside from the slice header updated by the returned slice.
//
// In addition, oldPtr must point to the start of the allocation and match
// the pointer that was returned by mallocgc. In particular, oldPtr must not
// be an interior pointer, such as after a reslice.
//
// See freegc for details.
func growsliceNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	s := growslice(oldPtr, newLen, oldCap, num, et)
	if goexperiment.RuntimeFreegc && oldPtr != nil && oldPtr != s.array {
		if gp := getg(); uintptr(oldPtr) < gp.stack.lo || gp.stack.hi <= uintptr(oldPtr) {
			// oldPtr does not point into the current stack, and it is not
			// the data pointer for s after the grow, so attempt to free it.
			// (Note that freegc also verifies that oldPtr does not point into our stack,
			// but checking here first is slightly cheaper for the case when
			// oldPtr is on the stack and freegc would be a no-op.)
			//
			// TODO(thepudds): it may be that oldPtr==s.array only when elemsize==0,
			// so perhaps we could prohibit growsliceNoAlias being called in that case
			// and eliminate that check here, or alternatively, we could lean into
			// freegc being a no-op for zero-sized allocations (that is, no check of
			// oldPtr != s.array here and just let freegc return quickly).
			noscan := !et.Pointers()
			freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
		}
	}
	return s
}
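
// For illustration: the no-alias precondition above is a property the caller
// must guarantee; it is not checked here. A sketch of the distinction:
//
//	s := make([]int, 0, 4)
//	for i := 0; i < 100; i++ {
//		s = append(s, i) // s is the only reference to its backing array,
//		                 // so a no-alias grow may free the old array eagerly.
//	}
//
//	t := s[:2]
//	s = append(s, 1, 2, 3, 4, 5) // t still refers to the old backing array,
//	                             // so the old memory must not be freed eagerly.
//
// Whether a particular append call site may use the no-alias variant is
// decided by the caller (normally compiler-generated code), not by this file.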

// nextslicecap computes the next appropriate slice length.
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence
		// when newcap overflows then `uint(newcap) > uint(newLen)`.
		// This allows to check for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}

// reflect_growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/dynamicgo
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}

//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
}

// moveSlice copies the input slice to the heap and returns it.
// et is the element type of the slice.
func moveSlice(et *_type, from unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if from != nil {
			from = unsafe.Pointer(&zerobase)
		}
		return from, 0, 0
	}
	mem := uintptr(cap) * et.Size_
	to := mallocgc(mem, et, true)
	bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), mem, et)
	memmove(to, from, mem)
	return to, len, cap
}
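
// For illustration: a worked example of the growth policy implemented by
// nextslicecap above. The numbers come from the formula in that function and
// ignore roundupsize, which may round the capacity up further to a size class
// inside growslice.
//
//	// oldCap < 256: the capacity doubles.
//	nextslicecap(5, 4)   // == 8
//	nextslicecap(9, 8)   // == 16
//
//	// oldCap >= 256: newcap += (newcap + 3*256) >> 2, repeated until it
//	// covers newLen, i.e. roughly 1.25x growth plus a constant.
//	nextslicecap(257, 256) // == 256 + (256+768)>>2 == 512
//	nextslicecap(513, 512) // == 512 + (512+768)>>2 == 832
//	nextslicecap(833, 832) // == 832 + (832+768)>>2 == 1232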

// moveSliceNoScan is like moveSlice except the element type is known to
// not have any pointers. We instead pass in the size of the element.
func moveSliceNoScan(size uintptr, from unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if from != nil {
			from = unsafe.Pointer(&zerobase)
		}
		return from, 0, 0
	}
	mem := uintptr(cap) * size
	to := mallocgc(mem, nil, false)
	memmove(to, from, mem)
	return to, len, cap
}

// moveSliceNoCap is like moveSlice, but can pick any appropriate capacity
// for the returned slice.
// Elements between len and cap in the returned slice will be zeroed.
func moveSliceNoCap(et *_type, from unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if from != nil {
			from = unsafe.Pointer(&zerobase)
		}
		return from, 0, 0
	}
	mem := uintptr(len) * et.Size_
	capmem := roundupsize(mem, false)
	to := mallocgc(capmem, et, true)
	bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), mem, et)
	memmove(to, from, mem)
	return to, len, int(capmem / et.Size_)
}

// moveSliceNoCapNoScan is a combination of moveSliceNoScan and moveSliceNoCap.
func moveSliceNoCapNoScan(size uintptr, from unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if from != nil {
			from = unsafe.Pointer(&zerobase)
		}
		return from, 0, 0
	}
	mem := uintptr(len) * size
	capmem := roundupsize(mem, true)
	to := mallocgc(capmem, nil, false)
	memmove(to, from, mem)
	if capmem > mem {
		memclrNoHeapPointers(add(to, mem), capmem-mem)
	}
	return to, len, int(capmem / size)
}

// growsliceBuf is like growslice, but we can use the given buffer
// as a backing store if we want. bufPtr must be on the stack.
func growsliceBuf(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufCap int) slice {
	if newLen > bufCap {
		// Doesn't fit, process like a normal growslice.
		return growslice(oldPtr, newLen, oldCap, num, et)
	}
	oldLen := newLen - num
	if oldPtr != bufPtr && oldLen != 0 {
		// Move data to start of buffer.
		// Note: bufPtr is on the stack, so no write barrier needed.
		memmove(bufPtr, oldPtr, uintptr(oldLen)*et.Size_)
	}

	// Pick a new capacity.
	//
	// Unlike growslice, we don't need to double the size each time.
	// The work done here is not proportional to the length of the slice.
	// (Unless the memmove happens above, but that is rare, and in any
	// case there are not many elements on this path.)
	//
	// Instead, we try to just bump up to the next size class.
	// This will ensure that we don't waste any space when we eventually
	// call moveSlice with the resulting slice.
	newCap := int(roundupsize(uintptr(newLen)*et.Size_, !et.Pointers()) / et.Size_)

	// Zero slice beyond newLen.
	// The buffer is stack memory, so NoHeapPointers is ok.
	// Caller will overwrite [oldLen:newLen], so we don't need to zero that portion.
	// If et.Pointers(), buffer is at least initialized so we don't need to
	// worry about the caller overwriting junk in [oldLen:newLen].
	if newLen < newCap {
		memclrNoHeapPointers(add(bufPtr, uintptr(newLen)*et.Size_), uintptr(newCap-newLen)*et.Size_)
	}
	return slice{bufPtr, newLen, newCap}
}

// growsliceBufNoAlias is a combination of growsliceBuf and growsliceNoAlias.
// bufPtr must be on the stack.
func growsliceBufNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufCap int) slice {
	s := growsliceBuf(oldPtr, newLen, oldCap, num, et, bufPtr, bufCap)
	if goexperiment.RuntimeFreegc && oldPtr != bufPtr && oldPtr != nil && oldPtr != s.array {
		// oldPtr is not bufPtr (the stack buffer) and it is not
		// the data pointer for s after the grow, so attempt to free it.
		// (Note that freegc does a broader check that oldPtr does not point into our stack,
		// but checking here first is slightly cheaper for a common case when oldPtr is bufPtr
		// and freegc would be a no-op.)
		//
		// TODO(thepudds): see related TODO in growsliceNoAlias about possibly eliminating
		// the oldPtr != s.array check.
		noscan := !et.Pointers()
		freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
	}
	return s
}
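
// For illustration: growsliceBuf and moveSlice are intended to cooperate. A
// grow that still fits in a caller-provided stack buffer stays on the stack
// (growsliceBuf), and the data is copied to the heap later (moveSlice or
// moveSliceNoCap) if it outlives the buffer. A hedged sketch of that shape,
// with made-up names (buf, elemType) used only for the example:
//
//	var buf [16]int // stack buffer (bufPtr must be on the stack)
//	var s slice     // empty slice being appended to
//	// The append fits in the buffer, so the backing store becomes the
//	// stack buffer rather than a heap allocation:
//	s = growsliceBuf(s.array, s.len+1, s.cap, 1, elemType, unsafe.Pointer(&buf), 16)
//	// If the slice must outlive the stack frame, copy it to the heap:
//	ptr, length, capacity := moveSlice(elemType, s.array, s.len, s.cap)
//	s = slice{ptr, length, capacity}
//
// The capacity picked by growsliceBuf is already rounded up to a size class,
// so the eventual moveSlice allocation wastes no space (see the comment in
// growsliceBuf above).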