// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux && (amd64 || arm64 || arm64be || ppc64 || ppc64le || loong64 || s390x)

package runtime

import (
	"internal/cpu"
	"unsafe"
)

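// vgetrandom1 invokes the vDSO getrandom() function (implemented in
// architecture-specific assembly) with the given buffer, length, flags,
// opaque state pointer, and state size, returning the raw result: a byte
// count on success or a negative errno value on failure.
//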
//go:noescape
func vgetrandom1(buf *byte, length uintptr, flags uint32, state uintptr, stateSize uintptr) int

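// vgetrandomAlloc holds the free list of opaque per-m vDSO getrandom states,
// protected by statesLock, along with the state size and mmap parameters
// reported by the vDSO at init time.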
var vgetrandomAlloc struct {
	states     []uintptr
	statesLock mutex
	stateSize  uintptr
	mmapProt   int32
	mmapFlags  int32
}

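// vgetrandomInit probes the vDSO for the opaque state parameters. If the
// getrandom symbol is absent or the probe fails, stateSize stays zero and
// vgetrandom reports itself unsupported.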
func vgetrandomInit() {
	if vdsoGetrandomSym == 0 {
		return
	}

	// Mirrors the kernel's struct vgetrandom_opaque_params. Passing a state
	// size of ^uintptr(0) asks the vDSO to fill in these parameters.
	var params struct {
		SizeOfOpaqueState uint32
		MmapProt          uint32
		MmapFlags         uint32
		reserved          [13]uint32
	}
	if vgetrandom1(nil, 0, 0, uintptr(unsafe.Pointer(&params)), ^uintptr(0)) != 0 {
		return
	}
	vgetrandomAlloc.stateSize = uintptr(params.SizeOfOpaqueState)
	vgetrandomAlloc.mmapProt = int32(params.MmapProt)
	vgetrandomAlloc.mmapFlags = int32(params.MmapFlags)

	lockInit(&vgetrandomAlloc.statesLock, lockRankLeafRank)
}

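// vgetrandomGetState pops a free opaque state off vgetrandomAlloc.states,
// mmapping a fresh batch (cache-line aligned, never straddling a page
// boundary) when the free list is empty. It returns 0 if allocation fails.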
func vgetrandomGetState() uintptr {
	lock(&vgetrandomAlloc.statesLock)
	if len(vgetrandomAlloc.states) == 0 {
		num := uintptr(ncpu) // Just a reasonable size hint to start.
		// Round each state up to a cache line, then round the whole
		// allocation up to whole pages, and recompute how many states fit.
		stateSizeCacheAligned := (vgetrandomAlloc.stateSize + cpu.CacheLineSize - 1) &^ (cpu.CacheLineSize - 1)
		allocSize := (num*stateSizeCacheAligned + physPageSize - 1) &^ (physPageSize - 1)
		num = (physPageSize / stateSizeCacheAligned) * (allocSize / physPageSize)
		p, err := mmap(nil, allocSize, vgetrandomAlloc.mmapProt, vgetrandomAlloc.mmapFlags, -1, 0)
		if err != 0 {
			unlock(&vgetrandomAlloc.statesLock)
			return 0
		}
		newBlock := uintptr(p)
		if vgetrandomAlloc.states == nil {
			vgetrandomAlloc.states = make([]uintptr, 0, num)
		}
		for i := uintptr(0); i < num; i++ {
			// A state must not straddle a page boundary; if this one
			// would, advance to the start of the next page.
			if (newBlock&(physPageSize-1))+vgetrandomAlloc.stateSize > physPageSize {
				newBlock = (newBlock + physPageSize - 1) &^ (physPageSize - 1)
			}
			vgetrandomAlloc.states = append(vgetrandomAlloc.states, newBlock)
			newBlock += stateSizeCacheAligned
		}
	}
	state := vgetrandomAlloc.states[len(vgetrandomAlloc.states)-1]
	vgetrandomAlloc.states = vgetrandomAlloc.states[:len(vgetrandomAlloc.states)-1]
	unlock(&vgetrandomAlloc.statesLock)
	return state
}

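// vgetrandomPutState returns an opaque state to the free list so another m
// can reuse it.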
func vgetrandomPutState(state uintptr) {
	lock(&vgetrandomAlloc.statesLock)
	vgetrandomAlloc.states = append(vgetrandomAlloc.states, state)
	unlock(&vgetrandomAlloc.statesLock)
}

// This is exported for use in internal/syscall/unix as well as x/sys/unix.
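// A caller outside the runtime declares a matching linkname stub, roughly (a
// sketch, not necessarily the exact declaration those packages use):
//
//	//go:linkname vgetrandom runtime.vgetrandom
//	//go:noescape
//	func vgetrandom(p []byte, flags uint32) (ret int, supported bool)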
//
//go:linkname vgetrandom
func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
	if vgetrandomAlloc.stateSize == 0 {
		return -1, false
	}

	// We use getg().m instead of acquirem() here, because always taking
	// the lock is slightly more expensive than not always taking the lock.
	// However, we *do* require that m doesn't migrate elsewhere during the
	// execution of the vDSO. So, we exploit two details:
	//   1) Asynchronous preemption is aborted when PC is in the runtime.
	//   2) Most of the time, this function only calls vgetrandom1(), which
	//      does not have a preamble that synchronously preempts.
	// We do need to take the lock when getting a new state for m, but this
	// is very much the slow path, in the sense that it only ever happens
	// once over the entire lifetime of an m. So, a simple getg().m suffices.
	mp := getg().m

	if mp.vgetrandomState == 0 {
		mp.locks++
		state := vgetrandomGetState()
		mp.locks--
		if state == 0 {
			return -1, false
		}
		mp.vgetrandomState = state
	}
	return vgetrandom1(unsafe.SliceData(p), uintptr(len(p)), flags, mp.vgetrandomState, vgetrandomAlloc.stateSize), true
}