// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"internal/abi"
	"unsafe"
)

const debugSelect = false

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/compile/internal/walk/select.go's scasetype.
type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element
}
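
// Note that scase does not record the case's direction; selectgo infers it
// from position, with send cases occupying the first nsends slots of the
// array and receive cases the remaining nrecvs slots.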

var (
	chansendpc = abi.FuncPCABIInternal(chansend)
	chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)

func selectsetpc(pc *uintptr) {
	*pc = getcallerpc()
}

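// sellock locks all channels involved in the select in lockorder,
// skipping duplicates so that each channel is locked exactly once.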
func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	for i := len(lockorder) - 1; i >= 0; i-- {
		c := scases[lockorder[i]].c
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on a
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	gp.parkingOnChan.Store(false)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock any of the
	// channel locks we risk gp getting readied by a channel operation
	// and so gp could continue running before everything before the
	// unlock is visible (even to gp itself).

	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}

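// block parks the calling goroutine forever. The compiler emits a call to it
// for a select statement with no cases.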
func block() {
	gopark(nil, nil, waitReasonSelectNoCases, traceBlockForever, 1) // forever
}

// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
// Also, if the chosen scase was a receive operation, it reports whether
// a value was received.
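//
// As a rough illustration (not actual generated code; names here are
// invented), a statement like
//
//	select {
//	case c1 <- v:
//	case x = <-c2:
//	}
//
// is lowered into a [2]scase array holding the send case in the first
// nsends entries and the receive case in the last nrecvs entries, followed
// by a call along the lines of
//
//	chosen, recvOK := selectgo(&cases[0], &order[0], nil, 1, 1, true)
//
// A select with a default case instead passes block=false, and a chosen
// value of -1 then means the default case should run.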
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
	if debugSelect {
		print("select: cas0=", cas0, "\n")
	}

	// NOTE: In order to maintain a lean stack size, the number of scases
	// is capped at 65536.
	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))

	ncases := nsends + nrecvs
	scases := cas1[:ncases:ncases]
	pollorder := order1[:ncases:ncases]
	lockorder := order1[ncases:][:ncases:ncases]
	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.
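	// order0 supplies both orders: its first ncases entries back pollorder
	// (the randomized polling sequence) and its next ncases entries back
	// lockorder (the channel-address locking sequence).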

	// Even when raceenabled is true, there might be select
	// statements in packages compiled without -race (e.g.,
	// ensureSigM in runtime/signal_unix.go).
	var pcs []uintptr
	if raceenabled && pc0 != nil {
		pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
		pcs = pc1[:ncases:ncases]
	}
	casePC := func(casi int) uintptr {
		if pcs == nil {
			return 0
		}
		return pcs[casi]
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).
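	// Roughly speaking: select {} is lowered to a call to block, a
	// single-case select becomes an ordinary channel operation, and a
	// single case plus default becomes a call to selectnbsend or
	// selectnbrecv (see chan.go).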

	// generate permuted order
	norder := 0
	for i := range scases {
		cas := &scases[i]

		// Omit cases without channels from the poll and lock orders.
		if cas.c == nil {
			cas.elem = nil // allow GC
			continue
		}

		j := cheaprandn(uint32(norder + 1))
		pollorder[norder] = pollorder[j]
		pollorder[j] = uint16(i)
		norder++
	}
	pollorder = pollorder[:norder]
	lockorder = lockorder[:norder]

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
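	// The first loop builds a max-heap keyed by channel address by sifting
	// each pollorder entry up; the second repeatedly swaps the root to the
	// end of the shrinking heap and sifts down, leaving lockorder sorted in
	// ascending address order.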
	for i := range lockorder {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	for i := len(lockorder) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}

	if debugSelect {
		for i := 0; i+1 < len(lockorder); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	}

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

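	// The select proceeds in three passes: pass 1 polls the cases in
	// pollorder for one that is already ready; failing that (and if the
	// select blocks), pass 2 enqueues a sudog on every channel in lockorder
	// and parks; pass 3 runs after wakeup, dequeuing the sudogs that did
	// not fire and recording the case that did.
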
	// pass 1 - look for something already waiting
	var casi int
	var cas *scase
	var caseSuccess bool
	var caseReleaseTime int64 = -1
	var recvOK bool
	for _, casei := range pollorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c

		if casi >= nsends {
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}
		} else {
			if raceenabled {
				racereadpc(c.raceaddr(), casePC(casi), chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}
		}
	}

	if !block {
		selunlock(scases, lockorder)
		casi = -1
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		sg.isSelect = true
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		if casi < nsends {
			c.sendq.enqueue(sg)
		} else {
			c.recvq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	gp.parkingOnChan.Store(true)
	gopark(selparkcommit, nil, waitReasonSelect, traceBlockSelect, 1)
	gp.activeStackChans = false

	sellock(scases, lockorder)

	gp.selectDone.Store(0)
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	caseSuccess = false
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.isSelect = false
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
			caseSuccess = sglist.success
			if sglist.releasetime > 0 {
				caseReleaseTime = sglist.releasetime
			}
		} else {
			c = k.c
			if int(casei) < nsends {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		throw("selectgo: bad wakeup")
	}

	c = cas.c

	if debugSelect {
		print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
	}

	if casi < nsends {
		if !caseSuccess {
			goto sclose
		}
	} else {
		recvOK = caseSuccess
	}

	if raceenabled {
		if casi < nsends {
			raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
		} else if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
	}
	if msanenabled {
		if casi < nsends {
			msanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.Size_)
		}
	}
	if asanenabled {
		if casi < nsends {
			asanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			asanwrite(cas.elem, c.elemtype.Size_)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
		racenotify(c, c.recvx, nil)
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.Size_)
	}
	if asanenabled && cas.elem != nil {
		asanwrite(cas.elem, c.elemtype.Size_)
	}
	recvOK = true
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		racenotify(c, c.sendx, nil)
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: cas0=", cas0, " c=", c, "\n")
	}
	recvOK = true
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	recvOK = false
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(c.raceaddr())
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: cas0=", cas0, " c=", c, "\n")
	}
	goto retc

retc:
	if caseReleaseTime > 0 {
		blockevent(caseReleaseTime-t0, 1)
	}
	return casi, recvOK

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

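// sortkey orders channels by address so that every select locks its channels
// in a consistent order, avoiding deadlock between selects that share
// channels.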
func (c *hchan) sortkey() uintptr {
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

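// reflect_rselect is the runtime entry point for reflect.Select. It repacks
// the caller's cases into the layout selectgo expects (send cases first,
// receive cases last, the default case handled separately) and translates
// the chosen index back into the caller's original ordering.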
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool) {
	if len(cases) == 0 {
		block()
	}
	sel := make([]scase, len(cases))
	orig := make([]int, len(cases))
	nsends, nrecvs := 0, 0
	dflt := -1
	for i, rc := range cases {
		var j int
		switch rc.dir {
		case selectDefault:
			dflt = i
			continue
		case selectSend:
			j = nsends
			nsends++
		case selectRecv:
			nrecvs++
			j = len(cases) - nrecvs
		}

		sel[j] = scase{c: rc.ch, elem: rc.val}
		orig[j] = i
	}

	// Only a default case.
	if nsends+nrecvs == 0 {
		return dflt, false
	}

	// Compact sel and orig if necessary.
	if nsends+nrecvs < len(cases) {
		copy(sel[nsends:], sel[len(cases)-nrecvs:])
		copy(orig[nsends:], orig[len(cases)-nrecvs:])
	}

	order := make([]uint16, 2*(nsends+nrecvs))
	var pc0 *uintptr
	if raceenabled {
		pcs := make([]uintptr, nsends+nrecvs)
		for i := range pcs {
			selectsetpc(&pcs[i])
		}
		pc0 = &pcs[0]
	}

	chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1)

	// Translate chosen back to caller's ordering.
	if chosen < 0 {
		chosen = dflt
	} else {
		chosen = orig[chosen]
	}
	return chosen, recvOK
}

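// dequeueSudoG removes sgp from the doubly-linked waitq q, wherever it sits
// in the queue. selectgo uses it in pass 3 to unhook this goroutine's sudogs
// from the channels whose cases were not chosen.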
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}