// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// A coro represents extra concurrency without extra parallelism,
// as would be needed for a coroutine implementation.
// The coro does not represent a specific coroutine, only the ability
// to do coroutine-style control transfers.
// It can be thought of as like a special channel that always has
// a goroutine blocked on it. If another goroutine calls coroswitch(c),
// the caller becomes the goroutine blocked in c, and the goroutine
// formerly blocked in c starts running.
// These switches continue until a call to coroexit(c),
// which ends the use of the coro by releasing the blocked
// goroutine in c and exiting the current goroutine.
//
// Coros are heap allocated and garbage collected, so that user code
// can hold a pointer to a coro without causing potential dangling
// pointer errors.
type coro struct {
	// gp is the goroutine currently blocked in this coro,
	// i.e. the one that will run on the next coroswitch.
	// It is nil once the coro has exited.
	gp guintptr
	// f is the user function the coro's goroutine runs (see corostart).
	f func(*coro)
}

//go:linkname newcoro

// newcoro creates a new coro containing a
// goroutine blocked waiting to run f
// and returns that coro.
func newcoro( func(*coro)) *coro {
	 := new(coro)
	.f = 
	 := getcallerpc()
	 := getg()
	systemstack(func() {
		 := corostart
		 := *(**funcval)(unsafe.Pointer(&))
		 = newproc1(, , )
	})
	.coroarg = 
	.waitreason = waitReasonCoroutine
	casgstatus(, _Grunnable, _Gwaiting)
	.gp.set()
	return 
}

//go:linkname corostart

// corostart is the entry func for a new coroutine.
// It runs the coroutine user function f passed to corostart
// and then calls coroexit to remove the extra concurrency.
func corostart() {
	 := getg()
	 := .coroarg
	.coroarg = nil

	.f()
	coroexit()
}

// coroexit is like coroswitch but closes the coro
// and exits the current goroutine
func coroexit( *coro) {
	 := getg()
	.coroarg = 
	.coroexit = true
	mcall(coroswitch_m)
}

//go:linkname coroswitch

// coroswitch switches to the goroutine blocked on c
// and then blocks the current goroutine on c.
func coroswitch( *coro) {
	 := getg()
	.coroarg = 
	mcall(coroswitch_m)
}

// coroswitch_m is the implementation of coroswitch
// that runs on the m stack.
//
// Note: Coroutine switches are expected to happen at
// an order of magnitude (or more) higher frequency
// than regular goroutine switches, so this path is heavily
// optimized to remove unnecessary work.
// The fast path here is three CAS: the one at the top on gp.atomicstatus,
// the one in the middle to choose the next g,
// and the one at the bottom on gnext.atomicstatus.
// It is important not to add more atomic operations or other
// expensive operations to the fast path.
func coroswitch_m( *g) {
	// TODO(rsc,mknyszek): add tracing support in a lightweight manner.
	// Probably the tracer will need a global bool (set and cleared during STW)
	// that this code can check to decide whether to use trace.gen.Load();
	// we do not want to do the atomic load all the time, especially when
	// tracer use is relatively rare.
	 := .coroarg
	.coroarg = nil
	 := .coroexit
	.coroexit = false
	 := .m

	if  {
		gdestroy()
		 = nil
	} else {
		// If we can CAS ourselves directly from running to waiting, so do,
		// keeping the control transfer as lightweight as possible.
		.waitreason = waitReasonCoroutine
		if !.atomicstatus.CompareAndSwap(_Grunning, _Gwaiting) {
			// The CAS failed: use casgstatus, which will take care of
			// coordinating with the garbage collector about the state change.
			casgstatus(, _Grunning, _Gwaiting)
		}

		// Clear gp.m.
		setMNoWB(&.m, nil)
	}

	// The goroutine stored in c is the one to run next.
	// Swap it with ourselves.
	var  *g
	for {
		// Note: this is a racy load, but it will eventually
		// get the right value, and if it gets the wrong value,
		// the c.gp.cas will fail, so no harm done other than
		// a wasted loop iteration.
		// The cas will also sync c.gp's
		// memory enough that the next iteration of the racy load
		// should see the correct value.
		// We are avoiding the atomic load to keep this path
		// as lightweight as absolutely possible.
		// (The atomic load is free on x86 but not free elsewhere.)
		 := .gp
		if .ptr() == nil {
			throw("coroswitch on exited coro")
		}
		var  guintptr
		.set()
		if .gp.cas(, ) {
			 = .ptr()
			break
		}
	}

	// Start running next, without heavy scheduling machinery.
	// Set mp.curg and gnext.m and then update scheduling state
	// directly if possible.
	setGNoWB(&.curg, )
	setMNoWB(&.m, )
	if !.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
		// The CAS failed: use casgstatus, which will take care of
		// coordinating with the garbage collector about the state change.
		casgstatus(, _Gwaiting, _Grunnable)
		casgstatus(, _Grunnable, _Grunning)
	}

	// Switch to gnext. Does not return.
	gogo(&.sched)
}