// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fuzz

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	"sync"
	"time"
)

const (
	// workerFuzzDuration is the amount of time a worker can spend testing random
	// variations of an input given by the coordinator.
	workerFuzzDuration = 100 * time.Millisecond

	// workerTimeoutDuration is the amount of time a worker can go without
	// responding to the coordinator before being stopped.
	workerTimeoutDuration = 1 * time.Second

	// workerExitCode is used as an exit code by fuzz worker processes after an internal error.
	// This distinguishes internal errors from uncontrolled panics and other crashes.
	// Keep in sync with internal/fuzz.workerExitCode.
	workerExitCode = 70

	// workerSharedMemSize is the maximum size of the shared memory file used to
	// communicate with workers. This limits the size of fuzz inputs.
	workerSharedMemSize = 100 << 20 // 100 MB
)

// worker manages a worker process running a test binary. The worker object
// exists only in the coordinator (the process started by 'go test -fuzz').
// workerClient is used by the coordinator to send RPCs to the worker process,
// which handles them with workerServer.
type worker struct {
	dir     string   // working directory, same as package directory
	binPath string   // path to test executable
	args    []string // arguments for test executable
	env     []string // environment for test executable

	coordinator *coordinator

	memMu chan *sharedMem // mutex guarding shared memory with worker; persists across processes.

	cmd         *exec.Cmd     // current worker process
	client      *workerClient // used to communicate with worker process
	waitErr     error         // last error returned by wait, set before termC is closed.
	interrupted bool          // true after stop interrupts a running worker.
	termC       chan struct{} // closed by wait when worker process terminates
}

func newWorker(c *coordinator, dir, binPath string, args, env []string) (*worker, error) {
	mem, err := sharedMemTempFile(workerSharedMemSize)
	if err != nil {
		return nil, err
	}
	memMu := make(chan *sharedMem, 1)
	memMu <- mem
	return &worker{
		dir:         dir,
		binPath:     binPath,
		args:        args,
		env:         env[:len(env):len(env)], // copy on append to ensure workers don't overwrite each other.
		coordinator: c,
		memMu:       memMu,
	}, nil
}
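
// Note on env[:len(env):len(env)] above: the three-index slice expression caps
// the result's capacity at its length, so any later append through a worker's
// env must allocate a fresh backing array instead of writing into the shared
// one. A minimal illustration (hypothetical values, not part of this package):
//
//	base := []string{"GOOS=linux"}
//	w1 := base[:len(base):len(base)]
//	w2 := base[:len(base):len(base)]
//	_ = append(w1, "A=1") // allocates; cannot clobber the array w2 sees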

// cleanup releases persistent resources associated with the worker.
func (w *worker) cleanup() error {
	mem := <-w.memMu
	if mem == nil {
		return nil
	}
	close(w.memMu)
	return mem.Close()
}

// coordinate runs the test binary to perform fuzzing.
//
// coordinate loops until ctx is cancelled or a fatal error is encountered.
// If a test process terminates unexpectedly while fuzzing, coordinate will
// attempt to restart and continue unless the termination can be attributed
// to an interruption (from a timer or the user).
//
// While looping, coordinate receives inputs from the coordinator, passes
// those inputs to the worker process, then passes the results back to
// the coordinator.
func (w *worker) coordinate(ctx context.Context) error {
	// Main event loop.
	for {
		// Start or restart the worker if it's not running.
		if !w.isRunning() {
			if err := w.startAndPing(ctx); err != nil {
				return err
			}
		}

		select {
		case <-ctx.Done():
			// Worker was told to stop.
			err := w.stop()
			if err != nil && !w.interrupted && !isInterruptError(err) {
				return err
			}
			return ctx.Err()

		case <-w.termC:
			// Worker process terminated unexpectedly while waiting for input.
			err := w.stop()
			if w.interrupted {
				panic("worker interrupted after unexpected termination")
			}
			if err == nil || isInterruptError(err) {
				// Worker stopped, either by exiting with status 0 or after being
				// interrupted with a signal that was not sent by the coordinator.
				//
				// When the user presses ^C, on POSIX platforms, SIGINT is delivered to
				// all processes in the group concurrently, and the worker may see it
				// before the coordinator. The worker should exit 0 gracefully (in
				// theory).
				//
				// This condition is probably intended by the user, so suppress
				// the error.
				return nil
			}
			if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == workerExitCode {
				// Worker exited with a code indicating F.Fuzz was not called correctly,
				// for example, F.Fail was called first.
				return fmt.Errorf("fuzzing process exited unexpectedly due to an internal failure: %w", err)
			}
			// Worker exited non-zero or was terminated by a non-interrupt
			// signal (for example, SIGSEGV) while fuzzing.
			return fmt.Errorf("fuzzing process hung or terminated unexpectedly: %w", err)
			// TODO(jayconrod,katiehockman): if -keepfuzzing, restart worker.

		case input := <-w.coordinator.inputC:
			// Received input from coordinator.
			args := fuzzArgs{
				Limit:        input.limit,
				Timeout:      input.timeout,
				Warmup:       input.warmup,
				CoverageData: input.coverageData,
			}
			entry, resp, isInternalError, err := w.client.fuzz(ctx, input.entry, args)
			canMinimize := true
			if err != nil {
				// Error communicating with worker.
				w.stop()
				if ctx.Err() != nil {
					// Timeout or interruption.
					return ctx.Err()
				}
				if w.interrupted {
					// Communication error before we stopped the worker.
					// Report an error, but don't record a crasher.
					return fmt.Errorf("communicating with fuzzing process: %v", err)
				}
				if sig, ok := terminationSignal(w.waitErr); ok && !isCrashSignal(sig) {
					// Worker terminated by a signal that probably wasn't caused by a
					// specific input to the fuzz function. For example, on Linux,
					// the kernel (OOM killer) may send SIGKILL to a process using a lot
					// of memory. Or the shell might send SIGHUP when the terminal
					// is closed. Don't record a crasher.
					return fmt.Errorf("fuzzing process terminated by unexpected signal; no crash will be recorded: %v", w.waitErr)
				}
				if isInternalError {
					// An internal error occurred which shouldn't be considered
					// a crash.
					return err
				}
				// Unexpected termination. Set error message and fall through.
				// We'll restart the worker on the next iteration.
				// Don't attempt to minimize this since it crashed the worker.
				resp.Err = fmt.Sprintf("fuzzing process hung or terminated unexpectedly: %v", w.waitErr)
				canMinimize = false
			}
			result := fuzzResult{
				limit:         input.limit,
				count:         resp.Count,
				totalDuration: resp.TotalDuration,
				entryDuration: resp.InterestingDuration,
				entry:         entry,
				crasherMsg:    resp.Err,
				coverageData:  resp.CoverageData,
				canMinimize:   canMinimize,
			}
			w.coordinator.resultC <- result

		case input := <-w.coordinator.minimizeC:
			// Received input to minimize from coordinator.
			result, err := w.minimize(ctx, input)
			if err != nil {
				// Error minimizing. Send back the original input. If it didn't cause
				// an error before, report it as causing an error now.
				// TODO: double-check this is handled correctly when
				// implementing -keepfuzzing.
				result = fuzzResult{
					entry:       input.entry,
					crasherMsg:  input.crasherMsg,
					canMinimize: false,
					limit:       input.limit,
				}
				if result.crasherMsg == "" {
					result.crasherMsg = err.Error()
				}
			}
			if shouldPrintDebugInfo() {
				w.coordinator.debugLogf(
					"input minimized, id: %s, original id: %s, crasher: %t, originally crasher: %t, minimizing took: %s",
					result.entry.Path,
					input.entry.Path,
					result.crasherMsg != "",
					input.crasherMsg != "",
					result.totalDuration,
				)
			}
			w.coordinator.resultC <- result
		}
	}
}

// minimize tells a worker process to attempt to find a smaller value that
// either causes an error (if we started minimizing because we found an input
// that causes an error) or preserves new coverage (if we started minimizing
// because we found an input that expands coverage).
func (w *worker) minimize(ctx context.Context, input fuzzMinimizeInput) (min fuzzResult, err error) {
	if w.coordinator.opts.MinimizeTimeout != 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, w.coordinator.opts.MinimizeTimeout)
		defer cancel()
	}

	args := minimizeArgs{
		Limit:        input.limit,
		Timeout:      input.timeout,
		KeepCoverage: input.keepCoverage,
	}
	entry, resp, err := w.client.minimize(ctx, input.entry, args)
	if err != nil {
		// Error communicating with worker.
		w.stop()
		if ctx.Err() != nil || w.interrupted || isInterruptError(w.waitErr) {
			// Worker was interrupted, possibly by the user pressing ^C.
			// Normally, workers can handle interrupts and timeouts gracefully and
			// will return without error. An error here indicates the worker
			// may not have been in a good state, but the error won't be meaningful
			// to the user. Just return the original crasher without logging anything.
			return fuzzResult{
				entry:        input.entry,
				crasherMsg:   input.crasherMsg,
				coverageData: input.keepCoverage,
				canMinimize:  false,
				limit:        input.limit,
			}, nil
		}
		return fuzzResult{
			entry:         entry,
			crasherMsg:    fmt.Sprintf("fuzzing process hung or terminated unexpectedly while minimizing: %v", err),
			canMinimize:   false,
			limit:         input.limit,
			count:         resp.Count,
			totalDuration: resp.Duration,
		}, nil
	}

	if input.crasherMsg != "" && resp.Err == "" {
		return fuzzResult{}, fmt.Errorf("attempted to minimize a crash but could not reproduce")
	}

	return fuzzResult{
		entry:         entry,
		crasherMsg:    resp.Err,
		coverageData:  resp.CoverageData,
		canMinimize:   false,
		limit:         input.limit,
		count:         resp.Count,
		totalDuration: resp.Duration,
	}, nil
}

func (w *worker) isRunning() bool {
	return w.cmd != nil
}

// startAndPing starts the worker process and sends it a message to make sure it
// can communicate.
//
// startAndPing returns an error if any part of this didn't work, including if
// the context is expired or the worker process was interrupted before it
// responded. Errors that happen after start but before the ping response
// likely indicate that the worker did not call F.Fuzz or called F.Fail first.
// We don't record crashers for these errors.
func (w *worker) startAndPing(ctx context.Context) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if err := w.start(); err != nil {
		return err
	}
	if err := w.client.ping(ctx); err != nil {
		w.stop()
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if isInterruptError(err) {
			// User may have pressed ^C before worker responded.
			return err
		}
		// TODO: record and return stderr.
		return fmt.Errorf("fuzzing process terminated without fuzzing: %w", err)
	}
	return nil
}

// start runs a new worker process.
//
// If the process couldn't be started, start returns an error. Start won't
// return later termination errors from the process if they occur.
//
// If the process starts successfully, start returns nil. stop must be called
// once later to clean up, even if the process terminates on its own.
//
// When the process terminates, w.waitErr is set to the error (if any), and
// w.termC is closed.
func (w *worker) start() (err error) {
	if w.isRunning() {
		panic("worker already started")
	}
	w.waitErr = nil
	w.interrupted = false
	w.termC = nil

	cmd := exec.Command(w.binPath, w.args...)
	cmd.Dir = w.dir
	cmd.Env = w.env[:len(w.env):len(w.env)] // copy on append to ensure workers don't overwrite each other.

	// Create the "fuzz_in" and "fuzz_out" pipes so we can communicate with
	// the worker. We don't use stdin and stdout, since the test binary may
	// do something else with those.
	//
	// Each pipe has a reader and a writer. The coordinator writes to fuzzInW
	// and reads from fuzzOutR. The worker inherits fuzzInR and fuzzOutW.
	// The coordinator closes fuzzInR and fuzzOutW after starting the worker,
	// since we have no further need of them.
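	//
	// Schematically, with the names used below (coordinator on the left,
	// worker on the right):
	//
	//	fuzzInW  --writes--> fuzz_in  --reads--> fuzzInR
	//	fuzzOutR <--reads--  fuzz_out <-writes-- fuzzOutW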
	fuzzInR, fuzzInW, err := os.Pipe()
	if err != nil {
		return err
	}
	defer fuzzInR.Close()
	fuzzOutR, fuzzOutW, err := os.Pipe()
	if err != nil {
		fuzzInW.Close()
		return err
	}
	defer fuzzOutW.Close()
	setWorkerComm(cmd, workerComm{fuzzIn: fuzzInR, fuzzOut: fuzzOutW, memMu: w.memMu})

	// Start the worker process.
	if err := cmd.Start(); err != nil {
		fuzzInW.Close()
		fuzzOutR.Close()
		return err
	}

	// Worker started successfully.
	// After this, w.client owns fuzzInW and fuzzOutR, so w.client.Close must be
	// called later by stop.
	w.cmd = cmd
	w.termC = make(chan struct{})
	comm := workerComm{fuzzIn: fuzzInW, fuzzOut: fuzzOutR, memMu: w.memMu}
	m := newMutator()
	w.client = newWorkerClient(comm, m)

	go func() {
		w.waitErr = w.cmd.Wait()
		close(w.termC)
	}()

	return nil
}

// stop tells the worker process to exit by closing w.client, then blocks until
// it terminates. If the worker doesn't terminate after a short time, stop
// signals it with os.Interrupt (where supported), then os.Kill.
//
// stop returns the error the process terminated with, if any (same as
// w.waitErr).
//
// stop must be called at least once after start returns successfully, even if
// the worker process terminates unexpectedly.
func (w *worker) stop() error {
	if w.termC == nil {
		panic("worker was not started successfully")
	}
	select {
	case <-w.termC:
		// Worker already terminated.
		if w.client == nil {
			// stop already called.
			return w.waitErr
		}
		// Possible unexpected termination.
		w.client.Close()
		w.cmd = nil
		w.client = nil
		return w.waitErr
	default:
		// Worker still running.
	}

	// Tell the worker to stop by closing fuzz_in. It won't actually stop until it
	// finishes with earlier calls.
	closeC := make(chan struct{})
	go func() {
		w.client.Close()
		close(closeC)
	}()

	sig := os.Interrupt
	if runtime.GOOS == "windows" {
		// Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on
		// Windows; using it with os.Process.Signal will return an error.”
		// Fall back to Kill instead.
		sig = os.Kill
	}

	t := time.NewTimer(workerTimeoutDuration)
	for {
		select {
		case <-w.termC:
			// Worker terminated.
			t.Stop()
			<-closeC
			w.cmd = nil
			w.client = nil
			return w.waitErr

		case <-t.C:
			// Timer fired before worker terminated.
			w.interrupted = true
			switch sig {
			case os.Interrupt:
				// Try to stop the worker with SIGINT and wait a little longer.
				w.cmd.Process.Signal(sig)
				sig = os.Kill
				t.Reset(workerTimeoutDuration)

			case os.Kill:
				// Try to stop the worker with SIGKILL and keep waiting.
				w.cmd.Process.Signal(sig)
				sig = nil
				t.Reset(workerTimeoutDuration)

			case nil:
				// Still waiting. Print a message to let the user know why.
				fmt.Fprintf(w.coordinator.opts.Log, "waiting for fuzzing process to terminate...\n")
			}
		}
	}
}

// RunFuzzWorker is called in a worker process to communicate with the
// coordinator process in order to fuzz random inputs. RunFuzzWorker loops
// until the coordinator tells it to stop.
//
// fn is a wrapper on the fuzz function. It may return an error to indicate
// a given input "crashed". The coordinator will also record a crasher if
// the function times out or terminates the process.
//
// RunFuzzWorker returns an error if it could not communicate with the
// coordinator process.
func RunFuzzWorker(ctx context.Context, fn func(CorpusEntry) error) error {
	comm, err := getWorkerComm()
	if err != nil {
		return err
	}
	srv := &workerServer{
		workerComm: comm,
		fuzzFn: func(e CorpusEntry) (time.Duration, error) {
			timer := time.AfterFunc(10*time.Second, func() {
				panic("deadlocked!") // this error message won't be printed
			})
			defer timer.Stop()
			start := time.Now()
			err := fn(e)
			return time.Since(start), err
		},
		m: newMutator(),
	}
	return srv.serve(ctx)
}
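
// A minimal sketch of how a worker process reaches RunFuzzWorker (hypothetical
// wiring; the real entry point lives in the testing package's fuzz support,
// and invokeFuzzTarget is a stand-in for the generated wrapper):
//
//	err := RunFuzzWorker(ctx, func(e CorpusEntry) error {
//		return invokeFuzzTarget(e.Values)
//	})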

// call is serialized and sent from the coordinator on fuzz_in. It acts as
// a minimalist RPC mechanism. Exactly one of its fields must be set to indicate
// which method to call.
type call struct {
	Ping     *pingArgs
	Fuzz     *fuzzArgs
	Minimize *minimizeArgs
}
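
// For illustration, a fuzz call with a 100 ms timeout is encoded on fuzz_in
// along these lines (time.Duration marshals as nanoseconds; the unused call
// fields appear as null):
//
//	{"Ping":null,"Fuzz":{"Timeout":100000000,"Limit":0,"Warmup":false,"CoverageData":null},"Minimize":null}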

// minimizeArgs contains arguments to workerServer.minimize. The value to
// minimize is already in shared memory.
type minimizeArgs struct {
	// Timeout is the time to spend minimizing. This may include time to start up,
	// especially if the input causes the worker process to terminate, requiring
	// repeated restarts.
	Timeout time.Duration

	// Limit is the maximum number of values to test, without spending more time
	// than Timeout. 0 indicates no limit.
	Limit int64

	// KeepCoverage is a set of coverage counters the worker should attempt to
	// keep in minimized values. When provided, the worker will reject inputs that
	// don't cause at least one of these bits to be set.
	KeepCoverage []byte

	// Index is the index of the fuzz target parameter to be minimized.
	Index int
}

// minimizeResponse contains results from workerServer.minimize.
type minimizeResponse struct {
	// WroteToMem is true if the worker found a smaller input and wrote it to
	// shared memory. If minimizeArgs.KeepCoverage was set, the minimized input
	// preserved at least one coverage bit and did not cause an error.
	// Otherwise, the minimized input caused some error, recorded in Err.
	WroteToMem bool

	// Err is the error string caused by the value in shared memory, if any.
	Err string

	// CoverageData is the set of coverage bits activated by the minimized value
	// in shared memory. When set, it contains at least one bit from KeepCoverage.
	// CoverageData will be nil if Err is set or if minimization failed.
	CoverageData []byte

	// Duration is the time spent minimizing, not including starting or cleaning up.
	Duration time.Duration

	// Count is the number of values tested.
	Count int64
}

// fuzzArgs contains arguments to workerServer.fuzz. The value to fuzz is
// passed in shared memory.
type fuzzArgs struct {
	// Timeout is the time to spend fuzzing, not including starting or
	// cleaning up.
	Timeout time.Duration

	// Limit is the maximum number of values to test, without spending more time
	// than Timeout. 0 indicates no limit.
	Limit int64

	// Warmup indicates whether this is part of a warmup run, meaning that
	// fuzzing should not occur. If coverageEnabled is true, then coverage data
	// should be reported.
	Warmup bool

	// CoverageData is the coverage data. If set, the worker should update its
	// local coverage data prior to fuzzing.
	CoverageData []byte
}

// fuzzResponse contains results from workerServer.fuzz.
type fuzzResponse struct {
	// TotalDuration is the total time spent fuzzing, not including starting or
	// cleaning up. InterestingDuration is the time spent running the input
	// that crashed or expanded coverage, if any.
	TotalDuration       time.Duration
	InterestingDuration time.Duration

	// Count is the number of values tested.
	Count int64

	// CoverageData is set if the value in shared memory expands coverage
	// and therefore may be interesting to the coordinator.
	CoverageData []byte

	// Err is the error string caused by the value in shared memory, which is
	// non-empty if the value in shared memory caused a crash.
	Err string

	// InternalErr is the error string caused by an internal error in the
	// worker. This shouldn't be considered a crasher.
	InternalErr string
}

// pingArgs contains arguments to workerServer.ping.
type pingArgs struct{}

// pingResponse contains results from workerServer.ping.
type pingResponse struct{}

// workerComm holds pipes and shared memory used for communication
// between the coordinator process (client) and a worker process (server).
// These values are unique to each worker; they are shared only with the
// coordinator, not with other workers.
//
// Access to shared memory is synchronized implicitly over the RPC protocol
// implemented in workerServer and workerClient. During a call, the server
// (worker) has exclusive access to shared memory; at other times, the client
// (coordinator) has exclusive access.
type workerComm struct {
	fuzzIn, fuzzOut *os.File
	memMu           chan *sharedMem // mutex guarding shared memory
}
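
// memMu is used as a mutex with ownership transfer: receiving from the channel
// acquires the shared memory, and sending it back releases it. The typical
// pattern (as in workerClient.fuzz below) is:
//
//	mem, ok := <-wc.memMu // acquire; !ok means cleanup closed the channel
//	if !ok {
//		return errSharedMemClosed
//	}
//	defer func() { wc.memMu <- mem }() // release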

// workerServer is a minimalist RPC server, run by fuzz worker processes.
// It allows the coordinator process (using workerClient) to call methods in a
// worker process. This system allows the coordinator to run multiple worker
// processes in parallel and to collect inputs that caused crashes from shared
// memory after a worker process terminates unexpectedly.
type workerServer struct {
	workerComm
	m *mutator

	// coverageMask is the local coverage data for the worker. It is
	// periodically updated to reflect the data in the coordinator when new
	// coverage is found.
	coverageMask []byte

	// fuzzFn runs the worker's fuzz target on the given input and returns an
	// error if it finds a crasher (the process may also exit or crash), and the
	// time it took to run the input. It sets a deadline of 10 seconds, at which
	// point it will panic with the assumption that the process is hanging or
	// deadlocked.
	fuzzFn func(CorpusEntry) (time.Duration, error)
}

// serve reads serialized RPC messages on fuzzIn. When serve receives a message,
// it calls the corresponding method, then sends the serialized result back
// on fuzzOut.
//
// serve handles RPC calls synchronously; it will not attempt to read a message
// until the previous call has finished.
//
// serve returns errors that occurred when communicating over pipes. serve
// does not return errors from method calls; those are passed through serialized
// responses.
func (ws *workerServer) serve(ctx context.Context) error {
	enc := json.NewEncoder(ws.fuzzOut)
	dec := json.NewDecoder(&contextReader{ctx: ctx, r: ws.fuzzIn})
	for {
		var c call
		if err := dec.Decode(&c); err != nil {
			if err == io.EOF || err == ctx.Err() {
				return nil
			} else {
				return err
			}
		}

		var resp any
		switch {
		case c.Fuzz != nil:
			resp = ws.fuzz(ctx, *c.Fuzz)
		case c.Minimize != nil:
			resp = ws.minimize(ctx, *c.Minimize)
		case c.Ping != nil:
			resp = ws.ping(ctx, *c.Ping)
		default:
			return errors.New("no arguments provided for any call")
		}

		if err := enc.Encode(resp); err != nil {
			return err
		}
	}
}

// chainedMutations is how many mutations are applied before the worker
// resets the input to its original state.
// NOTE: this number was picked without much thought. It is low enough that
// it seems to create a significant diversity in mutated inputs. We may want
// to consider looking into this more closely once we have a proper performance
// testing framework. Another option is to randomly pick the number of chained
// mutations on each invocation of the workerServer.fuzz method (this appears to
// be what libFuzzer does, although there seems to be no documentation which
// explains why this choice was made.)
const chainedMutations = 5

// fuzz runs the test function on random variations of the input value in shared
// memory for a limited duration or number of iterations.
//
// fuzz returns early if it finds an input that crashes the fuzz function (with
// fuzzResponse.Err set) or an input that expands coverage (with
// fuzzResponse.InterestingDuration set).
//
// fuzz does not modify the input in shared memory. Instead, it saves the
// initial PRNG state in shared memory and increments a counter in shared
// memory before each call to the test function. The caller may reconstruct
// the crashing input with this information, since the PRNG is deterministic.
func (ws *workerServer) fuzz(ctx context.Context, args fuzzArgs) (resp fuzzResponse) {
	if args.CoverageData != nil {
		if ws.coverageMask != nil && len(args.CoverageData) != len(ws.coverageMask) {
			resp.InternalErr = fmt.Sprintf("unexpected size for CoverageData: got %d, expected %d", len(args.CoverageData), len(ws.coverageMask))
			return resp
		}
		ws.coverageMask = args.CoverageData
	}
	start := time.Now()
	defer func() { resp.TotalDuration = time.Since(start) }()

	if args.Timeout != 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, args.Timeout)
		defer cancel()
	}
	mem := <-ws.memMu
	ws.m.r.save(&mem.header().randState, &mem.header().randInc)
	defer func() {
		resp.Count = mem.header().count
		ws.memMu <- mem
	}()
	if args.Limit > 0 && mem.header().count >= args.Limit {
		resp.InternalErr = fmt.Sprintf("mem.header().count %d already exceeds args.Limit %d", mem.header().count, args.Limit)
		return resp
	}

	originalVals, err := unmarshalCorpusFile(mem.valueCopy())
	if err != nil {
		resp.InternalErr = err.Error()
		return resp
	}
	vals := make([]any, len(originalVals))
	copy(vals, originalVals)

	shouldStop := func() bool {
		return args.Limit > 0 && mem.header().count >= args.Limit
	}
	fuzzOnce := func(entry CorpusEntry) (dur time.Duration, cov []byte, errMsg string) {
		mem.header().count++
		var err error
		dur, err = ws.fuzzFn(entry)
		if err != nil {
			errMsg = err.Error()
			if errMsg == "" {
				errMsg = "fuzz function failed with no input"
			}
			return dur, nil, errMsg
		}
		if ws.coverageMask != nil && countNewCoverageBits(ws.coverageMask, coverageSnapshot) > 0 {
			return dur, coverageSnapshot, ""
		}
		return dur, nil, ""
	}

	if args.Warmup {
		dur, _, errMsg := fuzzOnce(CorpusEntry{Values: vals})
		if errMsg != "" {
			resp.Err = errMsg
			return resp
		}
		resp.InterestingDuration = dur
		if coverageEnabled {
			resp.CoverageData = coverageSnapshot
		}
		return resp
	}

	for {
		select {
		case <-ctx.Done():
			return resp
		default:
			if mem.header().count%chainedMutations == 0 {
				copy(vals, originalVals)
				ws.m.r.save(&mem.header().randState, &mem.header().randInc)
			}
			ws.m.mutate(vals, cap(mem.valueRef()))

			entry := CorpusEntry{Values: vals}
			dur, cov, errMsg := fuzzOnce(entry)
			if errMsg != "" {
				resp.Err = errMsg
				return resp
			}
			if cov != nil {
				resp.CoverageData = cov
				resp.InterestingDuration = dur
				return resp
			}
			if shouldStop() {
				return resp
			}
		}
	}
}
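
// A worked example of the reconstruction described above: if a worker process
// crashed after mem.header().count reached 7 and chainedMutations is 5, the
// coordinator restores the PRNG state saved when the current chain began and
// replays ((7-1)%5)+1 = 2 mutations to regenerate the crashing input (see
// workerClient.fuzz).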

func (ws *workerServer) minimize(ctx context.Context, args minimizeArgs) (resp minimizeResponse) {
	start := time.Now()
	defer func() { resp.Duration = time.Since(start) }()
	mem := <-ws.memMu
	defer func() { ws.memMu <- mem }()
	vals, err := unmarshalCorpusFile(mem.valueCopy())
	if err != nil {
		panic(err)
	}
	inpHash := sha256.Sum256(mem.valueCopy())
	if args.Timeout != 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, args.Timeout)
		defer cancel()
	}

	// Minimize the values in vals, then write to shared memory. We only write
	// to shared memory after completing minimization.
	success, err := ws.minimizeInput(ctx, vals, mem, args)
	if success {
		writeToMem(vals, mem)
		outHash := sha256.Sum256(mem.valueCopy())
		mem.header().rawInMem = false
		resp.WroteToMem = true
		if err != nil {
			resp.Err = err.Error()
		} else {
			// If the values didn't change during minimization then coverageSnapshot is likely
			// a dirty snapshot which represents the very last step of minimization, not the
			// coverage for the initial input. In that case just return the coverage we were
			// given initially, since it more accurately represents the coverage map for the
			// input we are returning.
			if outHash != inpHash {
				resp.CoverageData = coverageSnapshot
			} else {
				resp.CoverageData = args.KeepCoverage
			}
		}
	}
	return resp
}

// minimizeInput applies a series of minimizing transformations on the provided
// vals, ensuring that each minimization still causes an error, or keeps
// coverage, in fuzzFn. It uses the context to determine how long to run,
// stopping once closed. It returns a bool indicating whether minimization was
// successful and an error if one was found.
func (ws *workerServer) minimizeInput(ctx context.Context, vals []any, mem *sharedMem, args minimizeArgs) (success bool, retErr error) {
	keepCoverage := args.KeepCoverage
	memBytes := mem.valueRef()
	bPtr := &memBytes
	count := &mem.header().count
	shouldStop := func() bool {
		return ctx.Err() != nil ||
			(args.Limit > 0 && *count >= args.Limit)
	}
	if shouldStop() {
		return false, nil
	}

	// Check that the original value preserves coverage or causes an error.
	// If not, then whatever caused us to think the value was interesting may
	// have been a flake, and we can't minimize it.
	*count++
	_, retErr = ws.fuzzFn(CorpusEntry{Values: vals})
	if keepCoverage != nil {
		if !hasCoverageBit(keepCoverage, coverageSnapshot) || retErr != nil {
			return false, nil
		}
	} else if retErr == nil {
		return false, nil
	}
	mem.header().rawInMem = true

	// tryMinimized runs the fuzz function with candidate replacing the value
	// at index valI. tryMinimized returns whether the input with candidate is
	// interesting for the same reason as the original input: it returns
	// an error if one was expected, or it preserves coverage.
	tryMinimized := func(candidate []byte) bool {
		prev := vals[args.Index]
		switch prev.(type) {
		case []byte:
			vals[args.Index] = candidate
		case string:
			vals[args.Index] = string(candidate)
		default:
			panic("impossible")
		}
		copy(*bPtr, candidate)
		*bPtr = (*bPtr)[:len(candidate)]
		mem.setValueLen(len(candidate))
		*count++
		_, err := ws.fuzzFn(CorpusEntry{Values: vals})
		if err != nil {
			retErr = err
			if keepCoverage != nil {
				// Now that we've found a crash, that's more important than any
				// minimization of interesting inputs that was being done. Clear out
				// keepCoverage to only minimize the crash going forward.
				keepCoverage = nil
			}
			return true
		}
		// Minimization should preserve coverage bits.
		if keepCoverage != nil && isCoverageSubset(keepCoverage, coverageSnapshot) {
			return true
		}
		vals[args.Index] = prev
		return false
	}
	switch v := vals[args.Index].(type) {
	case string:
		minimizeBytes([]byte(v), tryMinimized, shouldStop)
	case []byte:
		minimizeBytes(v, tryMinimized, shouldStop)
	default:
		panic("impossible")
	}
	return true, retErr
}

func writeToMem(vals []any, mem *sharedMem) {
	b := marshalCorpusFile(vals...)
	mem.setValue(b)
}

// ping does nothing. The coordinator calls this method to ensure the worker
// has called F.Fuzz and can communicate.
func (ws *workerServer) ping(ctx context.Context, args pingArgs) pingResponse {
	return pingResponse{}
}

// workerClient is a minimalist RPC client. The coordinator process uses a
// workerClient to call methods in each worker process (handled by
// workerServer).
type workerClient struct {
	workerComm
	m *mutator

	// mu is the mutex protecting the workerComm.fuzzIn pipe. This must be
	// locked before making calls to the workerServer. It prevents
	// workerClient.Close from closing fuzzIn while workerClient methods are
	// writing to it concurrently, and prevents multiple callers from writing to
	// fuzzIn concurrently.
	mu sync.Mutex
}

func newWorkerClient(comm workerComm, m *mutator) *workerClient {
	return &workerClient{workerComm: comm, m: m}
}

// Close shuts down the connection to the RPC server (the worker process) by
// closing fuzz_in. Close drains fuzz_out (avoiding a SIGPIPE in the worker),
// and closes it after the worker process closes the other end.
func (wc *workerClient) Close() error {
	wc.mu.Lock()
	defer wc.mu.Unlock()

	// Close fuzzIn. This signals to the server that there are no more calls,
	// and it should exit.
	if err := wc.fuzzIn.Close(); err != nil {
		wc.fuzzOut.Close()
		return err
	}

	// Drain fuzzOut and close it. When the server exits, the kernel will close
	// its end of fuzzOut, and we'll get EOF.
	if _, err := io.Copy(io.Discard, wc.fuzzOut); err != nil {
		wc.fuzzOut.Close()
		return err
	}
	return wc.fuzzOut.Close()
}

// errSharedMemClosed is returned by workerClient methods that cannot access
// shared memory because it was closed and unmapped by another goroutine. That
// can happen when worker.cleanup is called in the worker goroutine while a
// workerClient.fuzz call runs concurrently.
//
// This error should not be reported. It indicates the operation was
// interrupted.
var errSharedMemClosed = errors.New("internal error: shared memory was closed and unmapped")

// minimize tells the worker to call the minimize method. See
// workerServer.minimize.
func (wc *workerClient) minimize(ctx context.Context, entryIn CorpusEntry, args minimizeArgs) (entryOut CorpusEntry, resp minimizeResponse, retErr error) {
	wc.mu.Lock()
	defer wc.mu.Unlock()

	mem, ok := <-wc.memMu
	if !ok {
		return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed
	}
	defer func() { wc.memMu <- mem }()
	mem.header().count = 0
	inp, err := corpusEntryData(entryIn)
	if err != nil {
		return CorpusEntry{}, minimizeResponse{}, err
	}
	mem.setValue(inp)
	entryOut = entryIn
	entryOut.Values, err = unmarshalCorpusFile(inp)
	if err != nil {
		return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling provided value: %v", err)
	}
	for i, v := range entryOut.Values {
		if !isMinimizable(reflect.TypeOf(v)) {
			continue
		}

		wc.memMu <- mem
		args.Index = i
		c := call{Minimize: &args}
		callErr := wc.callLocked(ctx, c, &resp)
		mem, ok = <-wc.memMu
		if !ok {
			return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed
		}

		if callErr != nil {
			retErr = callErr
			if !mem.header().rawInMem {
				// An unrecoverable error occurred before minimization began.
				return entryIn, minimizeResponse{}, retErr
			}
			// An unrecoverable error occurred during minimization. mem now
			// holds the raw, unmarshaled bytes of entryIn.Values[i] that
			// caused the error.
			switch entryOut.Values[i].(type) {
			case string:
				entryOut.Values[i] = string(mem.valueCopy())
			case []byte:
				entryOut.Values[i] = mem.valueCopy()
			default:
				panic("impossible")
			}
			entryOut.Data = marshalCorpusFile(entryOut.Values...)
			// Stop minimizing; another unrecoverable error is likely to occur.
			break
		}

		if resp.WroteToMem {
			// Minimization succeeded, and mem holds the marshaled data.
			entryOut.Data = mem.valueCopy()
			entryOut.Values, err = unmarshalCorpusFile(entryOut.Data)
			if err != nil {
				return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling minimized value: %v", err)
			}
		}

		// Prepare for next iteration of the loop.
		if args.Timeout != 0 {
			args.Timeout -= resp.Duration
			if args.Timeout <= 0 {
				break
			}
		}
		if args.Limit != 0 {
			args.Limit -= mem.header().count
			if args.Limit <= 0 {
				break
			}
		}
	}
	resp.Count = mem.header().count
	h := sha256.Sum256(entryOut.Data)
	entryOut.Path = fmt.Sprintf("%x", h[:4])
	return entryOut, resp, retErr
}

// fuzz tells the worker to call the fuzz method. See workerServer.fuzz.
func (wc *workerClient) fuzz(ctx context.Context, entryIn CorpusEntry, args fuzzArgs) (entryOut CorpusEntry, resp fuzzResponse, isInternalError bool, err error) {
	wc.mu.Lock()
	defer wc.mu.Unlock()

	mem, ok := <-wc.memMu
	if !ok {
		return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
	}
	mem.header().count = 0
	inp, err := corpusEntryData(entryIn)
	if err != nil {
		wc.memMu <- mem
		return CorpusEntry{}, fuzzResponse{}, true, err
	}
	mem.setValue(inp)
	wc.memMu <- mem

	c := call{Fuzz: &args}
	callErr := wc.callLocked(ctx, c, &resp)
	if resp.InternalErr != "" {
		return CorpusEntry{}, fuzzResponse{}, true, errors.New(resp.InternalErr)
	}
	mem, ok = <-wc.memMu
	if !ok {
		return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
	}
	defer func() { wc.memMu <- mem }()
	resp.Count = mem.header().count

	if !bytes.Equal(inp, mem.valueRef()) {
		return CorpusEntry{}, fuzzResponse{}, true, errors.New("workerServer.fuzz modified input")
	}
	 :=  != nil || .Err != "" ||
		(!.Warmup && .CoverageData != nil)
	if  {
		,  := unmarshalCorpusFile()
		if  != nil {
			return CorpusEntry{}, fuzzResponse{}, true, fmt.Errorf("unmarshaling fuzz input value after call: %v", )
		}
		.m.r.restore(.header().randState, .header().randInc)
		if !.Warmup {
			// Only mutate the valuesOut if fuzzing actually occurred.
			 := ((.Count - 1) % chainedMutations) + 1
			for  := int64(0);  < ; ++ {
				.m.mutate(, cap(.valueRef()))
			}
		}
		 := marshalCorpusFile(...)

		 := sha256.Sum256()
		 := fmt.Sprintf("%x", [:4])
		 = CorpusEntry{
			Parent:     .Path,
			Path:       ,
			Data:       ,
			Generation: .Generation + 1,
		}
		if .Warmup {
			// The bytes weren't mutated, so if entryIn was a seed corpus value,
			// then entryOut is too.
			.IsSeed = .IsSeed
		}
	}

	return , , false, 
}

// ping tells the worker to call the ping method. See workerServer.ping.
func (wc *workerClient) ping(ctx context.Context) error {
	wc.mu.Lock()
	defer wc.mu.Unlock()
	c := call{Ping: &pingArgs{}}
	var resp pingResponse
	return wc.callLocked(ctx, c, &resp)
}

// callLocked sends an RPC from the coordinator to the worker process and waits
// for the response. callLocked may be cancelled with ctx.
func (wc *workerClient) callLocked(ctx context.Context, c call, resp any) (err error) {
	enc := json.NewEncoder(wc.fuzzIn)
	dec := json.NewDecoder(&contextReader{ctx: ctx, r: wc.fuzzOut})
	if err := enc.Encode(c); err != nil {
		return err
	}
	return dec.Decode(resp)
}

// contextReader wraps a Reader with a Context. If the context is cancelled
// while the underlying reader is blocked, Read returns immediately.
//
// This is useful for reading from a pipe. Closing a pipe file descriptor does
// not unblock pending Reads on that file descriptor. All copies of the pipe's
// other file descriptor (the write end) must be closed in all processes that
// inherit it. This is difficult to do correctly in the situation we care about
// (process group termination).
type contextReader struct {
	ctx context.Context
	r   io.Reader
}

func (cr *contextReader) Read(b []byte) (int, error) {
	if err := cr.ctx.Err(); err != nil {
		return 0, err
	}
	done := make(chan struct{})

	// This goroutine may stay blocked after Read returns because the underlying
	// read is blocked.
	var n int
	var err error
	go func() {
		n, err = cr.r.Read(b)
		close(done)
	}()

	select {
	case <-cr.ctx.Done():
		return 0, cr.ctx.Err()
	case <-done:
		return n, err
	}
}