// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.jsonv2

package jsontext

import (
	"bytes"
	"io"
	"math/bits"
	"sync"
)

// TODO(https://go.dev/issue/47657): Use sync.PoolOf.

var (
	// This owns the internal buffer since there is no io.Writer to output to.
	// Since the buffer can grow arbitrarily large in normal usage,
	// there is statistical tracking logic to determine whether to recycle
	// the internal buffer or not based on a history of utilization.
	bufferedEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}

	// This owns the internal buffer, but it is only used to temporarily store
	// buffered JSON before flushing it to the underlying io.Writer.
	// In a sufficiently efficient streaming mode, we do not expect the buffer
	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
	streamingEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}

	// This does not own the internal buffer since
	// it is taken directly from the provided bytes.Buffer.
	bytesBufferEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}
)
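
// For reference, an informal summary of the ownership and recycling
// policies above (the 4KiB and 64KiB cutoffs come from putBufferedEncoder
// and putStreamingEncoder below):
//
//	bufferedEncoderPool     owns Buf; recycling gated by bufStats utilization history
//	streamingEncoderPool    owns Buf; Buf is dropped on Put once it exceeds 64KiB
//	bytesBufferEncoderPool  borrows Buf from the caller's bytes.Buffer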

// bufferStatistics records statistics about buffer utilization.
// It is used to determine whether to recycle a buffer or not
// to avoid https://go.dev/issue/23199.
type bufferStatistics struct {
	strikes int // number of times the buffer was under-utilized
	prevLen int // length of previous buffer
}

func getBufferedEncoder(opts ...Options) *Encoder {
	e := bufferedEncoderPool.Get().(*Encoder)
	if e.s.Buf == nil {
		// Round up to nearest 2ⁿ to make best use of malloc size classes.
		// See runtime/sizeclasses.go on Go1.15.
		// Logical OR with 63 to ensure 64 as the minimum buffer size.
		n := 1 << bits.Len(uint(e.s.bufStats.prevLen|63))
		e.s.Buf = make([]byte, 0, n)
	}
	e.s.reset(e.s.Buf[:0], nil, opts...)
	return e
}
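
// To illustrate the rounding above (a worked trace, not part of the
// allocation logic; bits.Len(x) is the number of bits needed to represent x):
//
//	prevLen = 0    → 0|63 = 63      → 1<<bits.Len(63)   = 1<<6  = 64
//	prevLen = 1000 → 1000|63 = 1023 → 1<<bits.Len(1023) = 1<<10 = 1024
//	prevLen = 1024 → 1024|63 = 1087 → 1<<bits.Len(1087) = 1<<11 = 2048
//
// Note that an exact power of two rounds up to the next size class,
// which slightly over-allocates but keeps the heuristic simple.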
func putBufferedEncoder(e *Encoder) {
	// Recycle large buffers only if sufficiently utilized.
	// If a buffer is under-utilized enough times sequentially,
	// then it is discarded, ensuring that a single large buffer
	// won't be kept alive by a continuous stream of small usages.
	//
	// The worst case utilization is computed as:
	//	MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES)
	//
	// For the constants chosen below, this is (25%)/(1+4) ⇒ 5%.
	// This may seem low, but it ensures a lower bound on
	// the absolute worst-case utilization. Without this check,
	// this would be theoretically 0%, which is infinitely worse.
	//
	// See https://go.dev/issue/27735.
	switch {
	case cap(e.s.Buf) <= 4<<10: // always recycle buffers smaller than 4KiB
		e.s.bufStats.strikes = 0
	case cap(e.s.Buf)/4 <= len(e.s.Buf): // at least 25% utilization
		e.s.bufStats.strikes = 0
	case e.s.bufStats.strikes < 4: // at most 4 strikes
		e.s.bufStats.strikes++
	default: // discard the buffer; too large and too often under-utilized
		e.s.bufStats.strikes = 0
		e.s.bufStats.prevLen = len(e.s.Buf) // heuristic for size to allocate next time
		e.s.Buf = nil
	}
	bufferedEncoderPool.Put(e)
}
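
// To make the bound above concrete: the most pathological recycling
// pattern for a large buffer is four ~0% uses (accumulating 4 strikes)
// followed by one exactly-25% use (resetting strikes to 0), repeating
// indefinitely. The average utilization over that cycle is
//
//	(0 + 0 + 0 + 0 + 25%) / 5 = 5%
//
// which matches MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES).
// Any pattern that dips lower loses the buffer on the fifth consecutive
// strike, after which the next allocation is right-sized via bufStats.prevLen.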

func getStreamingEncoder(wr io.Writer, opts ...Options) *Encoder {
	if _, ok := wr.(*bytes.Buffer); ok {
		e := bytesBufferEncoderPool.Get().(*Encoder)
		e.s.reset(nil, wr, opts...) // buffer taken from bytes.Buffer
		return e
	} else {
		e := streamingEncoderPool.Get().(*Encoder)
		e.s.reset(e.s.Buf[:0], wr, opts...) // preserve existing buffer
		return e
	}
}
func putStreamingEncoder(e *Encoder) {
	if _, ok := e.s.wr.(*bytes.Buffer); ok {
		bytesBufferEncoderPool.Put(e)
	} else {
		if cap(e.s.Buf) > 64<<10 {
			e.s.Buf = nil // avoid pinning arbitrarily large amounts of memory
		}
		streamingEncoderPool.Put(e)
	}
}
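
// A minimal sketch of how these helpers pair up, from a hypothetical
// caller (the real callers are this package's exported Encoder constructors):
//
//	var bb bytes.Buffer
//	e := getStreamingEncoder(&bb, opts...) // bytes.Buffer fast path: no internal Buf
//	... encode with e ...
//	putStreamingEncoder(e) // returns e to bytesBufferEncoderPool
//
//	e = getStreamingEncoder(os.Stdout, opts...) // generic io.Writer: keeps its own Buf
//	... encode with e ...
//	putStreamingEncoder(e) // drops Buf beyond 64KiB, then pools e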

var (
	// This does not own the internal buffer since it is externally provided.
	bufferedDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}

	// This owns the internal buffer, but it is only used to temporarily store
	// buffered JSON fetched from the underlying io.Reader.
	// In a sufficiently efficient streaming mode, we do not expect the buffer
	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
	streamingDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}

	// This does not own the internal buffer since
	// it is taken directly from the provided bytes.Buffer.
	// Since neither this nor bufferedDecoderPool owns its buffer,
	// the two can safely share the same underlying pool.
	bytesBufferDecoderPool = bufferedDecoderPool
)

func getBufferedDecoder(buf []byte, opts ...Options) *Decoder {
	d := bufferedDecoderPool.Get().(*Decoder)
	d.s.reset(buf, nil, opts...)
	return d
}
func putBufferedDecoder(d *Decoder) {
	bufferedDecoderPool.Put(d)
}

func getStreamingDecoder(rd io.Reader, opts ...Options) *Decoder {
	if _, ok := rd.(*bytes.Buffer); ok {
		d := bytesBufferDecoderPool.Get().(*Decoder)
		d.s.reset(nil, rd, opts...) // buffer taken from bytes.Buffer
		return d
	} else {
		d := streamingDecoderPool.Get().(*Decoder)
		d.s.reset(d.s.buf[:0], rd, opts...) // preserve existing buffer
		return d
	}
}
func putStreamingDecoder(d *Decoder) {
	if _, ok := d.s.rd.(*bytes.Buffer); ok {
		bytesBufferDecoderPool.Put(d)
	} else {
		if cap(d.s.buf) > 64<<10 {
			d.s.buf = nil // avoid pinning arbitrarily large amounts of memory
		}
		streamingDecoderPool.Put(d)
	}
}
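
// The decoder side mirrors the encoder pairing; a hypothetical caller:
//
//	d := getStreamingDecoder(strings.NewReader(`{"k":"v"}`), opts...)
//	... decode with d ...
//	putStreamingDecoder(d) // drops buf beyond 64KiB, then pools d
//
//	d = getBufferedDecoder(data, opts...) // decodes directly from data; buf not owned
//	... decode with d ...
//	putBufferedDecoder(d)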