Source file src/encoding/json/jsontext/pools.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.jsonv2

package jsontext

import (
	"bytes"
	"io"
	"math/bits"
	"sync"
)

// TODO(https://go.dev/issue/47657): Use sync.PoolOf.

var (
	// This owns the internal buffer since there is no io.Writer to output to.
	// Since the buffer can get arbitrarily large in normal usage,
	// there is statistical tracking logic to determine whether to recycle
	// the internal buffer or not based on a history of utilization.
	bufferedEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}

	// This owns the internal buffer, but it is only used to temporarily store
	// buffered JSON before flushing it to the underlying io.Writer.
	// In a sufficiently efficient streaming mode, we do not expect the buffer
	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
	streamingEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}

	// This does not own the internal buffer since
	// it is taken directly from the provided bytes.Buffer.
	bytesBufferEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}
)

// bufferStatistics records statistics about buffer utilization.
// It is used to determine whether to recycle a buffer or not
// to avoid https://go.dev/issue/23199.
type bufferStatistics struct {
	strikes int // number of times the buffer was under-utilized
	prevLen int // length of previous buffer
}

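// getBufferedEncoder returns a pooled Encoder that owns its own internal buffer.
// A sketch of the intended get/put pairing, with a hypothetical caller shown
// purely for illustration (the real callers live elsewhere in the package):
//
//	e := getBufferedEncoder(opts...)
//	defer putBufferedEncoder(e)
//	// ... encode into e; output accumulates in its internal buffer ...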
func getBufferedEncoder(opts ...Options) *Encoder {
	e := bufferedEncoderPool.Get().(*Encoder)
	if e.s.Buf == nil {
		// Round up to nearest 2ⁿ to make best use of malloc size classes.
		// See runtime/sizeclasses.go on Go1.15.
		// Logical OR with 63 to ensure 64 as the minimum buffer size.
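		//
		// For example, a recorded prevLen of 100 gives 100|63 == 127,
		// bits.Len(127) == 7, and thus a capacity of 128,
		// while a prevLen of 0 yields the 64-byte minimum.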
		n := 1 << bits.Len(uint(e.s.bufStats.prevLen|63))
		e.s.Buf = make([]byte, 0, n)
	}
	e.s.reset(e.s.Buf[:0], nil, opts...)
	return e
}
func putBufferedEncoder(e *Encoder) {
	if cap(e.s.availBuffer) > 64<<10 {
		e.s.availBuffer = nil // avoid pinning arbitrarily large amounts of memory
	}

	// Recycle large buffers only if sufficiently utilized.
	// If a buffer is under-utilized enough times sequentially,
	// then it is discarded, ensuring that a single large buffer
	// won't be kept alive by a continuous stream of small usages.
	//
	// The worst case utilization is computed as:
	//	MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES)
	//
	// For the constants chosen below, this is (25%)/(1+4) ⇒ 5%.
	// This may seem low, but it ensures a lower bound on
	// the absolute worst-case utilization. Without this check,
	// this would be theoretically 0%, which is infinitely worse.
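	//
	// Concretely, for a large buffer to remain pooled indefinitely,
	// at least one out of every five consecutive uses must reach the
	// 25% utilization threshold, so its long-run utilization cannot
	// fall below roughly 25%/5 = 5%.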
	//
	// See https://go.dev/issue/27735.
	switch {
	case cap(e.s.Buf) <= 4<<10: // always recycle buffers smaller than 4KiB
		e.s.bufStats.strikes = 0
	case cap(e.s.Buf)/4 <= len(e.s.Buf): // at least 25% utilization
		e.s.bufStats.strikes = 0
	case e.s.bufStats.strikes < 4: // at most 4 strikes
		e.s.bufStats.strikes++
	default: // discard the buffer; too large and too often under-utilized
		e.s.bufStats.strikes = 0
		e.s.bufStats.prevLen = len(e.s.Buf) // heuristic for size to allocate next time
		e.s.Buf = nil
	}
	bufferedEncoderPool.Put(e)
}

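// getStreamingEncoder returns a pooled Encoder that writes buffered output to w.
// A sketch of how a caller is routed between the two pools, with illustrative
// writers that are not taken from this file:
//
//	var buf bytes.Buffer
//	e1 := getStreamingEncoder(&buf)      // shares buf's storage; bytesBufferEncoderPool
//	e2 := getStreamingEncoder(os.Stdout) // any other io.Writer; streamingEncoderPool
//	...
//	putStreamingEncoder(e1)
//	putStreamingEncoder(e2)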
func getStreamingEncoder(w io.Writer, opts ...Options) *Encoder {
	if _, ok := w.(*bytes.Buffer); ok {
		e := bytesBufferEncoderPool.Get().(*Encoder)
		e.s.reset(nil, w, opts...) // buffer taken from bytes.Buffer
		return e
	} else {
		e := streamingEncoderPool.Get().(*Encoder)
		e.s.reset(e.s.Buf[:0], w, opts...) // preserve existing buffer
		return e
	}
}
func putStreamingEncoder(e *Encoder) {
	if cap(e.s.availBuffer) > 64<<10 {
		e.s.availBuffer = nil // avoid pinning arbitrarily large amounts of memory
	}
	if _, ok := e.s.wr.(*bytes.Buffer); ok {
		e.s.wr, e.s.Buf = nil, nil // avoid pinning the provided bytes.Buffer
		bytesBufferEncoderPool.Put(e)
	} else {
		e.s.wr = nil // avoid pinning the provided io.Writer
		if cap(e.s.Buf) > 64<<10 {
			e.s.Buf = nil // avoid pinning arbitrarily large amounts of memory
		}
		streamingEncoderPool.Put(e)
	}
}

var (
	// This does not own the internal buffer since it is externally provided.
	bufferedDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}

	// This owns the internal buffer, but it is only used to temporarily store
	// buffered JSON fetched from the underlying io.Reader.
	// In a sufficiently efficient streaming mode, we do not expect the buffer
	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
	streamingDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}

	// This does not own the internal buffer since
	// it is taken directly from the provided bytes.Buffer.
	bytesBufferDecoderPool = bufferedDecoderPool
)

func getBufferedDecoder(b []byte, opts ...Options) *Decoder {
	d := bufferedDecoderPool.Get().(*Decoder)
	d.s.reset(b, nil, opts...)
	return d
}
func putBufferedDecoder(d *Decoder) {
	d.s.buf = nil // avoid pinning the provided buffer
	bufferedDecoderPool.Put(d)
}

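// getStreamingDecoder returns a pooled Decoder that reads buffered input from r.
// A sketch of the intended get/put pairing, with a hypothetical reader shown
// purely for illustration:
//
//	d := getStreamingDecoder(strings.NewReader(`{"k":"v"}`), opts...)
//	defer putStreamingDecoder(d)
//	// ... read tokens or values from d ...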
func getStreamingDecoder(r io.Reader, opts ...Options) *Decoder {
	if _, ok := r.(*bytes.Buffer); ok {
		d := bytesBufferDecoderPool.Get().(*Decoder)
		d.s.reset(nil, r, opts...) // buffer taken from bytes.Buffer
		return d
	} else {
		d := streamingDecoderPool.Get().(*Decoder)
		d.s.reset(d.s.buf[:0], r, opts...) // preserve existing buffer
		return d
	}
}
func putStreamingDecoder(d *Decoder) {
	if _, ok := d.s.rd.(*bytes.Buffer); ok {
		d.s.rd, d.s.buf = nil, nil // avoid pinning the provided bytes.Buffer
		bytesBufferDecoderPool.Put(d)
	} else {
		d.s.rd = nil // avoid pinning the provided io.Reader
		if cap(d.s.buf) > 64<<10 {
			d.s.buf = nil // avoid pinning arbitrarily large amounts of memory
		}
		streamingDecoderPool.Put(d)
	}
}
