buffer.go
// Package gbuf provides a simple generic type buffer for marshaling data.
package gbuf
import (
"errors"
"io"
"sync/atomic"
"github.com/zalgonoise/gio"
)
// smallBufferSize is an initial allocation minimal capacity.
const smallBufferSize = 64
// A Buffer is a variable-sized buffer of T items with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer[T any] struct {
buf []T // contents are the T items buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
isReading atomic.Bool // last operation type, so that Unread* can work correctly.
}
const maxInt = int(^uint(0) >> 1)
// Value returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
// only until the next call to a method like Read, Write, Reset, or Truncate).
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer[T]) Value() []T { return b.buf[b.off:] }
// empty reports whether the unread portion of the buffer is empty.
func (b *Buffer[T]) empty() bool { return len(b.buf) <= b.off }
// Len returns the number of T items of the unread portion of the buffer;
// b.Len() == len(b.Value()).
func (b *Buffer[T]) Len() int { return len(b.buf) - b.off }
// Cap returns the capacity of the buffer's underlying T item slice, that is, the
// total space allocated for the buffer's data.
func (b *Buffer[T]) Cap() int { return cap(b.buf) }
// Truncate discards all but the first n unread T items from the buffer
// but continues to use the same allocated storage.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer[T]) Truncate(n int) {
if n == 0 {
b.Reset()
return
}
b.isReading.Store(false)
if n < 0 || n > b.Len() {
panic("gbuf.Buffer: truncation out of range")
}
b.buf = b.buf[:b.off+n]
}
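
// Illustrative usage sketch (not part of the original file): Truncate keeps
// only the first n unread items while reusing the existing allocation.
func exampleTruncate() []int {
	b := NewBuffer[int]([]int{1, 2, 3, 4, 5})
	b.Truncate(2)    // unread portion is now []int{1, 2}
	return b.Value() // same backing array, shorter length
}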
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as Truncate(0).
func (b *Buffer[T]) Reset() {
b.buf = b.buf[:0]
b.off = 0
b.isReading.Store(false)
}
// tryGrowByReslice is an inlineable version of grow for the fast-case where the
// internal buffer only needs to be resliced.
// It returns the index where T items should be written and whether it succeeded.
func (b *Buffer[T]) tryGrowByReslice(n int) (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
b.buf = b.buf[:l+n]
return l, true
}
return 0, false
}
// grow grows the buffer to guarantee space for n more T items.
// It returns the index where T items should be written.
// If the buffer can't grow it will panic with ErrBufferTooLarge.
func (b *Buffer[T]) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
b.Reset()
}
// Try to grow by means of a reslice.
if i, ok := b.tryGrowByReslice(n); ok {
return i
}
if b.buf == nil && n <= smallBufferSize {
b.buf = make([]T, n, smallBufferSize)
return 0
}
c := cap(b.buf)
switch {
case n <= c/2-m:
// We can slide things down instead of allocating a new
// slice. We only need m+n <= c to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf, b.buf[b.off:])
case c > maxInt-c-n:
panic(ErrBufferTooLarge)
default:
// Add b.off to account for b.buf[:b.off] being sliced off the front.
b.buf = growSlice(b.buf[b.off:], b.off+n)
}
// Restore b.off and len(b.buf).
b.off = 0
b.buf = b.buf[:m+n]
return m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n T items. After Grow(n), at least n T items can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrBufferTooLarge.
func (b *Buffer[T]) Grow(n int) {
if n < 0 {
panic(ErrBufferNegativeCount)
}
m := b.grow(n)
b.buf = b.buf[:m]
}
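
// Illustrative usage sketch (not part of the original file): Grow preallocates
// capacity so that a following Write of up to n items does not reallocate.
func exampleGrow(items []float64) *Buffer[float64] {
	b := NewBuffer[float64](nil)
	b.Grow(len(items)) // capacity reserved up front; length stays zero
	_, _ = b.Write(items)
	return b
}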
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrBufferTooLarge.
func (b *Buffer[T]) Write(p []T) (n int, err error) {
b.isReading.Store(false)
m, ok := b.tryGrowByReslice(len(p))
if !ok {
m = b.grow(len(p))
}
return copy(b.buf[m:], p), nil
}
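
// Illustrative usage sketch (not part of the original file): Write appends
// items and Value exposes the unread portion without copying.
func exampleWriteValue() []int {
	b := NewBuffer[int](nil)
	_, _ = b.Write([]int{1, 2, 3}) // the returned error is always nil
	return b.Value()               // aliases the internal slice: [1 2 3]
}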
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead T items beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of T items read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrBufferTooLarge.
func (b *Buffer[T]) ReadFrom(r gio.Reader[T]) (n int64, err error) {
b.isReading.Store(false)
for {
i := b.grow(MinRead)
b.buf = b.buf[:i]
m, e := r.Read(b.buf[i:cap(b.buf)])
if m < 0 {
panic(ErrBufferNegativeRead)
}
b.buf = b.buf[:i+m]
n += int64(m)
if errors.Is(e, io.EOF) {
return n, nil // e is EOF, so return nil explicitly
}
if e != nil {
return n, e
}
}
}
// growSlice grows b by n, preserving the original content of b.
// If the allocation fails, it panics with ErrBufferTooLarge.
func growSlice[T any](b []T, n int) []T {
defer func() {
if recover() != nil {
panic(ErrBufferTooLarge)
}
}()
// TODO(http://golang.org/issue/51462): We should rely on the append-make
// pattern so that the compiler can call runtime.growslice. For example:
// return append(b, make([]T, n)...)
// This avoids unnecessary zero-ing of the first len(b) T items of the
// allocated slice, but this pattern causes b to escape onto the heap.
//
// Instead use the append-make pattern with a nil slice to ensure that
// we allocate buffers rounded up to the closest size class.
c := len(b) + n // ensure enough space for n elements
if c < double*cap(b) {
// The growth rate has historically always been 2x. In the future,
// we could rely purely on append to determine the growth rate.
c = double * cap(b)
}
b2 := append([]T(nil), make([]T, c)...)
copy(b2, b)
return b2[:len(b)]
}
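
// Illustrative sketch (not part of the original file): the capacity visible
// through Cap starts at smallBufferSize for a small first write and then grows
// roughly by doubling as growSlice reallocates.
func exampleGrowth() (first, later int) {
	b := NewBuffer[int](nil)
	_ = b.WriteItem(0)
	first = b.Cap() // smallBufferSize on the first allocation
	for i := 1; i < 200; i++ {
		_ = b.WriteItem(i)
	}
	later = b.Cap() // at least 256 here, after repeated doubling
	return first, later
}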
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of T items written; it always fits into an
// int, but it is int64 to match the gio.WriterTo interface. Any error
// encountered during the write operation is also returned.
func (b *Buffer[T]) WriteTo(w gio.Writer[T]) (n int64, err error) {
b.isReading.Store(false)
if nItems := b.Len(); nItems > 0 {
m, e := w.Write(b.buf[b.off:])
if m > nItems {
panic(ErrBufferInvalidWriteCount)
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all T items should have been written, by definition of
// Write method in gio.Writer
if m != nItems {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Reset()
return n, nil
}
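
// Illustrative usage sketch (not part of the original file): drains one buffer
// into another via WriteTo. This assumes gio.Writer[T] mirrors io.Writer with a
// Write(p []T) (int, error) method, which *Buffer[T] satisfies above.
func exampleDrain(src *Buffer[byte]) (*Buffer[byte], error) {
	dst := NewBuffer[byte](nil)
	_, err := src.WriteTo(dst) // src is reset once it is fully drained
	return dst, err
}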
// WriteItem appends the T `item` to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match gio.Writer's
// WriteItem. If the buffer becomes too large, WriteItem will panic with
// ErrBufferTooLarge.
func (b *Buffer[T]) WriteItem(item T) error {
b.isReading.Store(false)
m, ok := b.tryGrowByReslice(1)
if !ok {
m = b.grow(1)
}
b.buf[m] = item
return nil
}
// Read reads the next len(p) T items from the buffer or until the buffer
// is drained. The return value n is the number of T items read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer[T]) Read(p []T) (n int, err error) {
b.isReading.Store(false)
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
if len(p) == 0 {
return 0, nil
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
if n > 0 {
b.isReading.Store(true)
}
return n, nil
}
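
// Illustrative usage sketch (not part of the original file): a full read
// drains the buffer; the next Read would return io.EOF.
func exampleRead() ([]int, error) {
	b := NewBuffer[int]([]int{1, 2, 3, 4})
	out := make([]int, b.Len())
	_, err := b.Read(out) // copies 1, 2, 3, 4 into out; err is nil
	return out, err
}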
// Next returns a slice containing the next n T items from the buffer,
// advancing the buffer as if the T items had been returned by Read.
// If there are fewer than n T items in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer[T]) Next(n int) []T {
b.isReading.Store(false)
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
if n > 0 {
b.isReading.Store(true)
}
return data
}
// ReadItem reads and returns the next T item from the buffer.
// If no T item is available, it returns error io.EOF.
func (b *Buffer[T]) ReadItem() (T, error) {
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
var zero T
return zero, io.EOF
}
c := b.buf[b.off]
b.off++
b.isReading.Store(true)
return c, nil
}
// UnreadItem unreads the last T item returned by the most recent successful
// read operation that read at least one T item. If a write operation has happened since
// the last read, if the last read returned an error, or if the read operation reads zero
// T items, UnreadItem returns an error.
func (b *Buffer[T]) UnreadItem() error {
if !b.isReading.Load() {
return ErrBufferUnreadItem
}
b.isReading.Store(false)
if b.off > 0 {
b.off--
}
return nil
}
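
// Illustrative usage sketch (not part of the original file): peeks at the next
// item by reading it and immediately unreading it.
func examplePeek(b *Buffer[string]) (string, error) {
	next, err := b.ReadItem()
	if err != nil {
		return "", err // io.EOF when the buffer is drained
	}
	// UnreadItem steps the read offset back, so the item remains available
	// to the next read.
	return next, b.UnreadItem()
}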
// ReadItems reads until the first item for which delim returns true,
// returning a slice containing the data up to and including that item.
// If ReadItems encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadItems returns err != nil if and only if the returned data does not end in
// an item matching delim.
func (b *Buffer[T]) ReadItems(delim func(T) bool) (line []T, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return line, err
}
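
// Illustrative usage sketch (not part of the original file): ReadItems with a
// predicate splits a rune stream on newline, delimiter included.
func exampleReadLine() ([]rune, error) {
	b := NewBuffer[rune]([]rune("hello\nworld"))
	// Returns the runes of "hello\n" with a nil error, since a matching item
	// is found before the buffer is drained.
	return b.ReadItems(func(r rune) bool { return r == '\n' })
}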
// readSlice is like ReadItems but returns a reference to internal buffer data.
func (b *Buffer[T]) readSlice(delim func(T) bool) (line []T, err error) {
// init end as not found
var end = -1
// iterate through remaining buffer trying to find a matching delimiter
for i := b.off; i < len(b.buf); i++ {
if delim(b.buf[i]) {
// set end to index + 1, as slice ranges require it
end = i + 1
break
}
}
// if there are no matches, get up to the remainder of the existing buffer
if end < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
b.isReading.Store(true)
return line, err
}
// NewBuffer creates and initializes a new Buffer using buf as its
// initial contents. The new Buffer takes ownership of buf, and the
// caller should not use buf after this call. NewBuffer is intended to
// prepare a Buffer to read existing data. It can also be used to set
// the initial size of the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer[T]) (or just declaring a Buffer[T] variable) is
// sufficient to initialize a Buffer of type T.
func NewBuffer[T any](buf []T) *Buffer[T] {
return &Buffer[T]{
buf: buf,
}
}
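
// Illustrative usage sketch (not part of the original file): a buffer can be
// initialized over existing data for reading, while the zero value is already
// usable for writing.
func exampleNew() (*Buffer[byte], *Buffer[string]) {
	r := NewBuffer[byte]([]byte("payload")) // read-ready; the slice is now owned by the buffer
	w := new(Buffer[string])                // zero value, ready for writes
	_ = w.WriteItem("first")
	return r, w
}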