Merge branch 'go-sql-driver:master' into fix_bigint_unsigned_scan_type
elonnzhang authored Jan 15, 2025
2 parents 6368cf3 + 7403860 commit 9a48688
Showing 19 changed files with 688 additions and 316 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -83,7 +83,7 @@ jobs:
my-cnf: |
innodb_log_file_size=256MB
innodb_buffer_pool_size=512MB
max_allowed_packet=16MB
max_allowed_packet=48MB
; TestConcurrent fails if max_connections is too large
max_connections=50
local_infile=1
4 changes: 4 additions & 0 deletions AUTHORS
@@ -20,7 +20,9 @@ Andrew Reid <andrew.reid at tixtrack.com>
Animesh Ray <mail.rayanimesh at gmail.com>
Arne Hormann <arnehormann at gmail.com>
Ariel Mashraki <ariel at mashraki.co.il>
Artur Melanchyk <artur.melanchyk@gmail.com>
Asta Xie <xiemengjun at gmail.com>
B Lamarche <blam413 at gmail.com>
Brian Hendriks <brian at dolthub.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Caine Jette <jette at alum.mit.edu>
@@ -62,6 +64,7 @@ Jennifer Purevsuren <jennifer at dolthub.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joe Mann <contact at joemann.co.uk>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
@@ -92,6 +95,7 @@ Paul Bonser <misterpib at gmail.com>
Paulius Lozys <pauliuslozys at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Phil Porada <philporada at gmail.com>
Minh Quang <minhquang4334 at gmail.com>
Rebecca Chin <rchin at pivotal.io>
Reed Allman <rdallman10 at gmail.com>
Richard Wilkes <wilkes at me.com>
11 changes: 11 additions & 0 deletions README.md
@@ -38,6 +38,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
* Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
* Supports zlib compression.

## Requirements

@@ -267,6 +268,16 @@ SELECT u.id FROM users as u

will return `u.id` instead of just `id` if `columnsWithAlias=true`.

##### `compress`

```
Type: bool
Valid Values: true, false
Default: false
```

Toggles zlib compression. false by default.

##### `interpolateParams`

```
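As a usage note for the new `compress` option documented above, here is a minimal sketch of a DSN that enables it (the credentials, address, and database name are placeholder values, not from this commit):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// compress=true enables zlib compression on the connection;
	// user, password, and the address are placeholders for this sketch.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?compress=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```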
28 changes: 20 additions & 8 deletions benchmark_test.go
@@ -46,9 +46,13 @@ func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
return stmt
}

func initDB(b *testing.B, queries ...string) *sql.DB {
func initDB(b *testing.B, useCompression bool, queries ...string) *sql.DB {
tb := (*TB)(b)
db := tb.checkDB(sql.Open(driverNameTest, dsn))
comprStr := ""
if useCompression {
comprStr = "&compress=1"
}
db := tb.checkDB(sql.Open(driverNameTest, dsn+comprStr))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
b.Fatalf("error on %q: %v", query, err)
@@ -60,10 +64,18 @@ func initDB(b *testing.B, queries ...string) *sql.DB {
const concurrencyLevel = 10

func BenchmarkQuery(b *testing.B) {
benchmarkQueryHelper(b, false)
}

func BenchmarkQueryCompression(b *testing.B) {
benchmarkQueryHelper(b, true)
}

func benchmarkQueryHelper(b *testing.B, compr bool) {
tb := (*TB)(b)
b.StopTimer()
b.ReportAllocs()
db := initDB(b,
db := initDB(b, compr,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
@@ -222,7 +234,7 @@ func BenchmarkInterpolation(b *testing.B) {
},
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
buf: newBuffer(nil),
buf: newBuffer(),
}

args := []driver.Value{
@@ -269,7 +281,7 @@ func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
}

func BenchmarkQueryContext(b *testing.B) {
db := initDB(b,
db := initDB(b, false,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
@@ -305,7 +317,7 @@ func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
}

func BenchmarkExecContext(b *testing.B) {
db := initDB(b,
db := initDB(b, false,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
`INSERT INTO foo VALUES (1, "one")`,
@@ -323,7 +335,7 @@ func BenchmarkExecContext(b *testing.B) {
// "size=" means size of each blobs.
func BenchmarkQueryRawBytes(b *testing.B) {
var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
db := initDB(b,
db := initDB(b, false,
"DROP TABLE IF EXISTS bench_rawbytes",
"CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
)
@@ -376,7 +388,7 @@ func BenchmarkQueryRawBytes(b *testing.B) {
// BenchmarkReceiveMassiveRows measures performance of receiving large number of rows.
func BenchmarkReceiveMassiveRows(b *testing.B) {
// Setup -- prepare 10000 rows.
db := initDB(b,
db := initDB(b, false,
"DROP TABLE IF EXISTS foo",
"CREATE TABLE foo (id INT PRIMARY KEY, val TEXT)")
defer db.Close()
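With the benchmark body split into `benchmarkQueryHelper`, the compressed variant can be measured next to the plain one. A typical invocation (standard `go test` benchmark flags; the name regex is only an example):

```
go test -bench='BenchmarkQuery(Compression)?$' -benchmem
```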
131 changes: 50 additions & 81 deletions buffer.go
@@ -10,54 +10,42 @@ package mysql

import (
"io"
"net"
"time"
)

const defaultBufSize = 4096
const maxCachedBufSize = 256 * 1024

// readerFunc is a function that is compatible with io.Reader.
// We use this function type instead of io.Reader because we want to
// just pass mc.readWithTimeout.
type readerFunc func([]byte) (int, error)

// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
buf []byte // buf is a byte buffer whose length and capacity are equal.
nc net.Conn
idx int
length int
timeout time.Duration
dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
flipcnt uint // flipcnt is the current buffer counter for double-buffering
buf []byte // read buffer.
cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
}

// newBuffer allocates and returns a new buffer.
func newBuffer(nc net.Conn) buffer {
fg := make([]byte, defaultBufSize)
func newBuffer() buffer {
return buffer{
buf: fg,
nc: nc,
dbuf: [2][]byte{fg, nil},
cachedBuf: make([]byte, defaultBufSize),
}
}

// flip replaces the active buffer with the background buffer
// this is a delayed flip that simply increases the buffer counter;
// the actual flip will be performed the next time we call `buffer.fill`
func (b *buffer) flip() {
b.flipcnt += 1
// busy returns true if the read buffer is not empty.
func (b *buffer) busy() bool {
return len(b.buf) > 0
}

// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
n := b.length
// fill data into its double-buffering target: if we've called
// flip on this buffer, we'll be copying to the background buffer,
// and then filling it with network data; otherwise we'll just move
// the contents of the current buffer to the front before filling it
dest := b.dbuf[b.flipcnt&1]
// fill reads into the read buffer until at least _need_ bytes are in it.
func (b *buffer) fill(need int, r readerFunc) error {
// we'll move the contents of the current buffer to dest before filling it.
dest := b.cachedBuf

// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
@@ -67,83 +55,67 @@ func (b *buffer) fill(need int) error {
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
b.dbuf[b.flipcnt&1] = dest
b.cachedBuf = dest
}
}

// if we're filling the fg buffer, move the existing data to the start of it.
// if we're filling the bg buffer, copy over the data
if n > 0 {
copy(dest[:n], b.buf[b.idx:])
}

b.buf = dest
b.idx = 0
// move the existing data to the start of the buffer.
n := len(b.buf)
copy(dest[:n], b.buf)

for {
if b.timeout > 0 {
if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
return err
}
nn, err := r(dest[n:])
n += nn

if err == nil && n < need {
continue
}

nn, err := b.nc.Read(b.buf[n:])
n += nn
b.buf = dest[:n]

switch err {
case nil:
if err == io.EOF {
if n < need {
continue
err = io.ErrUnexpectedEOF
} else {
err = nil
}
b.length = n
return nil

case io.EOF:
if n >= need {
b.length = n
return nil
}
return io.ErrUnexpectedEOF

default:
return err
}
return err
}
}

// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int) ([]byte, error) {
if b.length < need {
func (b *buffer) readNext(need int, r readerFunc) ([]byte, error) {
if len(b.buf) < need {
// refill
if err := b.fill(need); err != nil {
if err := b.fill(need, r); err != nil {
return nil, err
}
}

offset := b.idx
b.idx += need
b.length -= need
return b.buf[offset:b.idx], nil
data := b.buf[:need]
b.buf = b.buf[need:]
return data, nil
}

// takeBuffer returns a buffer with the requested size.
// If possible, a slice from the existing buffer is returned.
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}

// test (cheap) general case first
if length <= cap(b.buf) {
return b.buf[:length], nil
if length <= len(b.cachedBuf) {
return b.cachedBuf[:length], nil
}

if length < maxPacketSize {
b.buf = make([]byte, length)
return b.buf, nil
if length < maxCachedBufSize {
b.cachedBuf = make([]byte, length)
return b.cachedBuf, nil
}

// buffer is larger than we want to store.
@@ -154,29 +126,26 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf[:length], nil
return b.cachedBuf[:length], nil
}

// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf, nil
return b.cachedBuf, nil
}

// store stores buf, an updated buffer, if it's suitable to do so.
func (b *buffer) store(buf []byte) error {
if b.length > 0 {
return ErrBusyBuffer
} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
b.buf = buf[:cap(buf)]
func (b *buffer) store(buf []byte) {
if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
b.cachedBuf = buf[:cap(buf)]
}
return nil
}
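The buffer.go rewrite above replaces the double-buffering scheme with a single cached buffer and threads all network reads through the new `readerFunc` type, so the buffer no longer holds a `net.Conn` or read timeout itself. As an illustration only (a standalone sketch, not code from this commit), the pattern looks like this: any function or method value with `io.Reader`'s `Read` signature, such as the `mc.readWithTimeout` mentioned in the comment above, can be passed in directly without wrapping it in an interface value:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readerFunc mirrors the helper type introduced in buffer.go: a plain
// function with io.Reader's Read signature.
type readerFunc func([]byte) (int, error)

// readAtLeast calls r until dest holds at least need bytes, converting a
// premature EOF into io.ErrUnexpectedEOF: the same policy the new fill applies.
func readAtLeast(dest []byte, need int, r readerFunc) (int, error) {
	n := 0
	for n < need {
		nn, err := r(dest[n:])
		n += nn
		if err == io.EOF {
			if n < need {
				return n, io.ErrUnexpectedEOF
			}
			return n, nil
		}
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

func main() {
	src := bytes.NewReader([]byte("hello world"))
	buf := make([]byte, 16)

	// A method value like src.Read converts directly to readerFunc.
	n, err := readAtLeast(buf, 5, src.Read)
	fmt.Println(n, err, string(buf[:5])) // 11 <nil> hello
}
```

Passing a function value rather than an `io.Reader` keeps the call site simple; per the comment in the diff, the motivation is that the connection's timeout-aware read method can be handed over as-is.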
