[internal-branch.go1.17-vendor] http2: reduce frameScratchBuffer caching aggressiveness

Use a sync.Pool, not per ClientConn.

Co-authored with dneil@google.com

Updates golang/go#49077

Change-Id: I93f06b19857ab495ffcf15d7ed2aaa2a6cb31515
Reviewed-on: https://go-review.googlesource.com/c/net/+/325169
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
Trust: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Damien Neil <dneil@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/net/+/357670
Trust: Damien Neil <dneil@google.com>
Run-TryBot: Damien Neil <dneil@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
bradfitz authored and dmitshur committed Oct 29, 2021
1 parent 8c44686 commit 136e584
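
In short, the commit drops the per-connection freeBuf cache (which could pin up to 4 buffers of up to 512KB each per ClientConn for the life of the connection) in favor of a single process-wide sync.Pool, so idle buffers can be reclaimed by the garbage collector. A minimal sketch of that pattern, with hypothetical names and not the code from this commit (the real change is in the diff below):

package main

import "sync"

// bufPool stores *[]byte rather than []byte so that returning a
// buffer to the pool does not allocate a new interface box each time.
var bufPool sync.Pool

// getBuf returns a scratch buffer of at least n bytes, reusing a
// pooled buffer when one is large enough; call put when done.
func getBuf(n int) (buf []byte, put func()) {
	if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= n {
		return (*bp)[:n], func() { bufPool.Put(bp) }
	}
	b := make([]byte, n)
	return b, func() { bufPool.Put(&b) }
}
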
Showing 1 changed file with 46 additions and 51 deletions.
transport.go
@@ -264,9 +264,8 @@ type ClientConn struct {
peerMaxHeaderListSize uint64
initialWindowSize uint32

-hbuf bytes.Buffer // HPACK encoder writes into this
-henc *hpack.Encoder
-freeBuf [][]byte
+hbuf bytes.Buffer // HPACK encoder writes into this
+henc *hpack.Encoder

wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
@@ -913,46 +912,6 @@ func (cc *ClientConn) closeForLostPing() error {
return cc.closeForError(err)
}

-const maxAllocFrameSize = 512 << 10
-
-// frameBuffer returns a scratch buffer suitable for writing DATA frames.
-// They're capped at the min of the peer's max frame size or 512KB
-// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
-// bufers.
-func (cc *ClientConn) frameScratchBuffer() []byte {
-cc.mu.Lock()
-size := cc.maxFrameSize
-if size > maxAllocFrameSize {
-size = maxAllocFrameSize
-}
-for i, buf := range cc.freeBuf {
-if len(buf) >= int(size) {
-cc.freeBuf[i] = nil
-cc.mu.Unlock()
-return buf[:size]
-}
-}
-cc.mu.Unlock()
-return make([]byte, size)
-}
-
-func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
-cc.mu.Lock()
-defer cc.mu.Unlock()
-const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
-if len(cc.freeBuf) < maxBufs {
-cc.freeBuf = append(cc.freeBuf, buf)
-return
-}
-for i, old := range cc.freeBuf {
-if old == nil {
-cc.freeBuf[i] = buf
-return
-}
-}
-// forget about it.
-}
-
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
@@ -1295,11 +1254,35 @@ var (
errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)

+// frameScratchBufferLen returns the length of a buffer to use for
+// outgoing request bodies to read/write to/from.
+//
+// It returns max(1, min(peer's advertised max frame size,
+// Request.ContentLength+1, 512KB)).
+func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
+const max = 512 << 10
+n := int64(maxFrameSize)
+if n > max {
+n = max
+}
+if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n {
+// Add an extra byte past the declared content-length to
+// give the caller's Request.Body io.Reader a chance to
+// give us more bytes than they declared, so we can catch it
+// early.
+n = cl + 1
+}
+if n < 1 {
+return 1
+}
+return int(n) // doesn't truncate; max is 512K
+}
+
+var bufPool sync.Pool // of *[]byte
+
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
cc := cs.cc
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
-buf := cc.frameScratchBuffer()
-defer cc.putFrameScratchBuffer(buf)

defer func() {
traceWroteRequest(cs.trace, err)
@@ -1318,9 +1301,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
remainLen := actualContentLength(req)
hasContentLen := remainLen != -1

+cc.mu.Lock()
+maxFrameSize := int(cc.maxFrameSize)
+cc.mu.Unlock()
+
+// Scratch buffer for reading into & writing from.
+scratchLen := cs.frameScratchBufferLen(maxFrameSize)
+var buf []byte
+if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
+defer bufPool.Put(bp)
+buf = *bp
+} else {
+buf = make([]byte, scratchLen)
+defer bufPool.Put(&buf)
+}
+
var sawEOF bool
for !sawEOF {
-n, err := body.Read(buf[:len(buf)-1])
+n, err := body.Read(buf[:len(buf)])
if hasContentLen {
remainLen -= int64(n)
if remainLen == 0 && err == nil {
@@ -1331,8 +1329,9 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
// to send the END_STREAM bit early, double-check that we're actually
// at EOF. Subsequent reads should return (0, EOF) at this point.
// If either value is different, we return an error in one of two ways below.
+var scratch [1]byte
var n1 int
-n1, err = body.Read(buf[n:])
+n1, err = body.Read(scratch[:])
remainLen -= int64(n1)
}
if remainLen < 0 {
@@ -1402,10 +1401,6 @@ }
}
}

-cc.mu.Lock()
-maxFrameSize := int(cc.maxFrameSize)
-cc.mu.Unlock()
-
cc.wmu.Lock()
defer cc.wmu.Unlock()

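
As a footnote on the new sizing rule: frameScratchBufferLen clamps the buffer to max(1, min(peer max frame size, Request.ContentLength+1, 512KB)), where the +1 lets the transport notice a request Body that yields more data than its declared Content-Length. A standalone restatement with a few example inputs (illustrative only; the constant and function names here are not from the commit):

package main

import "fmt"

const maxScratch = 512 << 10 // 512KB cap, as in the commit

// scratchLen mirrors the frameScratchBufferLen logic:
// max(1, min(maxFrameSize, contentLength+1, 512KB)),
// where contentLength == -1 means "unknown".
func scratchLen(maxFrameSize int, contentLength int64) int {
	n := int64(maxFrameSize)
	if n > maxScratch {
		n = maxScratch
	}
	if contentLength != -1 && contentLength+1 < n {
		n = contentLength + 1 // extra byte to catch over-long bodies early
	}
	if n < 1 {
		return 1
	}
	return int(n)
}

func main() {
	fmt.Println(scratchLen(16384, 10))    // 11: tiny declared body
	fmt.Println(scratchLen(16384, -1))    // 16384: unknown length, frame size wins
	fmt.Println(scratchLen(1<<24, 1<<30)) // 524288: capped at 512KB
}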
