From 519c6f5dab090320df489621f708fc5bbc1ca25b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Wed, 10 Jul 2024 19:52:56 +0300
Subject: [PATCH] Fix spurious allocation in hasher.Merkleize

---
 hasher.go | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/hasher.go b/hasher.go
index ee5cdfb..8d700ba 100755
--- a/hasher.go
+++ b/hasher.go
@@ -1,13 +1,12 @@
 package ssz
 
 import (
+	"encoding/binary"
 	"fmt"
 	"hash"
 	"math/bits"
 	"sync"
 
-	"encoding/binary"
-
 	"github.com/minio/sha256-simd"
 )
 
@@ -263,6 +262,16 @@ func (h *Hasher) Index() int {
 
 // Merkleize is used to merkleize the last group of the hasher
 func (h *Hasher) Merkleize(indx int) {
+	// merkleizeImpl will expand the `input` by 32 bytes if some hashing depth
+	// hits an odd chunk length. But if we're at the end of `h.buf` already,
+	// appending to `input` will allocate a new buffer, *not* expand `h.buf`,
+	// so the next invocation will realloc, over and over and over. We can pre-
+	// emptively cater for that by ensuring that an extra 32 bytes is always
+	// available.
+	if len(h.buf) == cap(h.buf) {
+		h.buf = append(h.buf, zeroBytes...)
+		h.buf = h.buf[:len(h.buf)-len(zeroBytes)]
+	}
 	input := h.buf[indx:]
 
 	// merkleize the input
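
For context, below is a minimal standalone sketch of the slice-aliasing behaviour the patch comment describes. It is not code from the library: the buffer sizes are arbitrary, and zeroBytes (whose name is borrowed from the patch) is assumed here to be a 32-byte zero chunk.

package main

import "fmt"

// zeroBytes mirrors the 32-byte zero chunk the patch appends; the name is
// taken from the patch, the value here is an assumption for the demo.
var zeroBytes = make([]byte, 32)

func main() {
	// A backing buffer that is exactly full: len == cap, no spare room.
	buf := make([]byte, 64, 64)

	// A tail sub-slice shares buf's backing array...
	input := buf[32:]
	fmt.Println(&input[0] == &buf[32]) // true: same backing array

	// ...but appending to it with zero spare capacity allocates a brand
	// new array for `grown` only; buf still points at the old, full one,
	// so every subsequent call would pay for the same allocation again.
	grown := append(input, zeroBytes...)
	fmt.Println(&grown[0] == &buf[32]) // false: append reallocated

	// The patch's remedy: grow buf itself once, then re-truncate it. The
	// spare bytes now belong to buf's backing array, so appending to the
	// tail sub-slice reuses them instead of reallocating.
	buf = append(buf, zeroBytes...)
	buf = buf[:len(buf)-len(zeroBytes)]

	input = buf[32:]
	grown = append(input, zeroBytes...)
	fmt.Println(&grown[0] == &buf[32]) // true: no fresh allocation
}

Note the re-truncation step: the extra capacity stays invisible to len(h.buf), so the hasher's indexing logic is untouched; only the backing array gains the slack that merkleizeImpl's odd-chunk append can grow into.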