Commit f3168eb: replace the node key
cool-develope committed Oct 25, 2022
1 parent 7204160 · commit f3168eb
Showing 15 changed files with 527 additions and 377 deletions.
16 changes: 8 additions & 8 deletions basic_test.go
@@ -192,31 +192,31 @@ func TestUnit(t *testing.T) {
 	}

 	expectSet := func(tree *MutableTree, i int, repr string, hashCount int64) {
-		origNode := tree.root
+		tree.SaveVersion()
 		updated, err := tree.Set(i2b(i), []byte{})
 		require.NoError(t, err)
 		// ensure node was added & structure is as expected.
-		if updated || P(tree.root) != repr {
+		if updated || P(tree.root, tree.ImmutableTree) != repr {
 			t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v",
-				i, P(origNode), repr, P(tree.root), updated)
+				i, P(tree.lastSaved.root, tree.lastSaved), repr, P(tree.root, tree.ImmutableTree), updated)
 		}
 		// ensure hash calculation requirements
 		expectHash(tree.ImmutableTree, hashCount)
-		tree.root = origNode
+		tree.ImmutableTree = tree.lastSaved.clone()
 	}

 	expectRemove := func(tree *MutableTree, i int, repr string, hashCount int64) {
-		origNode := tree.root
+		tree.SaveVersion()
 		value, removed, err := tree.Remove(i2b(i))
 		require.NoError(t, err)
 		// ensure node was added & structure is as expected.
-		if len(value) != 0 || !removed || P(tree.root) != repr {
+		if len(value) != 0 || !removed || P(tree.root, tree.ImmutableTree) != repr {
 			t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v",
-				i, P(origNode), repr, P(tree.root), value, removed)
+				i, P(tree.lastSaved.root, tree.lastSaved), repr, P(tree.root, tree.ImmutableTree), value, removed)
 		}
 		// ensure hash calculation requirements
 		expectHash(tree.ImmutableTree, hashCount)
-		tree.root = origNode
+		tree.ImmutableTree = tree.lastSaved.clone()
 	}

 	// Test Set cases:
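
The helpers above stop stashing and restoring tree.root directly: they persist via SaveVersion and roll the working tree back by cloning the last saved snapshot. A minimal sketch of that idiom as a standalone test follows; it assumes it sits next to the test file above (same package, so i2b and the unexported lastSaved field and clone method are visible), and a getTestTree-style constructor that the iavl tests use elsewhere (assumed here).

```go
func TestSnapshotRollbackSketch(t *testing.T) {
	// getTestTree is assumed; any constructor returning a *MutableTree works.
	tree, err := getTestTree(0)
	require.NoError(t, err)

	_, err = tree.Set(i2b(1), []byte{})
	require.NoError(t, err)
	_, _, err = tree.SaveVersion() // snapshot now lives in tree.lastSaved
	require.NoError(t, err)

	// Mutate the working tree past the saved version.
	_, err = tree.Set(i2b(2), []byte{})
	require.NoError(t, err)

	// Roll back by cloning the last saved immutable tree, as the helpers above do.
	tree.ImmutableTree = tree.lastSaved.clone()
}
```
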
75 changes: 42 additions & 33 deletions cache/cache.go
@@ -6,29 +6,29 @@ import (
 	ibytes "github.com/cosmos/iavl/internal/bytes"
 )

-// Node represents a node eligible for caching.
-type Node interface {
-	GetKey() []byte
+// Node[T] represents a node eligible for caching.
+type Node[T any] interface {
+	GetKey() T
 }

 // Cache is an in-memory structure to persist nodes for quick access.
 // Please see lruCache for more details about why we need a custom
 // cache implementation.
-type Cache interface {
+type Cache[T any] interface {
 	// Adds node to cache. If full and had to remove the oldest element,
 	// returns the oldest, otherwise nil.
 	// CONTRACT: node can never be nil. Otherwise, cache panics.
-	Add(node Node) Node
+	Add(node Node[T]) Node[T]

-	// Returns Node for the key, if exists. nil otherwise.
-	Get(key []byte) Node
+	// Returns Node[T] for the key, if exists. nil otherwise.
+	Get(key T) Node[T]

 	// Has returns true if node with key exists in cache, false otherwise.
-	Has(key []byte) bool
+	Has(key T) bool

 	// Remove removes node with key from cache. The removed node is returned.
 	// if not in cache, return nil.
-	Remove(key []byte) Node
+	Remove(key T) Node[T]

 	// Len returns the cache length.
 	Len() int
@@ -44,33 +44,33 @@ type Cache interface {
 // The alternative implementations do not allow for
 // customization and the ability to estimate the byte
 // size of the cache.
-type lruCache struct {
-	dict map[string]*list.Element // FastNode cache.
-	maxElementCount int // FastNode the maximum number of nodes in the cache.
-	ll *list.List // LRU queue of cache elements. Used for deletion.
+type lruCache[K comparable, T any] struct {
+	dict map[K]*list.Element // FastNode cache.
+	maxElementCount int // FastNode the maximum number of nodes in the cache.
+	ll *list.List // LRU queue of cache elements. Used for deletion.
 }

-var _ Cache = (*lruCache)(nil)
+var _ Cache[int] = (*lruCache[string, int])(nil)

-func New(maxElementCount int) Cache {
-	return &lruCache{
-		dict: make(map[string]*list.Element),
+func New[K comparable, T any](maxElementCount int) Cache[T] {
+	return &lruCache[K, T]{
+		dict: make(map[K]*list.Element),
 		maxElementCount: maxElementCount,
 		ll: list.New(),
 	}
 }

-func (c *lruCache) Add(node Node) Node {
-	keyStr := ibytes.UnsafeBytesToStr(node.GetKey())
-	if e, exists := c.dict[keyStr]; exists {
+func (c *lruCache[K, T]) Add(node Node[T]) Node[T] {
+	key := c.getKey(node.GetKey())
+	if e, exists := c.dict[key]; exists {
 		c.ll.MoveToFront(e)
 		old := e.Value
 		e.Value = node
-		return old.(Node)
+		return old.(Node[T])
 	}

 	elem := c.ll.PushFront(node)
-	c.dict[keyStr] = elem
+	c.dict[key] = elem

 	if c.ll.Len() > c.maxElementCount {
 		oldest := c.ll.Back()
@@ -79,32 +79,41 @@ func (c *lruCache) Add(node Node) Node {
 	return nil
 }

-func (c *lruCache) Get(key []byte) Node {
-	if ele, hit := c.dict[ibytes.UnsafeBytesToStr(key)]; hit {
+func (c *lruCache[K, T]) Get(key T) Node[T] {
+	if ele, hit := c.dict[c.getKey(key)]; hit {
 		c.ll.MoveToFront(ele)
-		return ele.Value.(Node)
+		return ele.Value.(Node[T])
 	}
 	return nil
 }

-func (c *lruCache) Has(key []byte) bool {
-	_, exists := c.dict[ibytes.UnsafeBytesToStr(key)]
+func (c *lruCache[K, T]) Has(key T) bool {
+	_, exists := c.dict[c.getKey(key)]
 	return exists
 }

-func (c *lruCache) Len() int {
+func (c *lruCache[K, T]) Len() int {
 	return c.ll.Len()
 }

-func (c *lruCache) Remove(key []byte) Node {
-	if elem, exists := c.dict[ibytes.UnsafeBytesToStr(key)]; exists {
+func (c *lruCache[K, T]) Remove(key T) Node[T] {
+	if elem, exists := c.dict[c.getKey(key)]; exists {
 		return c.remove(elem)
 	}
 	return nil
 }

-func (c *lruCache) remove(e *list.Element) Node {
-	removed := c.ll.Remove(e).(Node)
-	delete(c.dict, ibytes.UnsafeBytesToStr(removed.GetKey()))
+func (c *lruCache[K, T]) remove(e *list.Element) Node[T] {
+	removed := c.ll.Remove(e).(Node[T])
+	delete(c.dict, c.getKey(removed.GetKey()))
 	return removed
 }
+
+func (c *lruCache[K, T]) getKey(key T) K {
+	switch any(key).(type) {
+	case []byte:
+		return any(ibytes.UnsafeBytesToStr(any(key).([]byte))).(K)
+	default:
+		return any(key).(K)
+	}
+}
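
The diff makes the cache generic over the node-key type T and the internal map-key type K; getKey converts []byte keys to strings for map lookups and passes any other comparable key straight through. Below is a minimal sketch of how a caller might use the new API, assuming the public import path github.com/cosmos/iavl/cache; the exampleNode type and the key/value strings are illustrative, not part of the commit.

```go
package main

import (
	"fmt"

	"github.com/cosmos/iavl/cache"
)

// exampleNode is a hypothetical node type; anything with GetKey() []byte
// satisfies cache.Node[[]byte].
type exampleNode struct {
	key   []byte
	value string
}

func (n *exampleNode) GetKey() []byte { return n.key }

func main() {
	// K = string is the internal map-key representation ([]byte keys are
	// converted by getKey), T = []byte is the key type the caller sees.
	c := cache.New[string, []byte](2)

	c.Add(&exampleNode{key: []byte("a"), value: "first"})
	c.Add(&exampleNode{key: []byte("b"), value: "second"})

	if n := c.Get([]byte("a")); n != nil {
		fmt.Println("hit:", string(n.GetKey()))
	}

	// Adding a third node exceeds the max of 2 and evicts the least
	// recently used entry ("b", since "a" was just touched by Get).
	if evicted := c.Add(&exampleNode{key: []byte("c"), value: "third"}); evicted != nil {
		fmt.Println("evicted:", string(evicted.GetKey()))
	}
}
```
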
4 changes: 2 additions & 2 deletions cache/cache_bench_test.go
@@ -28,7 +28,7 @@ func BenchmarkAdd(b *testing.B) {
 	}

 	for name, tc := range testcases {
-		cache := cache.New(tc.cacheMax)
+		cache := cache.New[string, []byte](tc.cacheMax)
 		b.Run(name, func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				b.StopTimer()
@@ -46,7 +46,7 @@ func BenchmarkAdd(b *testing.B) {
 func BenchmarkRemove(b *testing.B) {
 	b.ReportAllocs()

-	cache := cache.New(1000)
+	cache := cache.New[string, []byte](1000)
 	existentKeyMirror := [][]byte{}
 	// Populate cache
 	for i := 0; i < 50; i++ {
20 changes: 10 additions & 10 deletions cache/cache_test.go
@@ -29,7 +29,7 @@ type cacheOp struct {
 }

 type testcase struct {
-	setup func(cache.Cache)
+	setup func(cache.Cache[[]byte])
 	cacheMax int
 	cacheOps []cacheOp
 	expectedNodeIndexes []int // contents of the cache once test case completes represent by indexes in testNodes
@@ -43,9 +43,9 @@ const (
 	testKey = "key"
 )

-var _ cache.Node = (*testNode)(nil)
+var _ cache.Node[[]byte] = (*testNode)(nil)

-var testNodes = []cache.Node{
+var testNodes = []cache.Node[[]byte]{
 	&testNode{
 		key: []byte(fmt.Sprintf("%s%d", testKey, 1)),
 	},
@@ -150,7 +150,7 @@ func Test_Cache_Add(t *testing.T) {

 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
-			cache := cache.New(tc.cacheMax)
+			cache := cache.New[string, []byte](tc.cacheMax)

 			expectedCurSize := 0

@@ -189,7 +189,7 @@ func Test_Cache_Remove(t *testing.T) {
 			},
 		},
 		"remove non-existent key, cache max 1 - nil returned": {
-			setup: func(c cache.Cache) {
+			setup: func(c cache.Cache[[]byte]) {
 				require.Nil(t, c.Add(testNodes[1]))
 				require.Equal(t, 1, c.Len())
 			},
@@ -203,7 +203,7 @@ func Test_Cache_Remove(t *testing.T) {
 			expectedNodeIndexes: []int{1},
 		},
 		"remove existent key, cache max 1 - removed": {
-			setup: func(c cache.Cache) {
+			setup: func(c cache.Cache[[]byte]) {
 				require.Nil(t, c.Add(testNodes[0]))
 				require.Equal(t, 1, c.Len())
 			},
@@ -216,7 +216,7 @@ func Test_Cache_Remove(t *testing.T) {
 			},
 		},
 		"remove twice, cache max 1 - removed first time, then nil": {
-			setup: func(c cache.Cache) {
+			setup: func(c cache.Cache[[]byte]) {
 				require.Nil(t, c.Add(testNodes[0]))
 				require.Equal(t, 1, c.Len())
 			},
@@ -233,7 +233,7 @@ func Test_Cache_Remove(t *testing.T) {
 			},
 		},
 		"remove all, cache max 3": {
-			setup: func(c cache.Cache) {
+			setup: func(c cache.Cache[[]byte]) {
 				require.Nil(t, c.Add(testNodes[0]))
 				require.Nil(t, c.Add(testNodes[1]))
 				require.Nil(t, c.Add(testNodes[2]))
@@ -259,7 +259,7 @@ func Test_Cache_Remove(t *testing.T) {

 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
-			cache := cache.New(tc.cacheMax)
+			cache := cache.New[string, []byte](tc.cacheMax)

 			if tc.setup != nil {
 				tc.setup(cache)
@@ -290,7 +290,7 @@ func Test_Cache_Remove(t *testing.T) {
 	}
 }

-func validateCacheContentsAfterTest(t *testing.T, tc testcase, cache cache.Cache) {
+func validateCacheContentsAfterTest(t *testing.T, tc testcase, cache cache.Cache[[]byte]) {
 	require.Equal(t, len(tc.expectedNodeIndexes), cache.Len())
 	for _, idx := range tc.expectedNodeIndexes {
 		expectedNode := testNodes[idx]
2 changes: 1 addition & 1 deletion fastnode/fast_node.go
@@ -18,7 +18,7 @@ type Node struct {
 	value []byte
 }

-var _ cache.Node = (*Node)(nil)
+var _ cache.Node[[]byte] = (*Node)(nil)

 // NewNode returns a new fast node from a value and version.
 func NewNode(key []byte, value []byte, version int64) *Node {
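
fastnode.Node keeps a []byte key, so it still satisfies the interface as cache.Node[[]byte]. The parameterization also allows node types keyed by something other than raw bytes. A minimal sketch under that assumption follows; versionNode is hypothetical and nothing in the commit defines such a type.

```go
package main

import (
	"fmt"

	"github.com/cosmos/iavl/cache"
)

// versionNode is a hypothetical node keyed by an int64 version rather than
// raw bytes; it satisfies cache.Node[int64] because GetKey returns int64.
type versionNode struct {
	version int64
}

func (n *versionNode) GetKey() int64 { return n.version }

func main() {
	// For non-[]byte keys, K and T are typically the same comparable type,
	// since lruCache.getKey passes such keys straight through to the map.
	c := cache.New[int64, int64](128)
	c.Add(&versionNode{version: 7})
	fmt.Println(c.Has(7)) // true
}
```
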
20 changes: 15 additions & 5 deletions import.go
@@ -6,6 +6,7 @@ import (
 	"fmt"

 	db "github.com/cosmos/cosmos-db"
+	"github.com/cosmos/iavl/internal/encoding"
 )

 // maxBatchSize is the maximum size of the import batch before flushing it to the database
@@ -137,7 +138,7 @@ func (i *Importer) Add(exportNode *ExportNode) error {
 	bytesCopy := make([]byte, buf.Len())
 	copy(bytesCopy, buf.Bytes())

-	if err = i.batch.Set(i.tree.ndb.nodeKey(node.hash), bytesCopy); err != nil {
+	if err = i.batch.Set(i.tree.ndb.nodeKey(node.nodeKey), bytesCopy); err != nil {
 		return err
 	}

@@ -154,9 +155,9 @@ func (i *Importer) Add(exportNode *ExportNode) error {

 	// Update the stack now that we know there were no errors
 	switch {
-	case node.leftHash != nil && node.rightHash != nil:
+	case node.leftNode != nil && node.rightNode != nil:
 		i.stack = i.stack[:stackSize-2]
-	case node.leftHash != nil || node.rightHash != nil:
+	case node.leftNode != nil || node.rightNode != nil:
 		i.stack = i.stack[:stackSize-1]
 	}
 	i.stack = append(i.stack, node)
@@ -174,11 +175,20 @@ func (i *Importer) Commit() error {

 	switch len(i.stack) {
 	case 0:
-		if err := i.batch.Set(i.tree.ndb.rootKey(i.version), []byte{}); err != nil {
+		if err := i.batch.Set(i.tree.ndb.rootKey(i.version), []byte{0, 0}); err != nil {
			return err
 		}
 	case 1:
-		if err := i.batch.Set(i.tree.ndb.rootKey(i.version), i.stack[0].hash); err != nil {
+		buf := new(bytes.Buffer)
+		err := encoding.EncodeVarint(buf, i.stack[0].nodeKey)
+		if err != nil {
+			return err
+		}
+		err = encoding.EncodeVarint(buf, i.tree.nonce)
+		if err != nil {
+			return err
+		}
+		if err := i.batch.Set(i.tree.ndb.rootKey(i.version), buf.Bytes()); err != nil {
 			return err
 		}
 	default:
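
With this change the entry stored under rootKey(version) is no longer the root hash: an empty tree writes the two-byte placeholder {0, 0}, and a single-root import writes two varints, the root's node key followed by the tree's nonce, as the hunk above shows. The sketch below illustrates that framing with the standard library's signed varints; encodeRootRef is a made-up name, and the exact wire format of the internal encoding package is an assumption here.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeRootRef mirrors what Importer.Commit appears to write for a non-empty
// tree: two varints, the root's node key followed by the nonce. Illustrative
// only; the commit itself goes through the iavl internal encoding package.
func encodeRootRef(nodeKey, nonce int64) []byte {
	buf := new(bytes.Buffer)
	tmp := make([]byte, binary.MaxVarintLen64)

	n := binary.PutVarint(tmp, nodeKey)
	buf.Write(tmp[:n])

	n = binary.PutVarint(tmp, nonce)
	buf.Write(tmp[:n])

	return buf.Bytes()
}

func main() {
	// Empty tree: the root entry is the placeholder []byte{0, 0}.
	fmt.Printf("empty root entry: %v\n", []byte{0, 0})

	// Non-empty tree: e.g. node key 1 (the imported root) and nonce 1.
	fmt.Printf("root entry: %v\n", encodeRootRef(1, 1))
}
```
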
(The remaining 9 changed files are not shown.)
