Skip to content

Commit

Permalink
build: update to use new generic neutrino/cache package
Browse files Browse the repository at this point in the history
lightninglabs/neutrino#261

Author: Olaoluwa Osuntokun <laolu32@gmail.com>
Date:   Fri Feb 10 19:13:50 2023 -0800
  • Loading branch information
buck54321 committed Apr 20, 2024
1 parent 40bc00f commit 54c2cc0
Show file tree
Hide file tree
Showing 4 changed files with 114 additions and 97 deletions.
20 changes: 10 additions & 10 deletions spv/cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,23 +7,23 @@ var (
ErrElementNotFound = fmt.Errorf("unable to find element")
)

// Value represents a single entry stored in the Cache. Implementations
// wrap the cached payload (e.g. a filter or a block) and report its size
// so the cache can enforce its capacity.
type Value interface {
// Size returns how much capacity this entry consumes in the cache. For
// example, for a filter, it could be the size of the filter in bytes.
// A non-nil error means the size could not be determined, in which case
// the entry cannot be stored.
Size() (uint64, error)
}

// Cache represents a generic cache.
type Cache interface {
type Cache[K comparable, V Value] interface {
// Put stores the given (key,value) pair, replacing existing value if
// key already exists. The return value indicates whether items had to
// be evicted to make room for the new element.
Put(key interface{}, value Value) (bool, error)
Put(key K, value V) (bool, error)

// Get returns the value for a given key.
Get(key interface{}) (Value, error)
Get(key K) (V, error)

// Len returns number of elements in the cache.
Len() int
}

// Value represents a single entry stored in the Cache. Implementations
// wrap the cached payload (e.g. a filter or a block) and report its size
// so the cache can enforce its capacity.
type Value interface {
// Size returns how much capacity this entry consumes in the cache. For
// example, for a filter, it could be the size of the filter in bytes.
// A non-nil error means the size could not be determined, in which case
// the entry cannot be stored.
Size() (uint64, error)
}
12 changes: 8 additions & 4 deletions spv/cache/cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,12 @@ func TestBlockFilterCaches(t *testing.T) {

// Initialize all types of caches we want to test, for both filters and
// blocks. Currently the LRU cache is the only implementation.
filterCaches := []cache.Cache{lru.NewCache(cacheSize)}
blockCaches := []cache.Cache{lru.NewCache(cacheSize)}
filterCaches := []cache.Cache[cache.FilterCacheKey, *cache.CacheableFilter]{
lru.NewCache[cache.FilterCacheKey, *cache.CacheableFilter](cacheSize),
}
blockCaches := []cache.Cache[wire.InvVect, *cache.CacheableBlock]{
lru.NewCache[wire.InvVect, *cache.CacheableBlock](cacheSize),
}

// Generate a list of hashes, filters and blocks that we will use as
// cache keys and values.
Expand Down Expand Up @@ -89,7 +93,7 @@ func TestBlockFilterCaches(t *testing.T) {
}

// Ensure we got the correct filter.
filter := e.(*cache.CacheableFilter).Filter
filter := e.Filter
if filter != filters[i] {
t.Fatalf("Filters not equal: %v vs %v ",
filter, filters[i])
Expand All @@ -107,7 +111,7 @@ func TestBlockFilterCaches(t *testing.T) {
}

// Ensure it is the same block.
block := b.(*cache.CacheableBlock).Block
block := b.Block
if block != blocks[i] {
t.Fatalf("Not equal: %v vs %v ",
block, blocks[i])
Expand Down
67 changes: 45 additions & 22 deletions spv/cache/lru/lru.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,18 +9,37 @@ import (
)

// elementMap wraps a map from a generic comparable key type to a
// list.Element, giving the LRU cache O(1) lookups into its eviction list.
type elementMap[K comparable] struct {
	// TODO(roasbeef): list.List generic version?
	m map[K]*list.Element
}

// Del removes key from the map. Deleting a missing key (or calling Del on
// a zero-value elementMap) is a no-op.
func (e *elementMap[K]) Del(key K) {
	delete(e.m, key)
}

// Lookup returns the list element stored under key, along with a boolean
// indicating whether the key was present.
func (e *elementMap[K]) Lookup(key K) (*list.Element, bool) {
	el, ok := e.m[key]
	return el, ok
}

// Store maps key to val, replacing any existing entry for that key.
func (e *elementMap[K]) Store(key K, val *list.Element) {
	// Lazily initialize the map so the zero value of elementMap is
	// usable; writing to a nil map would otherwise panic.
	if e.m == nil {
		e.m = make(map[K]*list.Element)
	}
	e.m[key] = val
}

// entry represents a (key,value) pair entry in the Cache. The Cache's list
// stores entries which let us get the cache key when an entry is evicted.
type entry struct {
key interface{}
value cache.Value
type entry[K comparable, V cache.Value] struct {
key K
value V
}

// Cache provides a generic thread-safe lru cache that can be used for
// storing filters, blocks, etc.
type Cache struct {
type Cache[K comparable, V cache.Value] struct {
// capacity represents how much this cache can hold. It could be number
// of elements or a number of bytes, decided by the cache.Value's Size.
capacity uint64
Expand All @@ -34,25 +53,27 @@ type Cache struct {

// cache is a generic cache which allows us to find an elements position
// in the ll list from a given key.
cache elementMap
cache elementMap[K]

// mtx is used to make sure the Cache is thread-safe.
mtx sync.RWMutex
}

// NewCache returns a cache with the specified capacity; the cache's size
// can't exceed that given capacity.
func NewCache(capacity uint64) *Cache {
return &Cache{
func NewCache[K comparable, V cache.Value](capacity uint64) *Cache[K, V] {
return &Cache[K, V]{
capacity: capacity,
ll: list.New(),
cache: make(elementMap),
cache: elementMap[K]{
m: make(map[K]*list.Element),
},
}
}

// evict removes as many elements as necessary to free at least the needed
// amount of capacity for a new element being inserted.
func (c *Cache) evict(needed uint64) (bool, error) {
func (c *Cache[K, V]) evict(needed uint64) (bool, error) {
if needed > c.capacity {
return false, fmt.Errorf("can't evict %v elements in size, "+
"since capacity is %v", needed, c.capacity)
Expand All @@ -72,7 +93,7 @@ func (c *Cache) evict(needed uint64) (bool, error) {
// Find the least recently used item.
if elr := c.ll.Back(); elr != nil {
// Determine lru item's size.
ce := elr.Value.(*entry)
ce := elr.Value.(*entry[K, V])
es, err := ce.value.Size()
if err != nil {
return false, fmt.Errorf("couldn't determine "+
Expand All @@ -85,7 +106,7 @@ func (c *Cache) evict(needed uint64) (bool, error) {

// Remove the element from the cache.
c.ll.Remove(elr)
delete(c.cache, ce.key)
c.cache.Del(ce.key)
evicted = true
}
}
Expand All @@ -97,7 +118,7 @@ func (c *Cache) evict(needed uint64) (bool, error) {
// exists, it will replace value and update it to be most recent item in cache.
// The return value indicates whether items had to be evicted to make room for
// the new element.
func (c *Cache) Put(key interface{}, value cache.Value) (bool, error) {
func (c *Cache[K, V]) Put(key K, value V) (bool, error) {
vs, err := value.Size()
if err != nil {
return false, fmt.Errorf("couldn't determine size of cache "+
Expand All @@ -113,9 +134,9 @@ func (c *Cache) Put(key interface{}, value cache.Value) (bool, error) {
defer c.mtx.Unlock()

// If the element already exists, remove it and decrease cache's size.
el, ok := c.cache[key]
el, ok := c.cache.Lookup(key)
if ok {
es, err := el.Value.(*entry).value.Size()
es, err := el.Value.(*entry[K, V]).value.Size()
if err != nil {
return false, fmt.Errorf("couldn't determine size of "+
"existing cache value %v", err)
Expand All @@ -132,34 +153,36 @@ func (c *Cache) Put(key interface{}, value cache.Value) (bool, error) {
}

// We have made enough space in the cache, so just insert it.
el = c.ll.PushFront(&entry{key, value})
c.cache[key] = el
el = c.ll.PushFront(&entry[K, V]{key, value})
c.cache.Store(key, el)
c.size += vs

return evicted, nil
}

// Get returns the value for a given key, making the element the most
// recently accessed item in the process. It returns ErrElementNotFound if
// the key isn't found.
func (c *Cache) Get(key interface{}) (cache.Value, error) {
func (c *Cache[K, V]) Get(key K) (V, error) {
c.mtx.Lock()
defer c.mtx.Unlock()

el, ok := c.cache[key]
var defaultVal V

el, ok := c.cache.Lookup(key)
if !ok {
// Element not found in the cache.
return nil, cache.ErrElementNotFound
return defaultVal, cache.ErrElementNotFound
}

// When the cache needs to evict an element to make space for another
// one, it starts eviction from the back, so by moving this element to
// the front, its eviction is delayed because it was recently accessed.
c.ll.MoveToFront(el)
return el.Value.(*entry).value, nil
return el.Value.(*entry[K, V]).value, nil
}

// Len returns number of elements in the cache.
func (c *Cache) Len() int {
func (c *Cache[K, V]) Len() int {
c.mtx.RLock()
defer c.mtx.RUnlock()

Expand Down
Loading

0 comments on commit 54c2cc0

Please sign in to comment.