Skip to content

Commit

Permalink
add new cache.Map
Browse files Browse the repository at this point in the history
  • Loading branch information
brandon-ja committed Dec 21, 2023
1 parent 81f552c commit c371c9c
Show file tree
Hide file tree
Showing 9 changed files with 131 additions and 82 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pipeline.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,4 +46,4 @@ jobs:

# Run go build
- name: Run Go Build
run: go build ./cache && go build ./memoize && go build ./persist
run: go build ./cache && go build ./persist
71 changes: 0 additions & 71 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,74 +74,3 @@ var GetTeams = cache.OnDisk(filepath.Join("cache", "teams"), time.Hour, func(ctx
return teams, nil
})
```

## memoize
The memoization package provides functionality for memoizing function results.
You can use these functions to cache function results both in memory as well as in an external data store.
Additionally, this cache is set based on the input parameter so different inputs will have their own individual cache.
Be aware that if you are memoizing large amounts of data with long TTLs you may run into OOM issues.
This is especially true for memoization where new entries are made into the cache for every new parameter.

It's important to note that the memoized function may return expired data.
This can happen when your cached function returns an error but the previous cache value still exists.
In this case valid cache data will be returned along with your function's error.
As the developer it is up to you to determine if this stale data is safe to use or if it should be ignored.

Example 1: cache function results in memory. This makes repeatedly calling the GetTeams function much faster since
only the first call will result in a network call.
```go
// GetTeam gets a team from an external api. The results will be cached in memory for at least one hour.
// The cached data is tied to the input parameter such that calls with different inputs will have their own
// individual cache.
var GetTeam = memoize.InMemory(time.Hour, func(ctx context.Context, teamName string) (*Team, error) {
client := &http.Client{}
resp, err := client.Get("https://api.weavedev.net/team/" + teamName)
if err != nil {
return nil, err
}

defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

team := &Team{}
err = json.Unmarshal(body, team)
if err != nil {
return nil, err
}

return team, nil
})
```

Example 2: cache function results in memory and on disk.
Like example 1 this improves performance.
It also allows the cache to be restored across runs which can be useful for short-lived process like cron jobs or cli tools
```go
// GetTeam gets a team from an external api. The results will be cached in memory for at least one hour.
// The cached data is tied to the input parameter such that calls with different inputs will have their own
// individual cache. Additionally, the cache will be backed by the file system so it can be restored between program runs
var GetTeam = memoize.OnDisk(filepath.Join("cache", "team"), time.Hour, func(ctx context.Context, teamName string) (*Team, error) {
client := &http.Client{}
resp, err := client.Get("https://api.weavedev.net/team/" + teamName)
if err != nil {
return nil, err
}

defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

team := &Team{}
err = json.Unmarshal(body, team)
if err != nil {
return nil, err
}

return team, nil
})
```
68 changes: 68 additions & 0 deletions cache/map.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
package cache

import (
"context"
"fmt"
"time"

"github.com/weave-lab/cachin/persist"
)

// Map is a cached map that can be used to cache data using a persist.Store
//
// NOTE(review): the internal map is written by Set and by the cleanup goroutine started in
// NewMap without any synchronization — confirm callers serialize access or add a mutex.
type Map[K fmt.Stringer, V any] struct {
	data map[string]persist.Data[V] // in-memory entries, keyed by K.String()
	store persist.Store // backing store each entry is persisted to
	evictionTimer time.Duration // how often the cleanup goroutine scans for expired entries
	ttl time.Duration // entries older than this are treated as expired
}

// NewMap creates a Map backed by the provided persist.Store. Entries older than ttl are
// treated as expired, and a background goroutine evicts expired entries every evictionTimer
// interval until ctx is canceled.
func NewMap[K fmt.Stringer, V any](ctx context.Context, store persist.Store, ttl, evictionTimer time.Duration) Map[K, V] {
	m := Map[K, V]{
		data: make(map[string]persist.Data[V]),
		// BUG FIX: store was previously never assigned, so Map.Set constructed
		// persist.Data values with a nil store.
		store:         store,
		evictionTimer: evictionTimer,
		ttl:           ttl,
	}

	// The cleanup goroutine shares the underlying data map with the returned copy,
	// so evictions are visible to callers of Get.
	go m.runCleanup(ctx)

	return m
}

// Set persists v under k's string key and records the entry in the in-memory map.
// Any error from the backing store is returned and the in-memory map is left unchanged.
func (m *Map[K, V]) Set(ctx context.Context, k K, v V) error {
	key := k.String()

	entry := persist.NewData[V](m.store, key)
	if err := entry.Set(ctx, v); err != nil {
		return err
	}

	m.data[key] = entry
	return nil
}

// Get returns the cached value for k. The boolean result is false when the key
// is absent or its entry has outlived the map's ttl.
func (m *Map[K, V]) Get(k K) (V, bool) {
	var zero V

	entry, found := m.data[k.String()]
	if !found || entry.IsExpired(m.ttl) {
		return zero, false
	}

	return entry.Get(), true
}

// runCleanup evicts expired entries (and deletes their persisted data) until ctx is
// canceled. It runs one pass immediately, then one pass per evictionTimer tick.
func (m *Map[K, V]) runCleanup(ctx context.Context) {
	ttlWait := time.NewTicker(m.evictionTimer)
	// BUG FIX: stop the ticker on exit so its resources are released once ctx is canceled.
	defer ttlWait.Stop()

	for {
		for k, v := range m.data {
			if v.IsExpired(m.ttl) {
				// TODO: we need to handle this error somehow
				_ = v.Delete(ctx)
				// Deleting the current key during range is well-defined in Go.
				delete(m.data, k)
			}
		}

		select {
		case <-ttlWait.C:
		case <-ctx.Done():
			return
		}
	}
}
24 changes: 14 additions & 10 deletions persist/filesystem.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,7 @@ func NewFsStore(dir string, useSafeKey bool) *FsStore {
// Get searches for a file that matches the provided key in the stores root directory. If the file is missing
// no error will be returned
func (c *FsStore) Get(_ context.Context, key string) ([]byte, time.Time, error) {
if c.useSafeKey {
key = SafeKey(key)
}
file := filepath.Join(c.dir, key)
file := c.getFilePath(key)
stat, err := os.Stat(file)
switch {
case os.IsNotExist(err):
Expand All @@ -58,14 +55,21 @@ func (c *FsStore) Set(_ context.Context, key string, val []byte) error {
}
}

file := c.getFilePath(key)
return afero.WriteFile(c.afs, file, val, 0666)
}

// Delete deletes the file that matches the provided key in the store root directory. If the file can not be deleted
// an error will be returned
func (c *FsStore) Delete(_ context.Context, key string) error {
	file := c.getFilePath(key)
	// CONSISTENCY FIX: Set writes through the store's afero filesystem (c.afs), but Delete
	// previously used os.RemoveAll directly, so deletes bypassed non-OS afero backends
	// (e.g. an in-memory fs). Delete through the same filesystem abstraction.
	return c.afs.RemoveAll(file)
}

// getFilePath resolves the on-disk path for key inside the store's root directory,
// first converting it to a 'safe' key when the store was configured with useSafeKey.
// NOTE(review): the original diff rendering interleaved removed lines here; this is the
// reconstructed new-side function.
func (c *FsStore) getFilePath(key string) string {
	if c.useSafeKey {
		key = SafeKey(key)
	}

	return filepath.Join(c.dir, key)
}
9 changes: 9 additions & 0 deletions persist/firestore.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,3 +50,12 @@ func (s *FireStore) Set(ctx context.Context, key string, val []byte) error {

return nil
}

// Delete deletes the firestore document that matches the provided key. In order to ensure the key does not contain
// illegal characters, the key will be converted to a 'safe' key.
func (s *FireStore) Delete(ctx context.Context, key string) error {
	ref := s.client.Doc(SafeKey(key))

	if _, err := ref.Delete(ctx); err != nil {
		return err
	}

	return nil
}
18 changes: 18 additions & 0 deletions persist/multi.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,14 @@ import (
"time"
)

// MultiStore can be used to store cache data in multiple stores
type MultiStore struct {
	stores []Store // underlying stores, consulted in order by Get and written by Set
	expire time.Duration // NOTE(review): not referenced in the methods visible here — confirm where expiry is applied
}

// Get attempts to get the document from the provided cache stores one at a time. The first store that
// successfully returns the data is used
func (s *MultiStore) Get(ctx context.Context, key string) ([]byte, time.Time, error) {
// look in each store and return the first non-expired source
var errs []string
Expand All @@ -33,13 +36,28 @@ func (s *MultiStore) Get(ctx context.Context, key string) ([]byte, time.Time, er
return nil, time.Time{}, nil
}

// Set attempts to set the document at all the provided stores one at a time. Any errors returned from
// a store will be aggregated and returned. If every store succeeds, Set returns nil.
func (s *MultiStore) Set(ctx context.Context, key string, val []byte) error {
	var errs []string
	for _, store := range s.stores {
		if err := store.Set(ctx, key, val); err != nil {
			errs = append(errs, err.Error())
		}
	}

	// BUG FIX: previously a non-nil error was returned unconditionally, so Set reported
	// failure ("errs: ") even when every store succeeded.
	if len(errs) == 0 {
		return nil
	}

	return fmt.Errorf("errs: %v", strings.Join(errs, "|"))
}

// Delete attempts to delete the data associated with the key in each configured data store. Any errors returned
// from any of the stores will be aggregated and returned
func (s *MultiStore) Delete(ctx context.Context, key string) error {
var errs []string
for _, store := range s.stores {
err := store.Delete(ctx, key)
if err != nil {
errs = append(errs, err.Error())
}
}

Expand Down
10 changes: 10 additions & 0 deletions persist/persist.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ var (
type Store interface {
Get(context.Context, string) ([]byte, time.Time, error)
Set(context.Context, string, []byte) error
Delete(context.Context, string) error
}

// Serializable is an optional interface that can be used to customize the way a Data struct serializes its data
Expand Down Expand Up @@ -127,6 +128,15 @@ func (d *Data[T]) IsUnset() bool {
return d.lastSet.IsZero()
}

// Delete deletes any resources associated with this cached data
func (d *Data[T]) Delete(ctx context.Context) error {
	if !d.IsUnset() {
		// Data was persisted at some point, so remove it from the backing store.
		return d.store.Delete(ctx, d.key)
	}

	// Nothing was ever set; there is nothing to delete.
	return nil
}

// Bytes converts the value int a slice of bytes, so it can be stored. If the underlying type implements the
// Serializable interface that will be used. Otherwise, the type is JSON marshalled
func (d *Data[T]) Bytes() ([]byte, error) {
Expand Down
5 changes: 5 additions & 0 deletions persist/persist_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,11 @@ func (t *testStore) Set(_ context.Context, key string, data []byte) error {
return nil
}

// Delete removes key from the in-memory test store. It never returns an error.
func (t *testStore) Delete(_ context.Context, key string) error {
	delete(t.data, key)
	return nil
}

func TestData_Load(t *testing.T) {
type args struct {
ctx context.Context
Expand Down
6 changes: 6 additions & 0 deletions persist/redis.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,3 +53,9 @@ func (s *RedisStore) Set(_ context.Context, key string, val []byte) error {
cmd := s.client.Set(SafeKey(key), d, Forever)
return cmd.Err()
}

// Delete deletes the key from the redis cache
func (s *RedisStore) Delete(_ context.Context, key string) error {
	// BUG FIX: the previous implementation called Set(key, nil, Forever), which leaves the key
	// present in redis (with an empty value) rather than removing it — a subsequent Get would
	// still find the key. DEL actually removes the key.
	cmd := s.client.Del(SafeKey(key))
	return cmd.Err()
}

0 comments on commit c371c9c

Please sign in to comment.