server: Cache PreparedEvalQuery's for data requests
This adds caching of prepared queries for versioned and unversioned data requests (POST/GET to `/`, `/data`, and `/v1/data`). The cache has a max size of 100 entries and, once full, acts as a simple circular buffer over the keys (FIFO eviction, no LRU logic or other smarts). These APIs are unlikely to hit the max size: most OPA use cases re-use a single query over and over with different inputs.

There is a new metric, `counter_server_query_cache_hit`, which shows whether a request was served from the query cache. It exists primarily to explain why a handful of the other metrics (query parse/compile/etc.) are sometimes absent. In the future this could be added to the query APIs too; this change touches only the data APIs.

Closes: #1567

Signed-off-by: Patrick East <east.patrick@gmail.com>
1 parent d649a24 · commit feaf745 · 6 changed files with 365 additions and 127 deletions
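To make the pattern concrete, here is a rough handler-side sketch of how the server can consult the cache before falling back to parse/compile. It is illustrative only: the `dataHandler` type and `preparedQueries` field are hypothetical stand-ins, not identifiers from this diff, though `rego.New`, `rego.Query`, `PrepareForEval`, and `Eval` are real OPA APIs.

package server

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
)

// dataHandler is a hypothetical stand-in for the server's data-API state;
// the real field and method names in the diff may differ.
type dataHandler struct {
	preparedQueries *cache // the FIFO cache added in this commit
}

func (h *dataHandler) eval(ctx context.Context, query string, input interface{}) (rego.ResultSet, error) {
	if v, ok := h.preparedQueries.Get(query); ok {
		// Cache hit: reuse the compiled query and skip parse/compile.
		// This is where counter_server_query_cache_hit would be bumped.
		return v.(rego.PreparedEvalQuery).Eval(ctx, rego.EvalInput(input))
	}

	// Cache miss: prepare the query once, then store it for later requests.
	pq, err := rego.New(rego.Query(query)).PrepareForEval(ctx)
	if err != nil {
		return nil, err
	}
	h.preparedQueries.Insert(query, pq)
	return pq.Eval(ctx, rego.EvalInput(input))
}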
@@ -0,0 +1,53 @@
package server

import "sync"

type cache struct {
	data    map[string]interface{}
	keylist []string
	idx     int
	maxSize int
	mtx     sync.RWMutex
}

func newCache(maxSize int) *cache {
	return &cache{
		data:    map[string]interface{}{},
		keylist: []string{},
		maxSize: maxSize,
	}
}

func (c *cache) Get(k string) (interface{}, bool) {
	c.mtx.RLock()
	v, ok := c.data[k]
	c.mtx.RUnlock()
	return v, ok
}

func (c *cache) Insert(k string, v interface{}) {

	// Short path if it's already in the cache
	_, ok := c.Get(k)
	if ok {
		return
	}

	// Slow path, grab the write lock and insert
	c.mtx.Lock()
	_, ok = c.data[k]
	if !ok {
		c.data[k] = v
		if len(c.keylist) < c.maxSize {
			// Haven't reached max size yet, keep adding keys.
			c.keylist = append(c.keylist, k)
		} else {
			// Start recycling spots in the key list and
			// dropping cache entries for them.
			delete(c.data, c.keylist[c.idx])
			c.keylist[c.idx] = k
			c.idx = (c.idx + 1) % c.maxSize
		}
	}
	c.mtx.Unlock()
}
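As a quick illustration of the FIFO eviction order, here is a hypothetical Go example test (not part of the commit) that could sit alongside the cache in package server: with a max size of 2, the third Insert recycles the slot of the oldest key.

package server

import "fmt"

// ExampleCacheFIFO is a hypothetical demonstration, not part of this diff.
func ExampleCacheFIFO() {
	c := newCache(2)
	c.Insert("a", 1)
	c.Insert("b", 2)
	c.Insert("c", 3) // the cache is full, so the oldest key "a" is evicted

	_, ok := c.Get("a")
	fmt.Println(ok)
	v, _ := c.Get("c")
	fmt.Println(v)
	// Output:
	// false
	// 3
}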
@@ -0,0 +1,78 @@
package server

import (
	"strconv"
	"testing"
)

func TestCacheBase(t *testing.T) {
	c := newCache(5)
	foo := struct{}{}
	c.Insert("foo", foo)
	ensureCacheKey(t, c, "foo", foo)
}

func TestCacheLimit(t *testing.T) {
	max := 10
	c := newCache(max)

	// Fill the cache with values
	var i int
	for i = 0; i < max; i++ {
		c.Insert(strconv.Itoa(i), i)
	}

	// Ensure it's at the max size
	ensureCacheSize(t, c, max)

	// Ensure they are all stored.
	for j := 0; j < max; j++ {
		ensureCacheKey(t, c, strconv.Itoa(j), j)
	}

	// Continue filling the cache, expect old keys to be dropped
	c.Insert(strconv.Itoa(i), i)
	ensureCacheKey(t, c, strconv.Itoa(i), i)

	// Should still be at max size
	ensureCacheSize(t, c, max)

	// Expect that "0" got dropped
	_, ok := c.Get("0")
	if ok {
		t.Fatal("Expected key '0' to not be found")
	}

	// Load the cache with many more than max
	for ; i < max*20; i++ {
		c.Insert(strconv.Itoa(i), i)
	}
	ensureCacheSize(t, c, max)

	// Ensure the last "max" keys are available, and everything else is not
	for j := 0; j < i; j++ {
		k := strconv.Itoa(j)
		if j >= (i - max) {
			ensureCacheKey(t, c, k, j)
		} else {
			_, ok := c.Get(k)
			if ok {
				t.Fatalf("Expected key %s to not be found", k)
			}
		}
	}
}

func ensureCacheKey(t *testing.T, c *cache, k string, v interface{}) {
	t.Helper()
	actual, ok := c.Get(k)
	if !ok || v != actual {
		t.Fatalf("expected to retrieve value %v for key %s, got %v ok==%t", v, k, actual, ok)
	}
}

func ensureCacheSize(t *testing.T, c *cache, size int) {
	t.Helper()
	if len(c.data) != size || len(c.keylist) != size {
		t.Fatalf("Unexpected cache size len(data)=%d len(keylist)=%d, expected %d", len(c.data), len(c.keylist), size)
	}
}