generate an ID for each memoiser so they can share cache
Makes cache clearing easier, and then we don't need to add
`ResetCache()` to the Renderer interface.
paulbellamy committed Feb 17, 2016
1 parent a0a60ca commit 0379fc4
Showing 7 changed files with 22 additions and 26 deletions.
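The idea behind the change: replace each memoiser's private cache with one package-level cache shared by all memoisers. Each memoiser is stamped with a random ID at construction and folds that ID into its cache keys, so entries from different memoisers never collide, and clearing everything becomes a single package-level call rather than a method on every Renderer. Below is a minimal standalone sketch of that scheme (not code from this commit; the package name, function names, and the mutex-guarded map standing in for the gcache LRU in render/memoise.go are all illustrative):

package memo

import (
	"fmt"
	"math/rand"
	"sync"
)

var (
	mu    sync.Mutex
	cache = map[string]string{}
)

// ResetCache purges every memoiser's entries in one call, which is what
// lets the Renderer interface drop a per-renderer ResetCache method.
func ResetCache() {
	mu.Lock()
	defer mu.Unlock()
	cache = map[string]string{}
}

type memoise struct {
	id string
	fn func(string) string
}

// Memoise stamps each wrapper with a random ID so its keys stay distinct
// in the shared cache.
func Memoise(fn func(string) string) *memoise {
	return &memoise{id: fmt.Sprintf("%x", rand.Int63()), fn: fn}
}

func (m *memoise) Call(input string) string {
	// Same key scheme as the diff below: "<input ID>-<memoiser ID>".
	key := fmt.Sprintf("%s-%s", input, m.id)
	mu.Lock()
	if v, ok := cache[key]; ok {
		mu.Unlock()
		return v
	}
	mu.Unlock()

	out := m.fn(input)

	mu.Lock()
	cache[key] = out
	mu.Unlock()
	return out
}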
5 changes: 2 additions & 3 deletions app/benchmark_internal_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/ugorji/go/codec"
 
+	"github.com/weaveworks/scope/render"
 	"github.com/weaveworks/scope/report"
 	"github.com/weaveworks/scope/test/fixture"
 )
@@ -51,9 +52,7 @@ func BenchmarkTopologyList(b *testing.B) {
 	}
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
-		topologyRegistry.walk(func(t APITopologyDesc) {
-			t.renderer.ResetCache()
-		})
+		render.ResetCache()
 		b.StartTimer()
 		topologyRegistry.renderTopologies(report, request)
 	}
4 changes: 2 additions & 2 deletions render/benchmark_test.go
@@ -63,7 +63,7 @@ func benchmarkRender(b *testing.B, r render.Renderer) {
 
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
-		r.ResetCache()
+		render.ResetCache()
 		b.StartTimer()
 		benchmarkRenderResult = r.Render(report)
 		if len(benchmarkRenderResult) == 0 {
@@ -83,7 +83,7 @@ func benchmarkStats(b *testing.B, r render.Renderer) {
 	for i := 0; i < b.N; i++ {
 		// No way to tell if this was successful :(
 		b.StopTimer()
-		r.ResetCache()
+		render.ResetCache()
 		b.StartTimer()
 		benchmarkStatsResult = r.Stats(report)
 	}
24 changes: 17 additions & 7 deletions render/memoise.go
@@ -1,33 +1,43 @@
 package render
 
 import (
+	"fmt"
+	"math/rand"
+
 	"github.com/bluele/gcache"
 
 	"github.com/weaveworks/scope/report"
 )
 
+var renderCache = gcache.New(100).LRU().Build()
+
 type memoise struct {
 	Renderer
-	cache gcache.Cache
+	id string
 }
 
 // Memoise wraps the renderer in a loving embrace of caching
-func Memoise(r Renderer) Renderer { return &memoise{r, gcache.New(10).LRU().Build()} }
+func Memoise(r Renderer) Renderer {
+	return &memoise{
+		Renderer: r,
+		id:       fmt.Sprintf("%x", rand.Int63()),
+	}
+}
 
 // Render produces a set of RenderableNodes given a Report.
 // Ideally, it just retrieves it from the cache, otherwise it calls through to
 // `r` and stores the result.
 func (m *memoise) Render(rpt report.Report) RenderableNodes {
-	if result, err := m.cache.Get(rpt.ID); err == nil {
+	key := fmt.Sprintf("%s-%s", rpt.ID, m.id)
+	if result, err := renderCache.Get(key); err == nil {
 		return result.(RenderableNodes)
 	}
 	output := m.Renderer.Render(rpt)
-	m.cache.Set(rpt.ID, output)
+	renderCache.Set(key, output)
 	return output
 }
 
 // ResetCache blows away the rendered node cache.
-func (m *memoise) ResetCache() {
-	m.cache.Purge()
-	m.Renderer.ResetCache()
+func ResetCache() {
	renderCache.Purge()
 }
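For callers, the effect is what the benchmark and test diffs show: instead of walking every renderer to call a ResetCache method, a single render.ResetCache() purges all memoised results. A hedged usage sketch follows; it assumes the canonical github.com/weaveworks/scope import path, and renderBoth, r1, and r2 are placeholders, not names from the repository:

package example

import (
	"github.com/weaveworks/scope/render"
	"github.com/weaveworks/scope/report"
)

// renderBoth is illustrative only; r1 and r2 stand for any renderers
// wrapped by render.Memoise, which now share the package-level cache.
func renderBoth(r1, r2 render.Renderer, rpt report.Report) {
	m1 := render.Memoise(r1) // gets its own random ID
	m2 := render.Memoise(r2) // gets a different ID, same shared cache

	_ = m1.Render(rpt) // cached under "<rpt.ID>-<m1's id>"
	_ = m2.Render(rpt) // cached under "<rpt.ID>-<m2's id>", no collision

	render.ResetCache() // one call clears both memoisers' entries
}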
3 changes: 1 addition & 2 deletions render/memoise_test.go
@@ -13,7 +13,6 @@ type renderFunc func(r report.Report) render.RenderableNodes
 
 func (f renderFunc) Render(r report.Report) render.RenderableNodes { return f(r) }
 func (f renderFunc) Stats(r report.Report) render.Stats            { return render.Stats{} }
-func (f renderFunc) ResetCache()                                    {}
 
 func TestMemoise(t *testing.T) {
 	calls := 0
@@ -50,7 +49,7 @@ func TestMemoise(t *testing.T) {
 		t.Errorf("Expected renderer to have been called again for a different report")
 	}
 
-	m.ResetCache()
+	render.ResetCache()
 	result4 := m.Render(rpt1)
 	if !reflect.DeepEqual(result1, result4) {
 		t.Errorf("Expected original result to be returned: %s", test.Diff(result1, result4))
8 changes: 0 additions & 8 deletions render/render.go
@@ -8,7 +8,6 @@ import (
 type Renderer interface {
 	Render(report.Report) RenderableNodes
 	Stats(report.Report) Stats
-	ResetCache()
 }
 
 // Stats is the type returned by Renderer.Stats
@@ -50,13 +49,6 @@ func (r *Reduce) Stats(rpt report.Report) Stats {
 	return result
 }
 
-// ResetCache blows away the rendered node cache.
-func (r *Reduce) ResetCache() {
-	for _, renderer := range *r {
-		renderer.ResetCache()
-	}
-}
-
 // Map is a Renderer which produces a set of RenderableNodes from the set of
 // RenderableNodes produced by another Renderer.
 type Map struct {
1 change: 0 additions & 1 deletion render/render_test.go
@@ -15,7 +15,6 @@ type mockRenderer struct {
 
 func (m mockRenderer) Render(rpt report.Report) render.RenderableNodes { return m.RenderableNodes }
 func (m mockRenderer) Stats(rpt report.Report) render.Stats            { return render.Stats{} }
-func (m mockRenderer) ResetCache()                                      {}
 
 func TestReduceRender(t *testing.T) {
 	renderer := render.Reduce([]render.Renderer{
3 changes: 0 additions & 3 deletions render/selectors.go
@@ -18,9 +18,6 @@ func (t TopologySelector) Stats(r report.Report) Stats {
 	return Stats{}
 }
 
-// ResetCache implements Renderer
-func (t TopologySelector) ResetCache() {}
-
 // MakeRenderableNodes converts a topology to a set of RenderableNodes
 func MakeRenderableNodes(t report.Topology) RenderableNodes {
 	result := RenderableNodes{}
