mitttlru.go
package mitlru

import (
	"sync"
	"time"

	"github.com/robmccoll/mitlru/list"
)
// TTLRUCache stores up to capacity triples of keys, values, and expiration
// timestamps. Once capacity is reached, each new insertion evicts the least
// recently read/created triple; a background sweeper also evicts triples
// whose TTL has expired.
type TTLRUCache struct {
	capacity   int
	mapping    map[interface{}]*triple
	order      *list.List
	timeorder  *list.List
	ttl        time.Duration
	lock       sync.RWMutex
	triplePool sync.Pool
}

type triple struct {
	key          interface{}
	val          interface{}
	expiration   time.Time
	timeelement  list.Element
	orderelement list.Element
}

func newTriple() interface{} {
	return &triple{}
}
// NewTTLRUCache returns a cache that holds at most capacity entries (evicting
// the least recently used beyond that) and expires entries after ttl. It also
// starts a background goroutine that sweeps out expired entries once per second.
func NewTTLRUCache(capacity int, ttl time.Duration) *TTLRUCache {
	rtn := &TTLRUCache{
		capacity:  capacity,
		mapping:   make(map[interface{}]*triple),
		order:     list.New(),
		timeorder: list.New(),
		ttl:       ttl,
	}
	rtn.triplePool.New = newTriple
	go rtn.timer()
	return rtn
}
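
// The example below is an illustrative usage sketch added for clarity and is
// not part of the original file; it assumes only the exported API defined in
// this package, with arbitrary example keys and values.
func exampleBasicUsage() {
	// Hold up to 128 entries, each expiring 5 minutes after insertion by default.
	cache := NewTTLRUCache(128, 5*time.Minute)

	// Add stores a value under the default TTL and marks it most recently used.
	cache.Add("answer", 42)

	// Get reports whether the key is still cached and refreshes its LRU position.
	if v, ok := cache.Get("answer"); ok {
		_ = v // v holds the stored value (42)
	}

	// Remove deletes the entry immediately, ahead of any TTL or LRU eviction.
	cache.Remove("answer")
}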
// timer runs for the lifetime of the cache, waking once per second to evict
// every entry at the front of the expiration-ordered list whose expiration
// time has passed.
func (lru *TTLRUCache) timer() {
	for {
		lru.lock.Lock()
		now := time.Now()
		for lru.order.Len() > 0 {
			efront := lru.timeorder.Front()
			tfront := efront.Value.(*triple)
			if tfront.expiration.After(now) {
				break
			}
			lru.order.Remove(&tfront.orderelement)
			lru.timeorder.Remove(efront)
			delete(lru.mapping, tfront.key)
			lru.triplePool.Put(tfront)
		}
		lru.lock.Unlock()
		time.Sleep(1 * time.Second)
	}
}
// Purge clears everything from the cache.
func (lru *TTLRUCache) Purge() {
	lru.lock.Lock()
	defer lru.lock.Unlock()
	lru.mapping = make(map[interface{}]*triple)
	lru.order = list.New()
	lru.timeorder = list.New()
}

// Add stores a key value pair in the cache. If the key was already present,
// it is moved to the front.
func (lru *TTLRUCache) Add(key, val interface{}) {
	lru.AddWithExpire(key, val, time.Now().Add(lru.ttl))
}
// AddWithTTL stores a key value pair in the cache. If the key was already present,
// it is moved to the front. Additionally, the given TTL is used (overriding the
// default ttl). NOTE: O(N) worst case! It is assumed that entries will mostly
// arrive in expiration order - they are kept in a doubly linked list.
func (lru *TTLRUCache) AddWithTTL(key, val interface{}, ttl time.Duration) {
	lru.AddWithExpire(key, val, time.Now().Add(ttl))
}
// AddWithExpire stores a key value pair in the cache. If the key was already present,
// it is moved to the front. Additionally, the given expiration time is used (overriding the
// default ttl). NOTE: O(N) worst case! It is assumed that entries will mostly arrive
// in expiration order - they are kept in a doubly linked list.
func (lru *TTLRUCache) AddWithExpire(key, val interface{}, expiration time.Time) {
	lru.lock.Lock()
	defer lru.lock.Unlock()

	// If the key already exists, update it in place and refresh its positions.
	if etriple, ok := lru.mapping[key]; ok {
		etriple.val = val
		lru.order.MoveToFront(&etriple.orderelement)
		lru.timeorder.MoveToBack(&etriple.timeelement)
		etriple.expiration = expiration
		return
	}

	etriple := lru.triplePool.Get().(*triple)
	etriple.key = key
	etriple.val = val
	etriple.expiration = expiration
	etriple.orderelement.Value = etriple
	etriple.timeelement.Value = etriple

	lru.order.MoveToFront(&etriple.orderelement)

	// Insert into the expiration-ordered list, scanning backward from the end
	// (latest expiration) to find the right position.
	if lru.timeorder.Len() == 0 {
		lru.timeorder.MoveToBack(&etriple.timeelement)
	} else {
		ecur := lru.timeorder.Back()
		for ecur != nil && ecur.Value.(*triple).expiration.After(expiration) {
			ecur = ecur.Value.(*triple).timeelement.Prev()
		}
		if ecur == nil {
			lru.timeorder.MoveToFront(&etriple.timeelement)
		} else {
			lru.timeorder.MoveAfter(&etriple.timeelement, &ecur.Value.(*triple).timeelement)
		}
	}

	lru.mapping[key] = etriple

	// Evict least recently used entries until the cache is back within capacity.
	for lru.order.Len() > lru.capacity {
		element := lru.order.Back()
		etriple := element.Value.(*triple)
		lru.order.Remove(element)
		lru.timeorder.Remove(&etriple.timeelement)
		delete(lru.mapping, etriple.key)
		lru.triplePool.Put(etriple)
	}
}
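
// Illustrative sketch (not part of the original file) of per-entry expirations,
// assuming only the exported API above. As the doc comments note, inserting
// entries in roughly increasing expiration order keeps the backward scan cheap;
// the keys and durations below are arbitrary.
func exampleExpirations() {
	cache := NewTTLRUCache(64, time.Minute)

	// Override the default TTL for a single entry.
	cache.AddWithTTL("session", "token-data", 30*time.Second)

	// Or pin an entry to an absolute expiration time.
	cache.AddWithExpire("report", "daily-summary", time.Now().Add(24*time.Hour))

	// The background sweeper evicts these once their expirations pass; LRU
	// eviction can remove them earlier if the cache exceeds its capacity.
}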
// Get returns the value if the key exists in the cache.
func (lru *TTLRUCache) Get(key interface{}) (value interface{}, foundInCache bool) {
	lru.lock.Lock()
	defer lru.lock.Unlock()
	if element, ok := lru.mapping[key]; ok {
		lru.order.MoveToFront(&element.orderelement)
		return element.val, true
	}
	return nil, false
}
// Remove removes a key / value combination.
func (lru *TTLRUCache) Remove(key interface{}) {
	lru.lock.Lock()
	defer lru.lock.Unlock()
	if element, ok := lru.mapping[key]; ok {
		lru.order.Remove(&element.orderelement)
		lru.timeorder.Remove(&element.timeelement)
		delete(lru.mapping, element.key)
		lru.triplePool.Put(element)
	}
}

// Len returns the number of triples in the cache.
func (lru *TTLRUCache) Len() int {
	lru.lock.RLock()
	defer lru.lock.RUnlock()
	return lru.order.Len()
}

// Capacity returns the capacity of the cache.
func (lru *TTLRUCache) Capacity() int {
	return lru.capacity
}
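
// Illustrative sketch (not part of the original file) of capacity-based
// eviction, assuming only the exported API above: once Len reaches Capacity,
// each new key evicts the least recently used entry.
func exampleEviction() {
	cache := NewTTLRUCache(2, time.Minute)

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // over capacity: "a", the least recently used entry, is evicted

	_, stillCached := cache.Get("a") // stillCached is false
	_ = stillCached
	_ = cache.Len()      // 2
	_ = cache.Capacity() // 2
}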