Add LRU mem cache implementation #16226

Merged · 3 commits · Jul 10, 2021
3 changes: 2 additions & 1 deletion custom/conf/app.example.ini
@@ -1471,7 +1471,7 @@ PATH =
;; if the cache enabled
;ENABLED = true
;;
;; Either "memory", "redis", or "memcache", default is "memory"
;; Either "memory", "redis", "memcache", or "twoqueue". default is "memory"
;ADAPTER = memory
;;
;; For "memory" only, GC interval in seconds, default is 60
@@ -1480,6 +1480,7 @@ PATH =
;; For "redis" and "memcache", connection host address
;; redis: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180
;; memcache: `127.0.0.1:11211`
;; twoqueue: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000`
;HOST =
;;
;; Time to keep items in cache if not used, default is 16 hours.
7 changes: 4 additions & 3 deletions docs/content/doc/advanced/config-cheat-sheet.en-us.md
@@ -590,11 +590,12 @@ Define allowed algorithms and their minimum key length (use -1 to disable a type
## Cache (`cache`)

- `ENABLED`: **true**: Enable the cache.
- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, or `memcache`.
- `INTERVAL`: **60**: Garbage Collection interval (sec), for memory cache only.
- `HOST`: **\<empty\>**: Connection string for `redis` and `memcache`.
- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, `memcache`, or `twoqueue` (`twoqueue` is a size-limited LRU cache).
- `INTERVAL`: **60**: Garbage Collection interval (sec), for memory and twoqueue cache only.
- `HOST`: **\<empty\>**: Connection string for `redis` and `memcache`. For `twoqueue` it holds the queue configuration (see the example after this list).
- Redis: `redis://:macaron@127.0.0.1:6379/0?pool_size=100&idle_timeout=180s`
- Memcache: `127.0.0.1:9090;127.0.0.1:9091`
- TwoQueue LRU cache: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000` representing the maximum number of objects stored in the cache.
- `ITEM_TTL`: **16h**: Time to keep items in cache if not used; setting it to 0 disables caching.
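
As a sketch (the numbers are only illustrative), a `[cache]` section switched to the new adapter could look like the following; the JSON keys mirror the `TwoQueueCacheConfig` fields added in this PR:

```ini
[cache]
ENABLED = true
ADAPTER = twoqueue
; GC pass every 60 seconds, as with the memory adapter
INTERVAL = 60
; HOST is either a plain size ...
;HOST = 50000
; ... or a full 2Q configuration
HOST = {"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}
ITEM_TTL = 16h
```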

## Cache - LastCommitCache settings (`cache.last_commit`)
204 changes: 204 additions & 0 deletions modules/cache/cache_twoqueue.go
@@ -0,0 +1,204 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cache

import (
"strconv"
"sync"
"time"

mc "gitea.com/go-chi/cache"
lru "github.com/hashicorp/golang-lru"
jsoniter "github.com/json-iterator/go"
)

// TwoQueueCache represents a LRU 2Q cache adapter implementation
type TwoQueueCache struct {
lock sync.Mutex
cache *lru.TwoQueueCache
interval int
}

// TwoQueueCacheConfig describes the configuration for TwoQueueCache
type TwoQueueCacheConfig struct {
Size int `ini:"SIZE" json:"size"`
RecentRatio float64 `ini:"RECENT_RATIO" json:"recent_ratio"`
GhostRatio float64 `ini:"GHOST_RATIO" json:"ghost_ratio"`
}

// MemoryItem represents a memory cache item.
type MemoryItem struct {
Val interface{}
Created int64
Timeout int64
}

func (item *MemoryItem) hasExpired() bool {
return item.Timeout > 0 &&
(time.Now().Unix()-item.Created) >= item.Timeout
}

var _ mc.Cache = &TwoQueueCache{}

// Put puts value into cache with key and expire time.
func (c *TwoQueueCache) Put(key string, val interface{}, timeout int64) error {
item := &MemoryItem{
Val: val,
Created: time.Now().Unix(),
Timeout: timeout,
}
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(key, item)
return nil
}

// Get gets cached value by given key.
func (c *TwoQueueCache) Get(key string) interface{} {
c.lock.Lock()
defer c.lock.Unlock()
cached, ok := c.cache.Get(key)
if !ok {
return nil
}
item, ok := cached.(*MemoryItem)

if !ok || item.hasExpired() {
c.cache.Remove(key)
return nil
}

return item.Val
}

// Delete deletes cached value by given key.
func (c *TwoQueueCache) Delete(key string) error {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Remove(key)
return nil
}

// Incr increases cached int-type value by given key as a counter.
func (c *TwoQueueCache) Incr(key string) error {
c.lock.Lock()
defer c.lock.Unlock()
cached, ok := c.cache.Get(key)
if !ok {
return nil
}
item, ok := cached.(*MemoryItem)

if !ok || item.hasExpired() {
c.cache.Remove(key)
return nil
}

var err error
item.Val, err = mc.Incr(item.Val)
return err
}

// Decr decreases cached int-type value by given key as a counter.
func (c *TwoQueueCache) Decr(key string) error {
c.lock.Lock()
defer c.lock.Unlock()
cached, ok := c.cache.Get(key)
if !ok {
return nil
}
item, ok := cached.(*MemoryItem)

if !ok || item.hasExpired() {
c.cache.Remove(key)
return nil
}

var err error
item.Val, err = mc.Decr(item.Val)
return err
}

// IsExist returns true if cached value exists.
func (c *TwoQueueCache) IsExist(key string) bool {
c.lock.Lock()
defer c.lock.Unlock()
cached, ok := c.cache.Peek(key)
if !ok {
return false
}
item, ok := cached.(*MemoryItem)
if !ok || item.hasExpired() {
c.cache.Remove(key)
return false
}

return true
}

// Flush deletes all cached data.
func (c *TwoQueueCache) Flush() error {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Purge()
return nil
}

// checkAndInvalidate evicts the entry for key if it has expired or is not a *MemoryItem.
func (c *TwoQueueCache) checkAndInvalidate(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
cached, ok := c.cache.Peek(key)
if !ok {
return
}
item, ok := cached.(*MemoryItem)
if !ok || item.hasExpired() {
c.cache.Remove(key)
}
}

// startGC walks the cached keys, evicts expired entries, and reschedules itself after interval seconds.
func (c *TwoQueueCache) startGC() {
if c.interval < 0 {
return
}
for _, key := range c.cache.Keys() {
c.checkAndInvalidate(key)
}
time.AfterFunc(time.Duration(c.interval)*time.Second, c.startGC)
}

// StartAndGC starts GC routine based on config string settings.
func (c *TwoQueueCache) StartAndGC(opts mc.Options) error {
var err error
size := 50000
if opts.AdapterConfig != "" {
size, err = strconv.Atoi(opts.AdapterConfig)
}
if err != nil {
json := jsoniter.ConfigCompatibleWithStandardLibrary
if !json.Valid([]byte(opts.AdapterConfig)) {
return err
}

cfg := &TwoQueueCacheConfig{
Size: 50000,
RecentRatio: lru.Default2QRecentRatio,
GhostRatio: lru.Default2QGhostEntries,
}
_ = json.Unmarshal([]byte(opts.AdapterConfig), cfg)
c.cache, err = lru.New2QParams(cfg.Size, cfg.RecentRatio, cfg.GhostRatio)
} else {
c.cache, err = lru.New2Q(size)
}
c.interval = opts.Interval
if c.interval > 0 {
go c.startGC()
}
return err
}

func init() {
mc.Register("twoqueue", &TwoQueueCache{})
}
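
To make the adapter's behaviour concrete, here is a minimal sketch that drives `TwoQueueCache` directly through the methods added above, rather than through Gitea's cache service (in Gitea the adapter is selected via `ADAPTER = twoqueue` and wired in by `cache.NewContext()`); the `main` wrapper and import aliases are assumptions for illustration only:

```go
package main

import (
	"fmt"

	mc "gitea.com/go-chi/cache"

	gitea_cache "code.gitea.io/gitea/modules/cache" // package containing the adapter from this PR
)

func main() {
	c := &gitea_cache.TwoQueueCache{}

	// AdapterConfig accepts a plain size or the JSON form documented above;
	// Interval is the GC period in seconds.
	if err := c.StartAndGC(mc.Options{AdapterConfig: "50000", Interval: 60}); err != nil {
		panic(err)
	}

	// The timeout is in seconds; 0 means the item never expires.
	_ = c.Put("answer", 42, 600)

	fmt.Println(c.IsExist("answer")) // true
	fmt.Println(c.Get("answer"))     // 42

	_ = c.Delete("answer")
	fmt.Println(c.IsExist("answer")) // false
}
```

In normal operation none of this is called directly: `routers/init.go` below wires the adapter in through `cache.NewContext()`, which with this PR fails fast if the cache cannot be started.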
7 changes: 6 additions & 1 deletion modules/setting/cache.go
@@ -58,11 +58,16 @@ func newCacheService() {
log.Fatal("Failed to map Cache settings: %v", err)
}

CacheService.Adapter = sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache"})
CacheService.Adapter = sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache", "twoqueue"})
switch CacheService.Adapter {
case "memory":
case "redis", "memcache":
CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
case "twoqueue":
CacheService.Conn = strings.TrimSpace(sec.Key("HOST").String())
if CacheService.Conn == "" {
CacheService.Conn = "50000"
}
case "": // disable cache
CacheService.Enabled = false
default:
4 changes: 3 additions & 1 deletion routers/init.go
@@ -52,7 +52,9 @@ func NewServices() {
log.Fatal("repository init failed: %v", err)
}
mailer.NewContext()
_ = cache.NewContext()
if err := cache.NewContext(); err != nil {
log.Fatal("Unable to start cache service: %v", err)
}
notification.NewContext()
if err := archiver.Init(); err != nil {
log.Fatal("archiver init failed: %v", err)