diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 2b16e99..7e0a2e2 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -5,5 +5,6 @@ code was contributed.)
 
 Dustin Sallings
 Jason Mooberry
+Matthew Keller
 Sergey Shepelev
 Alex Edwards
diff --git a/README.md b/README.md
index c5789cc..bd89333 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,15 @@ cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
 items map to serialize, and `NewFrom()` to create a cache from a deserialized
 one) to recover from downtime quickly. (See the docs for `NewFrom()` for
 caveats.)
 
+When you create a cache with `NewWithLRU()` and set maxItems to a value greater
+than 0, LRU functionality is enabled. The cache then updates a timestamp each
+time an item is retrieved. In the background, the janitor deletes items that
+have expired, and evicts the least-recently-used items when the cache holds more
+than maxItems entries. The cleanup interval you choose controls how often those
+items are actually removed from the cache. If you don't want to use the janitor
+and wish to purge LRU items manually, call `c.DeleteLRU()` to trim the cache back
+down to maxItems, or `c.DeleteLRUAmount(n)`, where `n` is the number of items to purge.
+
 ### Installation
 
 `go get github.com/patrickmn/go-cache`
diff --git a/cache.go b/cache.go
index 30b1ea2..d7ef059 100644
--- a/cache.go
+++ b/cache.go
@@ -13,6 +13,7 @@ import (
 type Item struct {
 	Object     interface{}
 	Expiration int64
+	Accessed   int64
 }
 
 // Returns true if the item has expired.
@@ -23,6 +24,11 @@ func (item Item) Expired() bool {
 	return time.Now().UnixNano() > item.Expiration
 }
 
+// Return the time at which this item was last accessed.
+func (item Item) LastAccessed() time.Time {
+	return time.Unix(0, item.Accessed)
+}
+
 const (
 	// For use with functions that take an expiration time.
 	NoExpiration time.Duration = -1
@@ -43,6 +49,7 @@ type cache struct {
 	mu                sync.RWMutex
 	onEvicted         func(string, interface{})
 	janitor           *janitor
+	maxItems          int
 }
 
 // Add an item to the cache, replacing any existing item. If the duration is 0
@@ -50,34 +57,68 @@ type cache struct {
 // (NoExpiration), the item never expires.
 func (c *cache) Set(k string, x interface{}, d time.Duration) {
 	// "Inlining" of set
-	var e int64
+	var (
+		now time.Time
+		e   int64
+	)
 	if d == DefaultExpiration {
 		d = c.defaultExpiration
 	}
 	if d > 0 {
-		e = time.Now().Add(d).UnixNano()
+		now = time.Now()
+		e = now.Add(d).UnixNano()
 	}
-	c.mu.Lock()
-	c.items[k] = Item{
-		Object:     x,
-		Expiration: e,
+	if c.maxItems > 0 {
+		if d <= 0 {
+			// d <= 0 means we didn't set now above
+			now = time.Now()
+		}
+		c.mu.Lock()
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+			Accessed:   now.UnixNano(),
+		}
+		// TODO: Calls to mu.Unlock are currently not deferred because
+		// defer adds ~200 ns (as of go1.)
+		c.mu.Unlock()
+	} else {
+		c.mu.Lock()
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+		}
+		c.mu.Unlock()
 	}
-	// TODO: Calls to mu.Unlock are currently not deferred because defer
-	// adds ~200 ns (as of go1.)
-	c.mu.Unlock()
 }
 
 func (c *cache) set(k string, x interface{}, d time.Duration) {
-	var e int64
+	var (
+		now time.Time
+		e   int64
+	)
 	if d == DefaultExpiration {
 		d = c.defaultExpiration
 	}
 	if d > 0 {
-		e = time.Now().Add(d).UnixNano()
+		now = time.Now()
+		e = now.Add(d).UnixNano()
 	}
-	c.items[k] = Item{
-		Object:     x,
-		Expiration: e,
+	if c.maxItems > 0 {
+		if d <= 0 {
+			// d <= 0 means we didn't set now above
+			now = time.Now()
+		}
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+			Accessed:   now.UnixNano(),
+		}
+	} else {
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+		}
 	}
 }
 
@@ -118,20 +159,70 @@ func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
 // Get an item from the cache. Returns the item or nil, and a bool indicating
 // whether the key was found.
 func (c *cache) Get(k string) (interface{}, bool) {
-	c.mu.RLock()
+	if c.maxItems > 0 {
+		// LRU enabled; Get implies write
+		c.mu.Lock()
+	} else {
+		// LRU not enabled; Get is read-only
+		c.mu.RLock()
+	}
 	// "Inlining" of get and Expired
 	item, found := c.items[k]
 	if !found {
-		c.mu.RUnlock()
+		if c.maxItems > 0 {
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
 		return nil, false
 	}
+	var now int64
 	if item.Expiration > 0 {
-		if time.Now().UnixNano() > item.Expiration {
-			c.mu.RUnlock()
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			if c.maxItems > 0 {
+				c.mu.Unlock()
+			} else {
+				c.mu.RUnlock()
+			}
 			return nil, false
 		}
 	}
-	c.mu.RUnlock()
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+		c.mu.Unlock()
+	} else {
+		c.mu.RUnlock()
+	}
 	return item.Object, true
 }
 
+// If LRU functionality is being used (and get implies updating item.Accessed,)
+// this function must be write-locked.
+func (c *cache) get(k string) (interface{}, bool) {
+	item, found := c.items[k]
+	if !found {
+		return nil, false
+	}
+	// "Inlining" of Expired
+	var now int64
+	if item.Expiration > 0 {
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			return nil, false
+		}
+	}
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+	}
+	return item.Object, true
+}
+
@@ -140,45 +231,61 @@ func (c *cache) Get(k string) (interface{}, bool) {
 // never expires a zero value for time.Time is returned), and a bool indicating
 // whether the key was found.
 func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) {
-	c.mu.RLock()
+	if c.maxItems > 0 {
+		// LRU enabled; GetWithExpiration implies write
+		c.mu.Lock()
+	} else {
+		// LRU not enabled; GetWithExpiration is read-only
+		c.mu.RLock()
+	}
 	// "Inlining" of get and Expired
 	item, found := c.items[k]
 	if !found {
-		c.mu.RUnlock()
+		if c.maxItems > 0 {
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
 		return nil, time.Time{}, false
 	}
-
+	var now int64
 	if item.Expiration > 0 {
-		if time.Now().UnixNano() > item.Expiration {
-			c.mu.RUnlock()
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			if c.maxItems > 0 {
+				c.mu.Unlock()
+			} else {
+				c.mu.RUnlock()
+			}
 			return nil, time.Time{}, false
 		}
-
-		// Return the item and the expiration time
-		c.mu.RUnlock()
+		if c.maxItems > 0 {
+			if now == 0 {
+				now = time.Now().UnixNano()
+			}
+			item.Accessed = now
+			c.items[k] = item
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
 		return item.Object, time.Unix(0, item.Expiration), true
 	}
-
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+		c.mu.Unlock()
+	} else {
+		c.mu.RUnlock()
+	}
 	// If expiration <= 0 (i.e. no expiration time set) then return the item
 	// and a zeroed time.Time
-	c.mu.RUnlock()
 	return item.Object, time.Time{}, true
 }
 
-func (c *cache) get(k string) (interface{}, bool) {
-	item, found := c.items[k]
-	if !found {
-		return nil, false
-	}
-	// "Inlining" of Expired
-	if item.Expiration > 0 {
-		if time.Now().UnixNano() > item.Expiration {
-			return nil, false
-		}
-	}
-	return item.Object, true
-}
-
 // Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
 // uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
 // item's value is not an integer, if it was not found, or if it is not
@@ -191,6 +298,9 @@ func (c *cache) Increment(k string, n int64) error {
 		c.mu.Unlock()
 		return fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	switch v.Object.(type) {
 	case int:
 		v.Object = v.Object.(int) + int(n)
@@ -239,6 +349,9 @@ func (c *cache) IncrementFloat(k string, n float64) error {
 		c.mu.Unlock()
 		return fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	switch v.Object.(type) {
 	case float32:
 		v.Object = v.Object.(float32) + float32(n)
@@ -263,6 +376,9 @@ func (c *cache) IncrementInt(k string, n int) (int, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int)
 	if !ok {
 		c.mu.Unlock()
@@ -285,6 +401,9 @@ func (c *cache) IncrementInt8(k string, n int8) (int8, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int8)
 	if !ok {
 		c.mu.Unlock()
@@ -307,6 +426,9 @@ func (c *cache) IncrementInt16(k string, n int16) (int16, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int16)
 	if !ok {
 		c.mu.Unlock()
@@ -329,6 +451,9 @@ func (c *cache) IncrementInt32(k string, n int32) (int32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int32)
 	if !ok {
 		c.mu.Unlock()
@@ -351,6 +476,9 @@ func (c *cache) IncrementInt64(k string, n int64) (int64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int64)
 	if !ok {
 		c.mu.Unlock()
@@ -373,6 +501,9 @@ func (c *cache) IncrementUint(k string, n uint) (uint, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint)
 	if !ok {
 		c.mu.Unlock()
@@ -395,6 +526,9 @@ func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uintptr)
 	if !ok {
 		c.mu.Unlock()
@@ -417,6 +551,9 @@ func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint8)
 	if !ok {
 		c.mu.Unlock()
@@ -439,6 +576,9 @@ func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint16)
 	if !ok {
 		c.mu.Unlock()
@@ -461,6 +601,9 @@ func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint32)
 	if !ok {
 		c.mu.Unlock()
@@ -483,6 +626,9 @@ func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint64)
 	if !ok {
 		c.mu.Unlock()
@@ -505,6 +651,9 @@ func (c *cache) IncrementFloat32(k string, n float32) (float32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(float32)
 	if !ok {
 		c.mu.Unlock()
@@ -527,6 +676,9 @@ func (c *cache) IncrementFloat64(k string, n float64) (float64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(float64)
 	if !ok {
 		c.mu.Unlock()
@@ -553,6 +705,9 @@ func (c *cache) Decrement(k string, n int64) error {
 		c.mu.Unlock()
 		return fmt.Errorf("Item not found")
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	switch v.Object.(type) {
 	case int:
 		v.Object = v.Object.(int) - int(n)
@@ -601,6 +756,9 @@ func (c *cache) DecrementFloat(k string, n float64) error {
 		c.mu.Unlock()
 		return fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	switch v.Object.(type) {
 	case float32:
 		v.Object = v.Object.(float32) - float32(n)
@@ -625,6 +783,9 @@ func (c *cache) DecrementInt(k string, n int) (int, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int)
 	if !ok {
 		c.mu.Unlock()
@@ -647,6 +808,9 @@ func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int8)
 	if !ok {
 		c.mu.Unlock()
@@ -669,6 +833,9 @@ func (c *cache) DecrementInt16(k string, n int16) (int16, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int16)
 	if !ok {
 		c.mu.Unlock()
@@ -691,6 +858,9 @@ func (c *cache) DecrementInt32(k string, n int32) (int32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int32)
 	if !ok {
 		c.mu.Unlock()
@@ -713,6 +883,9 @@ func (c *cache) DecrementInt64(k string, n int64) (int64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(int64)
 	if !ok {
 		c.mu.Unlock()
@@ -735,6 +908,9 @@ func (c *cache) DecrementUint(k string, n uint) (uint, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint)
 	if !ok {
 		c.mu.Unlock()
@@ -757,6 +933,9 @@ func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uintptr)
 	if !ok {
 		c.mu.Unlock()
@@ -779,6 +958,9 @@ func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint8)
 	if !ok {
 		c.mu.Unlock()
@@ -801,6 +983,9 @@ func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint16)
 	if !ok {
 		c.mu.Unlock()
@@ -823,6 +1008,9 @@ func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint32)
 	if !ok {
 		c.mu.Unlock()
@@ -845,6 +1033,9 @@ func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(uint64)
 	if !ok {
 		c.mu.Unlock()
@@ -867,6 +1058,9 @@ func (c *cache) DecrementFloat32(k string, n float32) (float32, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(float32)
 	if !ok {
 		c.mu.Unlock()
@@ -889,6 +1083,9 @@ func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
 		c.mu.Unlock()
 		return 0, fmt.Errorf("Item %s not found", k)
 	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
 	rv, ok := v.Object.(float64)
 	if !ok {
 		c.mu.Unlock()
@@ -905,9 +1102,10 @@ func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
 func (c *cache) Delete(k string) {
 	c.mu.Lock()
 	v, evicted := c.delete(k)
+	evictFunc := c.onEvicted
 	c.mu.Unlock()
 	if evicted {
-		c.onEvicted(k, v)
+		evictFunc(k, v)
 	}
 }
 
@@ -932,8 +1130,9 @@ func (c *cache) DeleteExpired() {
 	var evictedItems []keyAndValue
 	now := time.Now().UnixNano()
 	c.mu.Lock()
+	evictFunc := c.onEvicted
 	for k, v := range c.items {
-		// "Inlining" of expired
+		// "Inlining" of Expired
 		if v.Expiration > 0 && now > v.Expiration {
 			ov, evicted := c.delete(k)
 			if evicted {
@@ -943,7 +1142,7 @@ func (c *cache) DeleteExpired() {
 	}
 	c.mu.Unlock()
 	for _, v := range evictedItems {
-		c.onEvicted(v.key, v.value)
+		evictFunc(v.key, v.value)
 	}
 }
 
@@ -956,6 +1155,80 @@ func (c *cache) OnEvicted(f func(string, interface{})) {
 	c.mu.Unlock()
 }
 
+// Delete some of the oldest items in the cache if the soft size limit has been
+// exceeded.
+func (c *cache) DeleteLRU() {
+	c.mu.Lock()
+	var (
+		overCount = c.itemCount() - c.maxItems
+		evictFunc = c.onEvicted
+	)
+	evicted := c.deleteLRUAmount(overCount)
+	c.mu.Unlock()
+	for _, v := range evicted {
+		evictFunc(v.key, v.value)
+	}
+}
+
+// Delete a number of the oldest items from the cache.
+func (c *cache) DeleteLRUAmount(numItems int) {
+	c.mu.Lock()
+	evictFunc := c.onEvicted
+	evicted := c.deleteLRUAmount(numItems)
+	c.mu.Unlock()
+	for _, v := range evicted {
+		evictFunc(v.key, v.value)
+	}
+}
+
+func (c *cache) deleteLRUAmount(numItems int) []keyAndValue {
+	if numItems <= 0 {
+		return nil
+	}
+	var (
+		lastTime     int64 = 0
+		lastItems          = make([]string, numItems) // Ring buffer
+		liCount            = 0
+		full               = false
+		evictedItems []keyAndValue
+		now                = time.Now().UnixNano()
+	)
+	if c.onEvicted != nil {
+		evictedItems = make([]keyAndValue, 0, numItems)
+	}
+	for k, v := range c.items {
+		// "Inlining" of !Expired
+		if v.Expiration == 0 || now <= v.Expiration {
+			if full == false || v.Accessed < lastTime {
+				// We found a least-recently-used item, or our
+				// purge buffer isn't full yet
+				lastTime = v.Accessed
+				// Append it to the buffer, or start overwriting
+				// it
+				if liCount < numItems {
+					lastItems[liCount] = k
+					liCount++
+				} else {
+					lastItems[0] = k
+					liCount = 1
+					full = true
+				}
+			}
+		}
+	}
+	if lastTime > 0 {
+		for _, v := range lastItems {
+			if v != "" {
+				ov, evicted := c.delete(v)
+				if evicted {
+					evictedItems = append(evictedItems, keyAndValue{v, ov})
+				}
+			}
+		}
+	}
+	return evictedItems
+}
+
 // Write the cache's items (using Gob) to an io.Writer.
 //
 // NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
@@ -1061,6 +1334,14 @@ func (c *cache) ItemCount() int {
 	return n
 }
 
+// Returns the number of items in the cache without locking. This may include
+// items that have expired, but have not yet been cleaned up. Equivalent to
+// len(c.Items()).
+func (c *cache) itemCount() int {
+	n := len(c.items)
+	return n
+}
+
 // Delete all items from the cache.
 func (c *cache) Flush() {
 	c.mu.Lock()
@@ -1080,6 +1361,9 @@ func (j *janitor) Run(c *cache) {
 		select {
 		case <-ticker.C:
 			c.DeleteExpired()
+			if c.maxItems > 0 {
+				c.DeleteLRU()
+			}
 		case <-j.stop:
 			ticker.Stop()
 			return
@@ -1099,19 +1383,20 @@ func runJanitor(c *cache, ci time.Duration) {
 	go j.Run(c)
 }
 
-func newCache(de time.Duration, m map[string]Item) *cache {
+func newCache(de time.Duration, m map[string]Item, mi int) *cache {
 	if de == 0 {
 		de = -1
 	}
 	c := &cache{
 		defaultExpiration: de,
+		maxItems:          mi,
 		items:             m,
 	}
 	return c
 }
 
-func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
-	c := newCache(de, m)
+func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item, mi int) *Cache {
+	c := newCache(de, m, mi)
 	// This trick ensures that the janitor goroutine (which--granted it
 	// was enabled--is running DeleteExpired on c forever) does not keep
 	// the returned C object from being garbage collected. When it is
@@ -1132,7 +1417,22 @@ func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item)
 // deleted from the cache before calling c.DeleteExpired().
 func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
 	items := make(map[string]Item)
-	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
+}
+
+// Return a new cache with a given default expiration duration, cleanup
+// interval, and maximum-ish number of items. If the expiration duration
+// is less than one (or NoExpiration), the items in the cache never expire
+// (by default), and must be deleted manually. If the cleanup interval is
+// less than one, expired items are not deleted from the cache before
+// calling c.DeleteExpired(), c.DeleteLRU(), or c.DeleteLRUAmount(). If maxItems
+// is not greater than zero, then there will be no soft cap on the number of
+// items in the cache.
+//
+// Using the LRU functionality makes Get() a slower, write-locked operation.
+func NewWithLRU(defaultExpiration, cleanupInterval time.Duration, maxItems int) *Cache {
+	items := make(map[string]Item)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
 }
 
 // Return a new cache with a given default expiration duration and cleanup
@@ -1157,5 +1457,10 @@ func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
 // map retrieved with c.Items(), and to register those same types before
 // decoding a blob containing an items map.
 func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache {
-	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
+}
+
+// Similar to NewFrom, but creates a cache with LRU functionality enabled.
+func NewFromWithLRU(defaultExpiration, cleanupInterval time.Duration, items map[string]Item, maxItems int) *Cache {
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
 }
diff --git a/cache_test.go b/cache_test.go
index 47a3d53..a112ca4 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -1224,6 +1224,41 @@ func TestDecrementUnderflowUint(t *testing.T) {
 	}
 }
 
+// TODO: Ring buffer is more efficient but doesn't guarantee that the actually
+// oldest items are removed, just some old items. This shouldn't be significant
+// for large caches, but we can't test it easily.
+//
+// func TestDeleteLRU(t *testing.T) {
+// 	tc := NewWithLRU(1*time.Second, 0, 1)
+// 	tc.Set("foo", 0, DefaultExpiration)
+// 	tc.Set("bar", 1, DefaultExpiration)
+// 	tc.Set("baz", 2, DefaultExpiration)
+// 	tc.Get("foo")
+// 	tc.Get("baz")
+// 	time.Sleep(5 * time.Millisecond)
+// 	tc.Get("bar")
+// 	// Bar was accessed most recently, and should be the only value that
+// 	// stays.
+// 	tc.DeleteLRU()
+// 	if tc.ItemCount() != 1 {
+// 		t.Error("tc.ItemCount() is not 1")
+// 	}
+// 	if _, found := tc.Get("bar"); !found {
+// 		t.Error("bar was not found")
+// 	}
+// }
+
+func TestDeleteLRU(t *testing.T) {
+	tc := NewWithLRU(1*time.Second, 0, 1)
+	tc.Set("foo", 0, DefaultExpiration)
+	tc.Set("bar", 1, DefaultExpiration)
+	tc.Set("baz", 2, DefaultExpiration)
+	tc.DeleteLRU()
+	if tc.ItemCount() != 1 {
+		t.Error("tc.ItemCount() is not 1")
+	}
+}
+
 func TestOnEvicted(t *testing.T) {
 	tc := New(DefaultExpiration, 0)
 	tc.Set("foo", 3, DefaultExpiration)
@@ -1443,6 +1478,24 @@ func benchmarkCacheGet(b *testing.B, exp time.Duration) {
 	}
 }
 
+func BenchmarkCacheWithLRUGetExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGet(b, 5*time.Minute, 10)
+}
+
+func BenchmarkCacheWithLRUGetNotExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGet(b, NoExpiration, 10)
+}
+
+func benchmarkCacheWithLRUGet(b *testing.B, exp time.Duration, max int) {
+	b.StopTimer()
+	tc := NewWithLRU(exp, 0, max)
+	tc.Set("foo", "bar", DefaultExpiration)
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tc.Get("foo")
+	}
+}
+
 func BenchmarkRWMutexMapGet(b *testing.B) {
 	b.StopTimer()
 	m := map[string]string{
@@ -1514,6 +1567,34 @@ func benchmarkCacheGetConcurrent(b *testing.B, exp time.Duration) {
 	wg.Wait()
 }
 
+func BenchmarkCacheWithLRUGetConcurrentExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGetConcurrent(b, 5*time.Minute, 10)
+}
+
+func BenchmarkCacheWithLRUGetConcurrentNotExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGetConcurrent(b, NoExpiration, 10)
+}
+
+func benchmarkCacheWithLRUGetConcurrent(b *testing.B, exp time.Duration, max int) {
+	b.StopTimer()
+	tc := NewWithLRU(exp, 0, max)
+	tc.Set("foo", "bar", DefaultExpiration)
+	wg := new(sync.WaitGroup)
+	workers := runtime.NumCPU()
+	each := b.N / workers
+	wg.Add(workers)
+	b.StartTimer()
+	for i := 0; i < workers; i++ {
+		go func() {
+			for j := 0; j < each; j++ {
+				tc.Get("foo")
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
+
 func BenchmarkRWMutexMapGetConcurrent(b *testing.B) {
 	b.StopTimer()
 	m := map[string]string{
@@ -1574,6 +1655,42 @@ func benchmarkCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
 	wg.Wait()
 }
 
+func BenchmarkCacheWithLRUGetManyConcurrentExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGetManyConcurrent(b, 5*time.Minute, 10000)
+}
+
+func BenchmarkCacheWithLRUGetManyConcurrentNotExpiring(b *testing.B) {
+	benchmarkCacheWithLRUGetManyConcurrent(b, NoExpiration, 10000)
+}
+
+func benchmarkCacheWithLRUGetManyConcurrent(b *testing.B, exp time.Duration, max int) {
+	// This is the same as BenchmarkCacheWithLRUGetConcurrent, but its result
+	// can be compared against BenchmarkShardedCacheWithLRUGetManyConcurrent
+	// in sharded_test.go.
+	b.StopTimer()
+	n := 10000
+	tc := NewWithLRU(exp, 0, max)
+	keys := make([]string, n)
+	for i := 0; i < n; i++ {
+		k := "foo" + strconv.Itoa(n)
+		keys[i] = k
+		tc.Set(k, "bar", DefaultExpiration)
+	}
+	each := b.N / n
+	wg := new(sync.WaitGroup)
+	wg.Add(n)
+	for _, v := range keys {
+		go func() {
+			for j := 0; j < each; j++ {
+				tc.Get(v)
+			}
+			wg.Done()
+		}()
+	}
+	b.StartTimer()
+	wg.Wait()
+}
+
 func BenchmarkCacheSetExpiring(b *testing.B) {
 	benchmarkCacheSet(b, 5*time.Minute)
 }
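For reference, here is a minimal usage sketch of the API this patch adds (`NewWithLRU()`, `DeleteLRU()`, and `DeleteLRUAmount()`), assuming the usual `github.com/patrickmn/go-cache` import path. It is illustrative only and not part of the patch; note that, as the TODO in cache_test.go explains, the ring-buffer purge removes some of the oldest items rather than strictly the oldest ones.

```go
// Sketch: manual LRU purging with the API added by this patch.
package main

import (
	"fmt"
	"time"

	cache "github.com/patrickmn/go-cache"
)

func main() {
	// Soft cap of 2 items; a cleanup interval of 0 disables the janitor,
	// so LRU purging below is done manually.
	c := cache.NewWithLRU(5*time.Minute, 0, 2)

	c.Set("a", 1, cache.DefaultExpiration)
	c.Set("b", 2, cache.DefaultExpiration)
	c.Set("c", 3, cache.DefaultExpiration)

	// Reading an item updates its Accessed timestamp, so "a" is no
	// longer the least-recently-used entry.
	if v, found := c.Get("a"); found {
		fmt.Println("a =", v)
	}

	// Trim the cache back down to maxItems (removes one item here)...
	c.DeleteLRU()
	// ...or delete an explicit number of least-recently-used items.
	c.DeleteLRUAmount(1)

	fmt.Println("items left:", c.ItemCount()) // 1
}
```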