sm/cache

Move the unexportedShardedCache to sharded.go

Patrick Mylund Nielsen 2014-12-22 00:24:49 -05:00
parent 6ea7e6fb42
commit ffd0b9e3ec
2 changed files with 135 additions and 121 deletions
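For context, the moved type is an in-package sharded wrapper around the regular cache, so its constructor can only be called from inside the cache package (cache_test.go, referenced in the moved comment, exercises it the same way). A minimal sketch of how the pieces fit together, written as a hypothetical in-package test; the test itself is not part of this commit, only the identifiers it calls appear in the diff below:

package cache

import (
	"testing"
	"time"
)

// TestShardedSketch exercises the moved constructor and wrapper methods:
// sixteen shards, a 5-minute default expiration, and a janitor sweeping
// expired items every minute.
func TestShardedSketch(t *testing.T) {
	sc := unexportedNewSharded(16, 5*time.Minute, time.Minute)
	sc.Set("session:42", "payload", 10*time.Second)
	if v, found := sc.Get("session:42"); !found || v.(string) != "payload" {
		t.Fatalf("unexpected result: %v, %v", v, found)
	}
	sc.Delete("session:42")
	if _, found := sc.Get("session:42"); found {
		t.Fatal("expected session:42 to be gone after Delete")
	}
}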

cache.go (121 deletions)

@@ -1,10 +1,8 @@
 package cache
 
 import (
-	"encoding/binary"
	"encoding/gob"
	"fmt"
-	"hash/fnv"
	"io"
	"os"
	"runtime"
@@ -975,122 +973,3 @@ func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
 	}
 	return C
 }
-
-type unexportedShardedCache struct {
-	*shardedCache
-}
-
-type shardedCache struct {
-	m       uint32
-	cs      []*cache
-	janitor *shardedJanitor
-}
-
-func (sc *shardedCache) bucket(k string) *cache {
-	h := fnv.New32()
-	h.Write([]byte(k))
-	n := binary.BigEndian.Uint32(h.Sum(nil))
-	return sc.cs[n%sc.m]
-}
-
-func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
-	sc.bucket(k).Set(k, x, d)
-}
-
-func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
-	return sc.bucket(k).Add(k, x, d)
-}
-
-func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
-	return sc.bucket(k).Replace(k, x, d)
-}
-
-func (sc *shardedCache) Get(k string) (interface{}, bool) {
-	return sc.bucket(k).Get(k)
-}
-
-func (sc *shardedCache) Increment(k string, n int64) error {
-	return sc.bucket(k).Increment(k, n)
-}
-
-func (sc *shardedCache) IncrementFloat(k string, n float64) error {
-	return sc.bucket(k).IncrementFloat(k, n)
-}
-
-func (sc *shardedCache) Decrement(k string, n int64) error {
-	return sc.bucket(k).Decrement(k, n)
-}
-
-func (sc *shardedCache) Delete(k string) {
-	sc.bucket(k).Delete(k)
-}
-
-func (sc *shardedCache) DeleteExpired() {
-	for _, v := range sc.cs {
-		v.DeleteExpired()
-	}
-}
-
-func (sc *shardedCache) Flush() {
-	for _, v := range sc.cs {
-		v.Flush()
-	}
-}
-
-type shardedJanitor struct {
-	Interval time.Duration
-	stop     chan bool
-}
-
-func (j *shardedJanitor) Run(sc *shardedCache) {
-	j.stop = make(chan bool)
-	tick := time.Tick(j.Interval)
-	for {
-		select {
-		case <-tick:
-			sc.DeleteExpired()
-		case <-j.stop:
-			return
-		}
-	}
-}
-
-func stopShardedJanitor(sc *unexportedShardedCache) {
-	sc.janitor.stop <- true
-}
-
-func runShardedJanitor(sc *shardedCache, ci time.Duration) {
-	j := &shardedJanitor{
-		Interval: ci,
-	}
-	sc.janitor = j
-	go j.Run(sc)
-}
-
-func newShardedCache(n int, de time.Duration) *shardedCache {
-	sc := &shardedCache{
-		m:  uint32(n - 1),
-		cs: make([]*cache, n),
-	}
-	for i := 0; i < n; i++ {
-		c := &cache{
-			defaultExpiration: de,
-			items:             map[string]*Item{},
-		}
-		sc.cs[i] = c
-	}
-	return sc
-}
-
-func unexportedNewSharded(shards int, defaultExpiration, cleanupInterval time.Duration) *unexportedShardedCache {
-	if defaultExpiration == 0 {
-		defaultExpiration = -1
-	}
-	sc := newShardedCache(shards, defaultExpiration)
-	SC := &unexportedShardedCache{sc}
-	if cleanupInterval > 0 {
-		runShardedJanitor(sc, cleanupInterval)
-		runtime.SetFinalizer(SC, stopShardedJanitor)
-	}
-	return SC
-}
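Tracing the deleted bucket logic before it reappears in sharded.go below: each key is hashed with FNV-32, the digest is read back as a big-endian uint32, and the result is reduced modulo m. Since newShardedCache initializes m to uint32(n - 1) rather than uint32(n), only the first n-1 of the n shards can ever be selected, and n = 1 panics with a division by zero. A standalone sketch of the arithmetic (shardIndex is an illustrative name, not from the repository):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// shardIndex mirrors shardedCache.bucket from the diff: FNV-32 hash of the
// key, read back as a big-endian uint32, reduced modulo m.
func shardIndex(k string, m uint32) uint32 {
	h := fnv.New32()
	h.Write([]byte(k))
	return binary.BigEndian.Uint32(h.Sum(nil)) % m
}

func main() {
	// With n = 4 shards this commit sets m = 3, so only shards 0..2 are
	// ever selected and cs[3] stays empty.
	for _, k := range []string{"alpha", "beta", "gamma", "delta"} {
		fmt.Printf("%-6s -> shard %d\n", k, shardIndex(k, 3))
	}
}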

sharded.go (new file, 135 additions)

@@ -0,0 +1,135 @@
+package cache
+
+import (
+	"encoding/binary"
+	"hash/fnv"
+	"runtime"
+	"time"
+)
+
+// This is an experimental and unexported (for now) attempt at making a cache
+// with better algorithmic complexity than the standard one, namely by
+// preventing write locks of the entire cache when an item is added. As of the
+// time of writing, the overhead of selecting buckets is much too great to be
+// worth it except perhaps with extremely large caches.
+//
+// See cache_test.go for a few benchmarks.
+type unexportedShardedCache struct {
+	*shardedCache
+}
+
+type shardedCache struct {
+	m       uint32
+	cs      []*cache
+	janitor *shardedJanitor
+}
+
+func (sc *shardedCache) bucket(k string) *cache {
+	h := fnv.New32()
+	h.Write([]byte(k))
+	n := binary.BigEndian.Uint32(h.Sum(nil))
+	return sc.cs[n%sc.m]
+}
+
+func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
+	sc.bucket(k).Set(k, x, d)
+}
+
+func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Add(k, x, d)
+}
+
+func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Replace(k, x, d)
+}
+
+func (sc *shardedCache) Get(k string) (interface{}, bool) {
+	return sc.bucket(k).Get(k)
+}
+
+func (sc *shardedCache) Increment(k string, n int64) error {
+	return sc.bucket(k).Increment(k, n)
+}
+
+func (sc *shardedCache) IncrementFloat(k string, n float64) error {
+	return sc.bucket(k).IncrementFloat(k, n)
+}
+
+func (sc *shardedCache) Decrement(k string, n int64) error {
+	return sc.bucket(k).Decrement(k, n)
+}
+
+func (sc *shardedCache) Delete(k string) {
+	sc.bucket(k).Delete(k)
+}
+
+func (sc *shardedCache) DeleteExpired() {
+	for _, v := range sc.cs {
+		v.DeleteExpired()
+	}
+}
+
+func (sc *shardedCache) Flush() {
+	for _, v := range sc.cs {
+		v.Flush()
+	}
+}
+
+type shardedJanitor struct {
+	Interval time.Duration
+	stop     chan bool
+}
+
+func (j *shardedJanitor) Run(sc *shardedCache) {
+	j.stop = make(chan bool)
+	tick := time.Tick(j.Interval)
+	for {
+		select {
+		case <-tick:
+			sc.DeleteExpired()
+		case <-j.stop:
+			return
+		}
+	}
+}
+
+func stopShardedJanitor(sc *unexportedShardedCache) {
+	sc.janitor.stop <- true
+}
+
+func runShardedJanitor(sc *shardedCache, ci time.Duration) {
+	j := &shardedJanitor{
+		Interval: ci,
+	}
+	sc.janitor = j
+	go j.Run(sc)
+}
+
+func newShardedCache(n int, de time.Duration) *shardedCache {
+	sc := &shardedCache{
+		m:  uint32(n - 1),
+		cs: make([]*cache, n),
+	}
+	for i := 0; i < n; i++ {
+		c := &cache{
+			defaultExpiration: de,
+			items:             map[string]*Item{},
+		}
+		sc.cs[i] = c
+	}
+	return sc
+}
+
+func unexportedNewSharded(shards int, defaultExpiration, cleanupInterval time.Duration) *unexportedShardedCache {
+	if defaultExpiration == 0 {
+		defaultExpiration = -1
+	}
+	sc := newShardedCache(shards, defaultExpiration)
+	SC := &unexportedShardedCache{sc}
+	if cleanupInterval > 0 {
+		runShardedJanitor(sc, cleanupInterval)
+		runtime.SetFinalizer(SC, stopShardedJanitor)
+	}
+	return SC
+}
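One design point in the moved code deserves a note: the janitor goroutine launched by runShardedJanitor holds a reference to the inner *shardedCache, so that value can never become garbage while the janitor runs, and a finalizer on it would never fire. The finalizer is therefore attached to the outer unexportedShardedCache wrapper, which the goroutine does not reference; once callers drop the wrapper, the runtime can run stopShardedJanitor and shut the goroutine down. A generic, self-contained sketch of the pattern (hypothetical names, not from the repository; it also uses time.NewTicker, which can be stopped, rather than the leak-prone time.Tick used above):

package main

import (
	"fmt"
	"runtime"
	"time"
)

type janitor struct {
	stop chan bool
}

// run sweeps until told to stop. It keeps c alive, which is exactly why the
// finalizer below cannot live on *inner.
func (j *janitor) run(c *inner) {
	t := time.NewTicker(50 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			fmt.Println("sweep", c.name)
		case <-j.stop:
			fmt.Println("janitor stopped")
			return
		}
	}
}

type inner struct {
	name string
	j    *janitor
}

// outer exists only to carry the finalizer: nothing but the caller
// references it, so it becomes collectible as soon as the caller is done.
type outer struct{ *inner }

func stopJanitor(o *outer) { o.j.stop <- true }

func newCache(name string) *outer {
	in := &inner{name: name, j: &janitor{stop: make(chan bool)}}
	go in.j.run(in)
	o := &outer{in}
	runtime.SetFinalizer(o, stopJanitor)
	return o
}

func main() {
	newCache("demo")
	runtime.GC() // the outer wrapper is unreachable; its finalizer may stop the janitor
	time.Sleep(200 * time.Millisecond)
}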