Repository: sm/cache
Commit 6528eb255b
Author: Peng Gao, 2016-08-27 12:02:52 +00:00 (committed by GitHub)
12 changed files with 731 additions and 2200 deletions

9
.pullapprove.yml Normal file

@ -0,0 +1,9 @@
approve_by_comment: true
approve_regex: ^LGTM
reject_regex: ^Rejected
reset_on_push: true
reviewers:
members:
- ggaaooppeenngg
name: pullapprove
required: 1

8
.travis.yml Normal file

@ -0,0 +1,8 @@
language: go
go:
- tip
script:
- go test -v -coverprofile=coverage.txt -covermode=atomic

CONTRIBUTORS

@ -1,8 +1,10 @@
This is a list of people who have contributed code to go-cache. They, or their
This is a list of people who have contributed code to cachemap. They, or their
employers, are the copyright holders of the contributed code. Contributed code
is subject to the license restrictions listed in LICENSE (as they were when the
code was contributed.)
Peng Gao <peng.gao.dut@gmail.com>
Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Sergey Shepelev <temotor@gmail.com>
Peng Gao <peng.gao.dut@gmail.com>

LICENSE

@ -1,4 +1,5 @@
Copyright (c) 2012-2015 Patrick Mylund Nielsen and the go-cache contributors
Copyright (c) 2016 Peng Gao and cachemap contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md

@ -1,6 +1,9 @@
# go-cache
[![Build Status](https://travis-ci.org/ggaaooppeenngg/cachemap.svg?branch=master)](https://travis-ci.org/ggaaooppeenngg/cachemap)
[![Go Report Card](https://goreportcard.com/badge/github.com/ggaaooppeenngg/cachemap)](https://goreportcard.com/report/github.com/ggaaooppeenngg/cachemap)
[![GoDoc](https://godoc.org/github.com/ggaaooppeenngg/cachemap?status.svg)](https://godoc.org/github.com/ggaaooppeenngg/cachemap)
# cachemap
go-cache is an in-memory key:value store/cache similar to memcached that is
cachemap is an in-memory key:value store/cache similar to memcached that is
suitable for applications running on a single machine. Its major advantage is
that, being essentially a thread-safe `map[string]interface{}` with expiration
times, it doesn't need to serialize or transmit its contents over the network.
@ -8,21 +11,21 @@ times, it doesn't need to serialize or transmit its contents over the network.
Any object can be stored, for a given duration or forever, and the cache can be
safely used by multiple goroutines.
Although go-cache isn't meant to be used as a persistent datastore, the entire
Although cachemap isn't meant to be used as a persistent datastore, the entire
cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
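
The `Items()` / `NewFrom()` round trip described above comes from the upstream go-cache API. Purely as an illustration, a minimal save/load sketch against that API might look like the following; the `persist` package name, the helper names, and the use of `encoding/gob` are assumptions for this example, not part of this repository:

```go
package persist

import (
	"encoding/gob"
	"os"
	"time"

	cache "github.com/patrickmn/go-cache"
)

// saveCache serializes the cache's current items map to path using gob.
// Concrete types stored behind interface{} values must be registered
// with gob.Register before encoding.
func saveCache(c *cache.Cache, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return gob.NewEncoder(f).Encode(c.Items())
}

// loadCache rebuilds a cache from a previously saved items map.
func loadCache(path string, defaultExpiration, cleanupInterval time.Duration) (*cache.Cache, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	items := map[string]cache.Item{}
	if err := gob.NewDecoder(f).Decode(&items); err != nil {
		return nil, err
	}
	return cache.NewFrom(defaultExpiration, cleanupInterval, items), nil
}
```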
### Installation
`go get github.com/patrickmn/go-cache`
`go get github.com/ggaaooppeenngg/cachemap`
### Usage
```go
import (
"fmt"
"github.com/patrickmn/go-cache"
"github.com/ggaaooppeenngg/cachemap"
"time"
)
@ -102,6 +105,30 @@ one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats
}
```
### Reference
### Benchmark
`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
| benchmark\package | go-cache | cachemap |
|-----------------------------------------------------|-----------------------|----------------------|
| BenchmarkCacheGetExpiring-v | 30000000,46.3 ns/op | 20000000,43.4 ns/op |
| BenchmarkCacheGetNotExpiring-v | 50000000,29.6 ns/op | 50000000,29.6 ns/op |
| BenchmarkRWMutexMapGet-x | 50000000,26.7 ns/op | 50000000,26.6 ns/op |
| BenchmarkRWMutexInterfaceMapGetStruct-x | 20000000,75.1 ns/op | 20000000,66.1 ns/op |
| BenchmarkRWMutexInterfaceMapGetString-x | 20000000,75.3 ns/op | 20000000,67.6 ns/op |
| BenchmarkCacheGetConcurrentExpiring-v | 20000000,67.8 ns/op | 20000000,68.9 ns/op |
| BenchmarkCacheGetConcurrentNotExpiring-v | 20000000,69.2 ns/op | 20000000,68.6 ns/op |
| BenchmarkRWMutexMapGetConcurrent-x | 30000000,57.4 ns/op | 20000000,64.7 ns/op |
| BenchmarkCacheGetManyConcurrentExpiring-v | 100000000,68.0 ns/op | 100000000,66.7 ns/op |
| BenchmarkCacheGetManyConcurrentNotExpiring-v | 2000000000,68.3 ns/op | 20000000,69.3 ns/op |
| BenchmarkCacheSetExpiring-4 | 10000000,173 ns/op | 20000000,91.4 ns/op |
| BenchmarkCacheSetNotExpiring-4 | 10000000,123 ns/op | 20000000,100 ns/op |
| BenchmarkRWMutexMapSet-4 | 20000000,88.5 ns/op | 20000000,74.5 ns/op |
| BenchmarkCacheSetDelete-4 | 5000000,257 ns/op | 10000000,151 ns/op |
| BenchmarkRWMutexMapSetDelete-4 | 10000000,180 ns/op | 10000000,154 ns/op |
| BenchmarkCacheSetDeleteSingleLock-4 | 10000000,211 ns/op | 20000000,118 ns/op |
| BenchmarkRWMutexMapSetDeleteSingleLock-4 | 10000000,142 ns/op | 20000000,118 ns/op |
| BenchmarkIncrementInt-4 | 10000000,167 ns/op | |
| BenchmarkDeleteExpiredLoop-4 | 500,2584384 ns/op | 1000,2173019 ns/op |
| BenchmarkShardedCacheGetExpiring-4 | 20000000,79.5 ns/op | 20000000,67.9 ns/op |
| BenchmarkShardedCacheGetNotExpiring-4 | 30000000,59.3 ns/op | 20000000,49.9 ns/op |
| BenchmarkShardedCacheGetManyConcurrentExpiring-4 | 2000000000,52.4 ns/op | 10000000,75.8 ns/op |
| BenchmarkShardedCacheGetManyConcurrentNotExpiring-4 | 100000000,68.2 ns/op | 20000000,75.8 ns/op |
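
(These numbers can presumably be reproduced with the standard tooling, e.g. `go test -bench . -benchmem` in each package; a numeric suffix such as `-4` in a benchmark name is the GOMAXPROCS value reported by `go test`.)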

964
cache.go

File diff suppressed because it is too large

File diff suppressed because it is too large

323
cachemap/cache.tmpl Normal file

@ -0,0 +1,323 @@
package {{.PackageName}}
import (
"fmt"
"runtime"
"sync"
"time"
)
// {{.ValueType}}CacheAttr holds the configuration for a {{.ValueType}}Cache.
type {{.ValueType}}CacheAttr struct {
OnEvicted func(k string, v {{.ValueType}}) // called with (k, v) when an item is evicted, if set
DefaultCleanupInterval time.Duration // default cleanup interval
DefaultExpiration time.Duration // default expiration duration
Size int64 // initial size of the map
}
// Item is a cached value together with its expiration timestamp in UnixNano (0 means no expiration).
type Item struct {
Object {{.ValueType}}
Expiration int64
}
// Expired returns true if the item has a non-zero Expiration that is in the past.
func (item Item) Expired() bool {
return item.Expiration != 0 && time.Now().UnixNano() > item.Expiration
}
const (
// NoExpiration is for use with functions that take no expiration time.
NoExpiration time.Duration = -1
// DefaultExpiration is for use with functions that take an
// expiration time. Equivalent to passing in the same expiration
// duration as was given to New{{.ValueType}}Cache() when the cache was
// created (e.g. 5 minutes.)
DefaultExpiration time.Duration = 0
)
// {{.ValueType}}Cache is the exported cache type for {{.ValueType}} values.
type {{.ValueType}}Cache struct {
*cache
// If this is confusing, see the comment in newCacheWithJanitor()
}
type cache struct {
defaultExpiration time.Duration
items map[string]Item
mu sync.RWMutex
onEvicted func(string, {{.ValueType}})
janitor *janitor
}
// Add an item to the cache, replacing any existing item. If the duration is 0
// (DefaultExpiration), the cache's default expiration time is used. If it is -1
// (NoExpiration), the item never expires.
func (c *cache) Set(k string, x {{.ValueType}}, d time.Duration) {
// "Inlining" of set
var e int64
if d == DefaultExpiration {
d = c.defaultExpiration
}
if d > 0 {
e = time.Now().Add(d).UnixNano()
}
c.mu.Lock()
c.items[k] = Item{
Object: x,
Expiration: e,
}
// TODO: Calls to mu.Unlock are currently not deferred because defer
// adds ~200 ns (as of go1.)
c.mu.Unlock()
}
func (c *cache) set(k string, x {{.ValueType}}, d time.Duration) {
var e int64
if d == DefaultExpiration {
d = c.defaultExpiration
}
if d > 0 {
e = time.Now().Add(d).UnixNano()
}
c.items[k] = Item{
Object: x,
Expiration: e,
}
}
// Add an item to the cache only if an item doesn't already exist for the given
// key, or if the existing item has expired. Returns an error otherwise.
func (c *cache) Add(k string, x {{.ValueType}}, d time.Duration) error {
c.mu.Lock()
_, found := c.get(k)
if found {
c.mu.Unlock()
return fmt.Errorf("Item %s already exists", k)
}
c.set(k, x, d)
c.mu.Unlock()
return nil
}
// Set a new value for the cache key only if it already exists, and the existing
// item hasn't expired. Returns an error otherwise.
func (c *cache) Replace(k string, x {{.ValueType}}, d time.Duration) error {
c.mu.Lock()
_, found := c.get(k)
if !found {
c.mu.Unlock()
return fmt.Errorf("Item %s doesn't exist", k)
}
c.set(k, x, d)
c.mu.Unlock()
return nil
}
// Get an item from the cache. Returns the item or nil, and a bool indicating
// whether the key was found.
func (c *cache) Get(k string) ({{.ValueType}}, bool) {
c.mu.RLock()
// "Inlining" of get and Expired
item, found := c.items[k]
// TODO: inline time.Now implementation
if !found || item.Expiration > 0 && time.Now().UnixNano() > item.Expiration {
c.mu.RUnlock()
return {{ .ZeroValue }}, false
}
c.mu.RUnlock()
return item.Object, true
}
func (c *cache) get(k string) (*{{.ValueType}}, bool) {
item, found := c.items[k]
if !found || item.Expiration > 0 && time.Now().UnixNano() > item.Expiration {
return nil, false
}
return &item.Object, true
}
// Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
// item's value is not an integer, if it was not found, or if it is not
// possible to increment it by n. To retrieve the incremented value, use one
// of the specialized methods, e.g. IncrementInt64.
// TODO: implement Increment for numeric types.
func (c *cache) Increment(k string, n int64) error {
return nil
}
// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint,
// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
// item's value is not an integer, if it was not found, or if it is not
// possible to decrement it by n. To retrieve the decremented value, use one
// of the specialized methods, e.g. DecrementInt64.
// TODO: Decrement
func (c *cache) Decrement(k string, n int64) error {
// TODO: Implement Increment and Decrement more cleanly.
// (Cannot do Increment(k, n*-1) for uints.)
return nil
}
// Delete an item from the cache. Does nothing if the key is not in the cache.
func (c *cache) Delete(k string) {
// fast path
if c.onEvicted == nil {
c.mu.Lock()
c.deleteFast(k)
c.mu.Unlock()
return
}
// slow path
c.mu.Lock()
v, evicted := c.delete(k)
c.mu.Unlock()
if evicted {
c.onEvicted(k, v)
}
}
func (c *cache) delete(k string) ({{.ValueType}}, bool) {
if v, found := c.items[k]; found {
delete(c.items, k)
return v.Object, true
}
return {{ .ZeroValue }}, false
}
func (c *cache) deleteFast(k string) {
delete(c.items, k)
}
type keyAndValue struct {
key string
value {{.ValueType}}
}
// Delete all expired items from the cache.
func (c *cache) DeleteExpired() {
var evictedItems []keyAndValue
now := time.Now().UnixNano()
// fast path
if c.onEvicted == nil {
c.mu.Lock()
for k, v := range c.items {
// "Inlining" of expired
if v.Expiration > 0 && now > v.Expiration {
c.deleteFast(k)
}
}
c.mu.Unlock()
return
}
// slow path
c.mu.Lock()
for k, v := range c.items {
// "Inlining" of expired
if v.Expiration > 0 && now > v.Expiration {
ov, evicted := c.delete(k)
if evicted {
evictedItems = append(evictedItems, keyAndValue{k, ov})
}
}
}
c.mu.Unlock()
for _, v := range evictedItems {
c.onEvicted(v.key, v.value)
}
}
// Sets an (optional) function that is called with the key and value when an
// item is evicted from the cache. (Including when it is deleted manually, but
// not when it is overwritten.) Set to nil to disable.
// NOTE: taking the lock here is pointless.
func (c *cache) OnEvicted(f func(string, {{.ValueType}})) {
c.mu.Lock()
c.onEvicted = f
c.mu.Unlock()
}
// Returns the number of items in the cache. This may include items that have
// expired, but have not yet been cleaned up. Equivalent to len(c.Items()).
func (c *cache) ItemCount() int {
c.mu.RLock()
n := len(c.items)
c.mu.RUnlock()
return n
}
// Delete all items from the cache.
func (c *cache) Flush() {
c.mu.Lock()
c.items = map[string]Item{}
c.mu.Unlock()
}
type janitor struct {
Interval time.Duration
stop chan bool
}
func (j *janitor) Run(c *cache) {
j.stop = make(chan bool)
ticker := time.NewTicker(j.Interval)
for {
select {
case <-ticker.C:
c.DeleteExpired()
case <-j.stop:
ticker.Stop()
return
}
}
}
func stopJanitor(c *{{.ValueType}}Cache) {
c.janitor.stop <- true
}
func runJanitor(c *cache, ci time.Duration) {
j := &janitor{
Interval: ci,
}
c.janitor = j
go j.Run(c)
}
func newCache(de time.Duration, m map[string]Item) *cache {
if de == 0 {
de = -1
}
c := &cache{
defaultExpiration: de,
items: m,
}
return c
}
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item, onEvicted func(k string, v {{.ValueType}})) *{{.ValueType}}Cache {
c := newCache(de, m)
c.onEvicted = onEvicted
// This trick ensures that the janitor goroutine (which--granted it
// was enabled--is running DeleteExpired on c forever) does not keep
// the returned C object from being garbage collected. When it is
// garbage collected, the finalizer stops the janitor goroutine, after
// which c can be collected.
C := &{{.ValueType}}Cache{c}
if ci > 0 {
runJanitor(c, ci)
runtime.SetFinalizer(C, stopJanitor)
}
return C
}
// New{{.ValueType}}Cache returns a new cache with the given default expiration
// duration and cleanup interval. If the expiration duration is less than one
// (or NoExpiration), items in the cache never expire by default and must be
// deleted manually. If the cleanup interval is less than one, expired items
// are not deleted from the cache until c.DeleteExpired() is called.
func New{{.ValueType}}Cache(attr {{.ValueType}}CacheAttr) *{{.ValueType}}Cache {
items := make(map[string]Item, attr.Size)
return newCacheWithJanitor(attr.DefaultExpiration, attr.DefaultCleanupInterval, items, attr.OnEvicted)
}
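
For orientation, here is a hypothetical use of the code this template would generate for a value type `Foo`. The names `Foo`, `FooCache`, `FooCacheAttr`, `NewFooCache`, and the surrounding `main` package are assumptions based on the `{{.ValueType}}` substitution above; this is not code from the commit, and it only compiles alongside the generated file:

```go
// Hypothetical usage of the code generated for `type Foo int` in this package
// (the generated file defines FooCache, FooCacheAttr, NewFooCache and
// DefaultExpiration, following cache.tmpl above).
package main

import (
	"fmt"
	"time"
)

type Foo int

func main() {
	c := NewFooCache(FooCacheAttr{
		DefaultExpiration:      5 * time.Minute,
		DefaultCleanupInterval: 30 * time.Second,
		Size:                   64,
	})
	c.Set("answer", Foo(42), DefaultExpiration)
	if v, ok := c.Get("answer"); ok {
		fmt.Println(v) // 42
	}
}
```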

137
cachemap/main.go Normal file

@ -0,0 +1,137 @@
package main
import (
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path"
"path/filepath"
"runtime"
"text/template"
)
func fatal(v ...interface{}) {
fmt.Fprintln(os.Stderr, v...)
os.Exit(1)
}
func packageDir() string {
_, filename, _, ok := runtime.Caller(0)
if !ok {
panic("No caller information")
}
return path.Dir(filename)
}
// findInGenDecl searches a GenDecl for a type declaration named grammarName
// and returns the name of its underlying type (e.g. "int" for "type Foo int"),
// or "" if no such declaration is found.
func findInGenDecl(genDecl *ast.GenDecl, grammarName string) string {
for _, spec := range genDecl.Specs {
typeSpec, ok := spec.(*ast.TypeSpec)
if ok && typeSpec.Name.Name == grammarName {
// the underlying type must be a plain identifier
ident, ok := typeSpec.Type.(*ast.Ident)
if ok {
return ident.Name
}
}
}
return ""
}
func findInDecl(decl ast.Decl, grammarName string) string {
genDecl, ok := decl.(*ast.GenDecl)
if ok {
g := findInGenDecl(genDecl, grammarName)
if g != "" {
return g
}
}
return ""
}
// zeroValue returns literal zero value.
func zeroValue(s string) string {
// TODO: support func type.
switch s {
case "string":
return "\"\""
case "int", "uint", "int64", "uint64", "uint32", "int32", "int16",
"uint16", "int8", "uint8", "byte", "rune", "float64", "float32",
"complex64", "complex128", "uintptr":
return "0"
case "slice":
return "nil"
default:
if s[0] == '*' { // Pointer
return "nil"
}
return s + "{}"
}
}
// TODO: support more builtin types
func builtin(s string) bool {
switch s {
case "string":
return true
}
return false
}
func main() {
keyType := flag.String("k", "", "key type")
valueType := flag.String("v", "", "value type")
flag.Parse()
if *keyType == "" {
fatal("key empty")
}
if *valueType == "" {
fatal("value empty")
}
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, ".", nil, parser.ParseComments)
if err != nil {
fatal(err)
}
packageName := "main"
typeName := ""
for name, pkg := range pkgs {
packageName = name
for _, f := range pkg.Files {
for _, decl := range f.Decls {
typeName = findInDecl(decl, *valueType)
}
}
}
if typeName == "" && !builtin(*valueType) {
fatal(fmt.Errorf("found no definition of %s in files", *valueType))
}
if typeName == "" {
typeName = *valueType
}
zeroTypeValue := zeroValue(typeName)
f, err := os.OpenFile(fmt.Sprintf("%s2%s_cachemap.go", *keyType, *valueType), os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
fatal(err)
}
defer f.Close()
tpl, err := template.New("cache.tmpl").ParseFiles(filepath.Join(packageDir(), "cache.tmpl"))
if err != nil {
fatal(err)
}
err = tpl.Execute(
f,
map[string]string{
"ValueType": *valueType,
"PackageName": packageName,
"Cache": fmt.Sprintf("String2%sCache", *valueType),
"ZeroValue": zeroTypeValue,
},
)
if err != nil {
fatal(err)
}
}
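
One plausible way to drive this generator is a `go:generate` directive in the target package. The sketch below assumes the binary built from cachemap/main.go is installed on PATH as `cachemap`; the package name and the `Foo` type are illustrative:

```go
// Package mycache shows one plausible way to wire the generator in via
// go:generate. The `cachemap` command name assumes the binary built from
// cachemap/main.go has been installed; adjust as needed.
package mycache

// Generating for value type Foo produces string2Foo_cachemap.go,
// per the file-naming scheme in main() above.
//go:generate cachemap -k string -v Foo

// Foo must be a defined type whose underlying type is a plain identifier,
// so the generator can derive its zero value.
type Foo int
```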

sharded.go

@ -66,19 +66,19 @@ func (sc *shardedCache) bucket(k string) *cache {
return sc.cs[djb33(sc.seed, k)%sc.m]
}
func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
func (sc *shardedCache) Set(k string, x ValueType_tpl, d time.Duration) {
sc.bucket(k).Set(k, x, d)
}
func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
func (sc *shardedCache) Add(k string, x ValueType_tpl, d time.Duration) error {
return sc.bucket(k).Add(k, x, d)
}
func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
func (sc *shardedCache) Replace(k string, x ValueType_tpl, d time.Duration) error {
return sc.bucket(k).Replace(k, x, d)
}
func (sc *shardedCache) Get(k string) (interface{}, bool) {
func (sc *shardedCache) Get(k string) (ValueType_tpl, bool) {
return sc.bucket(k).Get(k)
}
@ -86,10 +86,6 @@ func (sc *shardedCache) Increment(k string, n int64) error {
return sc.bucket(k).Increment(k, n)
}
func (sc *shardedCache) IncrementFloat(k string, n float64) error {
return sc.bucket(k).IncrementFloat(k, n)
}
func (sc *shardedCache) Decrement(k string, n int64) error {
return sc.bucket(k).Decrement(k, n)
}
@ -109,12 +105,9 @@ func (sc *shardedCache) DeleteExpired() {
// fields of the items should be checked. Note that explicit synchronization
// is needed to use a cache and its corresponding Items() return values at
// the same time, as the maps are shared.
func (sc *shardedCache) Items() []map[string]Item {
res := make([]map[string]Item, len(sc.cs))
for i, v := range sc.cs {
res[i] = v.Items()
}
return res
// TODO: do not expose this interface; callers should not know about the underlying data.
func (sc *shardedCache) items() []map[string]Item {
return nil
}
func (sc *shardedCache) Flush() {

sharded_test.go

@ -29,7 +29,7 @@ var shardedKeys = []string{
func TestShardedCache(t *testing.T) {
tc := unexportedNewSharded(DefaultExpiration, 0, 13)
for _, v := range shardedKeys {
tc.Set(v, "value", DefaultExpiration)
tc.Set(v, ValueType_tpl(1), DefaultExpiration)
}
}
@ -44,7 +44,7 @@ func BenchmarkShardedCacheGetNotExpiring(b *testing.B) {
func benchmarkShardedCacheGet(b *testing.B, exp time.Duration) {
b.StopTimer()
tc := unexportedNewSharded(exp, 0, 10)
tc.Set("foobarba", "zquux", DefaultExpiration)
tc.Set("foobarba", ValueType_tpl(1), DefaultExpiration)
b.StartTimer()
for i := 0; i < b.N; i++ {
tc.Get("foobarba")
@ -65,17 +65,18 @@ func benchmarkShardedCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
tsc := unexportedNewSharded(exp, 0, 20)
keys := make([]string, n)
for i := 0; i < n; i++ {
k := "foo" + strconv.Itoa(n)
k := "foo" + strconv.Itoa(i)
keys[i] = k
tsc.Set(k, "bar", DefaultExpiration)
tsc.Set(k, ValueType_tpl(1), DefaultExpiration)
}
each := b.N / n
wg := new(sync.WaitGroup)
wg.Add(n)
for _, v := range keys {
x := v
go func() {
for j := 0; j < each; j++ {
tsc.Get(v)
tsc.Get(x)
}
wg.Done()
}()

3
valtyp.go Normal file

@ -0,0 +1,3 @@
package cache
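// ValueType_tpl is a placeholder value type; the sharded cache sources and
// their tests in this repository use it so they compile as ordinary Go code.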
type ValueType_tpl int