
Commit

initial
coocood committed Apr 29, 2015
0 parents commit 416d76a
Showing 8 changed files with 1,160 additions and 0 deletions.
21 changes: 21 additions & 0 deletions LICENSE
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2015 Ewan Chou.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
46 changes: 46 additions & 0 deletions README.md
@@ -0,0 +1,46 @@
# FreeCache - A cache library for Go with ZERO GC overhead.

Long-lived objects in memory introduce expensive GC overhead: with just a few million live objects, GC latency can reach hundreds of milliseconds. With FreeCache, you can cache an unlimited number of objects in memory without increasing GC latency.
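
A quick way to see the problem in practice is to watch GC pause statistics while a plain Go map holds a few million entries. The snippet below is only an illustrative sketch of such a measurement (the entry count, key format, and use of `runtime.ReadMemStats` are choices made here, not part of FreeCache):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// A plain map with millions of entries gives the collector millions of
	// pointers to scan on every GC cycle.
	m := make(map[string][]byte)
	for i := 0; i < 2000000; i++ {
		m[fmt.Sprintf("key%d", i)] = make([]byte, 10)
	}

	runtime.GC() // force a collection so the stats reflect the fully populated map

	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	// PauseNs is a circular buffer of recent stop-the-world pause durations.
	lastPause := stats.PauseNs[(stats.NumGC+255)%256]
	fmt.Printf("map entries: %d, live heap objects: %d, last GC pause: %d ns\n",
		len(m), stats.HeapObjects, lastPause)
}
```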

## Features
* Store hundreds of millions of entries
* Zero GC overhead
* Highly concurrent, thread-safe access
* Pure Go implementation
* Expiration support
* Nearly-LRU eviction algorithm
* Strictly limited memory usage
* Comes with a toy server that supports a few basic Redis commands with pipelining

## Example

```go
cacheSize := 1024 * 1024
cache := freecache.NewCache(cacheSize)
key := []byte("abc")
val := []byte("def")
expire := 60 // expire in 60 seconds
cache.Set(key, val, expire)
got, err := cache.Get(key)
if err != nil {
	fmt.Println(err)
} else {
	fmt.Println(string(got))
}
affected := cache.Del(key)
fmt.Println("deleted key ", affected)
fmt.Println("entry count ", cache.EntryCount())
```

## Notice
* The recommended Go version is 1.4.
* Memory is preallocated.
* If you allocate a large amount of memory, you may need to call `debug.SetGCPercent()` with a much lower percentage to keep a normal GC frequency; see the sketch after this list.
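
A minimal sketch of that tuning, assuming a large cache is created at startup (the 512 MB size and the value 20 are illustrative choices, not recommendations from the library):

```go
package main

import (
	"runtime/debug"

	"github.com/coocood/freecache"
)

func main() {
	// Preallocating a large cache makes the live heap much bigger. With the
	// default GOGC of 100, the next collection would only trigger after the
	// heap grows by another 100% of that large baseline, so garbage from the
	// rest of the program can pile up between collections.
	cache := freecache.NewCache(512 * 1024 * 1024) // 512 MB

	// Lowering the GC percentage restores a more normal collection frequency.
	debug.SetGCPercent(20)

	_ = cache
}
```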

## How it is done
FreeCache avoids GC overhead by reducing the number of pointers.
No matter how many entries are stored in it, there are only 512 pointers.
The sketch below illustrates the idea.
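
The segment internals are defined elsewhere in this commit, but the underlying idea is generic: keep keys and values serialized inside a small, fixed number of large `[]byte` buffers, so the garbage collector sees a handful of slice headers instead of one pointer per entry. The sketch below only illustrates that idea; `flatStore` and its layout are hypothetical and are not FreeCache's actual segment implementation:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// flatStore is a hypothetical append-only store. Every entry is encoded as
// [keyLen uint32][valLen uint32][key][value] inside one []byte, so the GC only
// ever sees a single pointer (the slice header), no matter how many entries
// are stored.
type flatStore struct {
	buf []byte
}

// add appends an entry and returns its offset in the buffer.
func (s *flatStore) add(key, val []byte) int {
	off := len(s.buf)
	var hdr [8]byte
	binary.LittleEndian.PutUint32(hdr[0:4], uint32(len(key)))
	binary.LittleEndian.PutUint32(hdr[4:8], uint32(len(val)))
	s.buf = append(s.buf, hdr[:]...)
	s.buf = append(s.buf, key...)
	s.buf = append(s.buf, val...)
	return off
}

// get decodes the entry stored at a previously returned offset.
func (s *flatStore) get(off int) (key, val []byte) {
	keyLen := int(binary.LittleEndian.Uint32(s.buf[off : off+4]))
	valLen := int(binary.LittleEndian.Uint32(s.buf[off+4 : off+8]))
	key = s.buf[off+8 : off+8+keyLen]
	val = s.buf[off+8+keyLen : off+8+keyLen+valLen]
	return key, val
}

func main() {
	var s flatStore
	off := s.add([]byte("abc"), []byte("def"))
	k, v := s.get(off)
	fmt.Println(string(k), string(v)) // prints: abc def
}
```

A real cache still needs an index from hashed keys to offsets, plus eviction and expiration, on top of such a buffer; in FreeCache those live in the segment type, which is referenced from `cache.go` but defined in one of the files not expanded in this view.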

## License
The MIT License

93 changes: 93 additions & 0 deletions cache.go
@@ -0,0 +1,93 @@
package freecache

import (
	"sync"
)

// Cache is a 256-way sharded in-memory cache; each segment is guarded by its
// own mutex so that accesses to different segments do not contend.
type Cache struct {
	locks    [256]sync.Mutex
	segments [256]segment
}

// fnvaHash is the 64-bit FNV-1a hash of data.
func fnvaHash(data []byte) uint64 {
	var hash uint64 = 14695981039346656037
	for _, c := range data {
		hash ^= uint64(c)
		hash *= 1099511628211
	}
	return hash
}

// NewCache creates a cache whose memory is split evenly across 256 segments.
// The cache size will be set to 512KB at minimum.
func NewCache(size int) (cache *Cache) {
	if size < 512*1024 {
		size = 512 * 1024
	}
	cache = new(Cache)
	for i := 0; i < 256; i++ {
		cache.segments[i] = newSegment(size/256, i)
	}
	return
}

// Set writes the entry into the cache. It does not promise that the data
// written can always be retrieved later. If the key is larger than 65535 bytes
// or the value is larger than 1/1024 of the cache size, the entry is not
// written and an error is returned. expireSeconds <= 0 means no expiration,
// but the entry can still be evicted when the cache is full.
func (cache *Cache) Set(key, value []byte, expireSeconds int) (err error) {
	hashVal := fnvaHash(key)
	segId := hashVal & 255
	cache.locks[segId].Lock()
	err = cache.segments[segId].set(key, value, hashVal, expireSeconds)
	cache.locks[segId].Unlock()
	return
}

// Get returns the cached value for the key, or a not-found error.
func (cache *Cache) Get(key []byte) (value []byte, err error) {
	hashVal := fnvaHash(key)
	segId := hashVal & 255
	cache.locks[segId].Lock()
	value, err = cache.segments[segId].get(key, hashVal)
	cache.locks[segId].Unlock()
	return
}

// Del removes the entry for the key and reports whether an entry was deleted.
func (cache *Cache) Del(key []byte) (affected bool) {
	hashVal := fnvaHash(key)
	segId := hashVal & 255
	cache.locks[segId].Lock()
	affected = cache.segments[segId].del(key, hashVal)
	cache.locks[segId].Unlock()
	return
}

// EvacuateCount returns the total number of evacuated entries across all
// segments.
func (cache *Cache) EvacuateCount() (count int) {
	for i := 0; i < 256; i++ {
		cache.locks[i].Lock()
		count += cache.segments[i].totalEvacuate
		cache.locks[i].Unlock()
	}
	return
}

// EntryCount returns the number of entries currently stored in the cache.
func (cache *Cache) EntryCount() (entryCount int64) {
	for i := 0; i < 256; i++ {
		cache.locks[i].Lock()
		entryCount += cache.segments[i].entryCount
		cache.locks[i].Unlock()
	}
	return
}

// AverageAccessTime returns the average access time over all cached entries,
// or 0 when the cache is empty (avoiding a division by zero).
func (cache *Cache) AverageAccessTime() (averageTime int64) {
	var entryCount, totalTime int64
	for i := 0; i < 256; i++ {
		cache.locks[i].Lock()
		totalTime += cache.segments[i].totalTime
		entryCount += cache.segments[i].entryCount
		cache.locks[i].Unlock()
	}
	if entryCount == 0 {
		return 0
	}
	averageTime = totalTime / entryCount
	return
}
96 changes: 96 additions & 0 deletions cache_test.go
@@ -0,0 +1,96 @@
package freecache

import (
	"bytes"
	"fmt"
	"strings"
	"testing"
	"time"
)

func TestRingCache(t *testing.T) {
	cache := NewCache(1024)
	key := []byte("abcd")
	val := []byte("efghijkl")
	err := cache.Set(key, val, 0)
	if err != nil {
		t.Error("err should be nil")
	}
	value, err := cache.Get(key)
	if err != nil || !bytes.Equal(value, val) {
		t.Error("value not equal")
	}
	affected := cache.Del(key)
	if !affected {
		t.Error("del should return affected true")
	}
	value, err = cache.Get(key)
	if err != ErrNotFound {
		t.Error("error should be ErrNotFound after being deleted")
	}
	affected = cache.Del(key)
	if affected {
		t.Error("del should not return affected true")
	}
	// test expire
	err = cache.Set(key, val, 1)
	if err != nil {
		t.Error("err should be nil")
	}
	time.Sleep(time.Second)
	value, err = cache.Get(key)
	if err == nil {
		t.Fatal("key should be expired", string(value))
	}

	// oversized keys and values must be rejected
	bigKey := make([]byte, 65536)
	err = cache.Set(bigKey, val, 0)
	if err != ErrLargeKey {
		t.Error("large key should return ErrLargeKey")
	}
	value, err = cache.Get(bigKey)
	if value != nil {
		t.Error("value should be nil when get a big key")
	}
	err = cache.Set(key, bigKey, 0)
	if err != ErrLargeEntry {
		t.Error("err should be ErrLargeEntry")
	}
	// fill the cache with many entries
	n := 5000
	for i := 0; i < n; i++ {
		keyStr := fmt.Sprintf("key%v", i)
		valStr := strings.Repeat(keyStr, 10)
		err = cache.Set([]byte(keyStr), []byte(valStr), 0)
		if err != nil {
			t.Error(err)
		}
	}
	time.Sleep(time.Second)
	// access the odd keys to refresh their recency
	for i := 1; i < n; i += 2 {
		keyStr := fmt.Sprintf("key%v", i)
		cache.Get([]byte(keyStr))
	}
	// write the even keys again
	for i := 0; i < n; i += 2 {
		keyStr := fmt.Sprintf("key%v", i)
		valStr := strings.Repeat(keyStr, 10)
		err = cache.Set([]byte(keyStr), []byte(valStr), 0)
		if err != nil {
			t.Error(err)
		}
	}
	// count how many of the recently accessed odd keys are still cached
	hitCount := 0
	for i := 1; i < n; i += 2 {
		keyStr := fmt.Sprintf("key%v", i)
		expectedValStr := strings.Repeat(keyStr, 10)
		value, err = cache.Get([]byte(keyStr))
		if err == nil {
			hitCount++
			if string(value) != expectedValStr {
				t.Errorf("value is %v, expected %v", string(value), expectedValStr)
			}
		}
	}

	t.Logf("hit rate is %v, evacuates %v, entries %v, average time %v\n",
		float64(hitCount)/float64(n/2), cache.EvacuateCount(), cache.EntryCount(), cache.AverageAccessTime())
}
