godruoyi/lru

High performance LRU cache. Forked from phuslu/lru.

 
 


Features

  • Simple
    • No dependencies.
    • Straightforward API.
  • Fast
    • Outperforms well-known LRU caches.
    • Zero memory allocations.
  • GC friendly
    • Pointerless and contiguous data structs.
    • Minimized GC scan times.
  • Memory efficient
    • Adds only 26 extra bytes per entry.
    • Minimized memory usage.
  • Optional features
    • Enable a sliding cache via the WithSliding(true) option.
    • Create a loading cache via the WithLoader(func(context.Context, K) (V, time.Duration, error)) option (see the sketch after this list).
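
For illustration, here is a minimal sketch of how these two options might be combined. The WithLoader signature comes from the list above; the generic option form (lru.WithSliding[string, int], lru.WithLoader[string, int]) and the GetOrLoad call are assumptions based on the upstream phuslu/lru documentation, not guaranteed API.

package main

import (
    "context"
    "time"

    "github.com/phuslu/lru"
)

func main() {
    // Hypothetical loader: called on a cache miss, returns the value and its TTL.
    loader := func(ctx context.Context, key string) (int, time.Duration, error) {
        return len(key), time.Minute, nil // assumption: derive a value from the key
    }

    // Sliding cache + loading cache options, per the feature list above
    // (generic option form is an assumption).
    cache := lru.NewTTLCache[string, int](8192,
        lru.WithSliding[string, int](true),
        lru.WithLoader[string, int](loader),
    )

    // GetOrLoad is assumed here: return the cached value, or invoke the loader on a miss.
    v, err, ok := cache.GetOrLoad(context.Background(), "hello", nil)
    println(v, err == nil, ok)
}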

Limitations

  1. The TTL is accurate to the nearest second.
  2. Expired items are only removed when accessed again or the cache is full.

Getting Started

package main

import (
    "time"

    "github.com/phuslu/lru"
)

func main() {
    cache := lru.NewTTLCache[string, int](8192)

    cache.Set("a", 1, 2*time.Second)
    println(cache.Get("a")) // 1 true
    println(cache.Get("b")) // 0 false

    time.Sleep(1 * time.Second)
    println(cache.Get("a")) // still within the 2s TTL: 1 true

    time.Sleep(2 * time.Second)
    println(cache.Get("a")) // expired: 0 false

    stats := cache.Stats()
    println("SetCalls", stats.SetCalls, "GetCalls", stats.GetCalls, "Misses", stats.Misses)
}

Throughput benchmarks

Disclaimer: These benchmarks ran on busy GitHub runners with 8 vCPUs, so the results may differ significantly from your real environment. See phuslu#14.

A performance result is shown below. Check the GitHub benchmark action for more results and details.

go1.22 benchmark on keysize=16, itemsize=1000000, cachesize=50%, concurrency=8
// env writeratio=0.1 zipfian=false go test -v -cpu=8 -run=none -bench=. -benchtime=5s -benchmem bench_test.go
package bench

import (
    "crypto/sha1"
    "fmt"
    "math/bits"
    "math/rand/v2"
    "os"
    "runtime"
    "strconv"
    "testing"
    "time"
    _ "unsafe"

    theine "github.com/Yiling-J/theine-go"
    "github.com/cespare/xxhash/v2"
    cloudflare "github.com/cloudflare/golibs/lrucache"
    ristretto "github.com/dgraph-io/ristretto/v2"
    freelru "github.com/elastic/go-freelru"
    hashicorp "github.com/hashicorp/golang-lru/v2/expirable"
    ccache "github.com/karlseguin/ccache/v3"
    lxzan "github.com/lxzan/memorycache"
    otter "github.com/maypok86/otter"
    ecache "github.com/orca-zhang/ecache"
    phuslu "github.com/phuslu/lru"

    "github.com/aclements/go-perfevent/perfbench"
)

const (
    keysize   = 16
    cachesize = 1000000
)

var writeratio, _ = strconv.ParseFloat(os.Getenv("writeratio"), 64)
var zipfian, _ = strconv.ParseBool(os.Getenv("zipfian"))

type CheapRand struct {
    Seed uint64
}

func (rand *CheapRand) Uint32() uint32 {
    rand.Seed += 0xa0761d6478bd642f
    hi, lo := bits.Mul64(rand.Seed, rand.Seed^0xe7037ed1a0b428db)
    return uint32(hi ^ lo)
}

func (rand *CheapRand) Uint32n(n uint32) uint32 {
    return uint32((uint64(rand.Uint32()) * uint64(n)) >> 32)
}

func (rand *CheapRand) Uint64() uint64 {
    return uint64(rand.Uint32())<<32 ^ uint64(rand.Uint32())
}

var shardcount = func() int {
    n := runtime.GOMAXPROCS(0) * 16
    k := 1
    for k < n {
        k = k * 2
    }
    return k
}()

var keys = func() (x []string) {
    x = make([]string, cachesize)
    for i := range cachesize {
        x[i] = fmt.Sprintf("%x", sha1.Sum([]byte(fmt.Sprint(i))))[:keysize]
    }
    return
}()

func BenchmarkHashicorpSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := hashicorp.NewLRU[string, int](cachesize, nil, time.Hour)
    for i := range cachesize / 2 {
        cache.Add(keys[i], i)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Add(keys[i], i)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkCloudflareSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := cloudflare.NewMultiLRUCache(uint(shardcount), uint(cachesize/shardcount))
    for i := range cachesize / 2 {
        cache.Set(keys[i], i, time.Now().Add(time.Hour))
    }
    expires := time.Now().Add(time.Hour)

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i, expires)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkEcacheSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := ecache.NewLRUCache(uint16(shardcount), uint16(cachesize/shardcount), time.Hour)
    for i := range cachesize / 2 {
        cache.Put(keys[i], i)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Put(keys[i], i)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkLxzanSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := lxzan.New[string, int](
        lxzan.WithBucketNum(shardcount),
        lxzan.WithBucketSize(cachesize/shardcount, cachesize/shardcount),
        lxzan.WithInterval(time.Hour, time.Hour),
    )
    for i := range cachesize / 2 {
        cache.Set(keys[i], i, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func hashStringXXHASH(s string) uint32 {
    return uint32(xxhash.Sum64String(s))
}

func BenchmarkFreelruSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache, _ := freelru.NewSharded[string, int](cachesize, hashStringXXHASH)
    for i := range cachesize / 2 {
        cache.AddWithLifetime(keys[i], i, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.AddWithLifetime(keys[i], i, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkPhusluSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := phuslu.NewTTLCache[string, int](cachesize, phuslu.WithShards[string, int](uint32(shardcount)))
    for i := range cachesize / 2 {
        cache.Set(keys[i], i, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkNoTTLSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := phuslu.NewLRUCache[string, int](cachesize, phuslu.WithShards[string, int](uint32(shardcount)))
    for i := range cachesize / 2 {
        cache.Set(keys[i], i)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkCcacheSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache := ccache.New(ccache.Configure[int]().MaxSize(cachesize).ItemsToPrune(100))
    for i := range cachesize / 2 {
        cache.Set(keys[i], i, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkRistrettoSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache, _ := ristretto.NewCache(&ristretto.Config[string, int]{
        NumCounters: 10 * cachesize, // number of keys to track frequency of (10M).
        MaxCost:     cachesize,      // maximum cost of cache (1M).
        BufferItems: 64,             // number of keys per Get buffer.
    })
    for i := range cachesize / 2 {
        cache.SetWithTTL(keys[i], i, 1, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.SetWithTTL(keys[i], i, 1, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkTheineSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache, _ := theine.NewBuilder[string, int](cachesize).Build()
    for i := range cachesize / 2 {
        cache.SetWithTTL(keys[i], i, 1, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.SetWithTTL(keys[i], i, 1, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

func BenchmarkOtterSetGet(b *testing.B) {
    c := perfbench.Open(b)
    cache, _ := otter.MustBuilder[string, int](cachesize).WithVariableTTL().Build()
    for i := range cachesize / 2 {
        cache.Set(keys[i], i, time.Hour)
    }

    b.ResetTimer()
    c.Reset()
    b.RunParallel(func(pb *testing.PB) {
        threshold := uint32(float64(^uint32(0)) * writeratio)
        cheaprand := &CheapRand{uint64(time.Now().UnixNano())}
        zipf := rand.NewZipf(rand.New(cheaprand), 1.0001, 10, cachesize-1)
        for pb.Next() {
            if threshold > 0 && cheaprand.Uint32() <= threshold {
                i := int(cheaprand.Uint32n(cachesize))
                cache.Set(keys[i], i, time.Hour)
            } else if zipfian {
                cache.Get(keys[zipf.Uint64()])
            } else {
                cache.Get(keys[cheaprand.Uint32n(cachesize)])
            }
        }
    })
}

with random reads (90%) and random writes (10%)

goos: linux
goarch: amd64
cpu: AMD EPYC 7763 64-Core Processor

BenchmarkHashicorpSetGet-8     11929687    564.5 ns/op     2 B/op    0 allocs/op
BenchmarkCloudflareSetGet-8    36970554    204.9 ns/op    16 B/op    1 allocs/op
BenchmarkEcacheSetGet-8        46409206    140.7 ns/op     2 B/op    0 allocs/op
BenchmarkLxzanSetGet-8         45660375    155.6 ns/op     0 B/op    0 allocs/op
BenchmarkFreelruSetGet-8       51934801    144.4 ns/op     0 B/op    0 allocs/op
BenchmarkPhusluSetGet-8        66166340    123.6 ns/op     0 B/op    0 allocs/op
BenchmarkNoTTLSetGet-8         65648408    110.8 ns/op     0 B/op    0 allocs/op
BenchmarkCcacheSetGet-8        20042799    390.7 ns/op    34 B/op    2 allocs/op
BenchmarkRistrettoSetGet-8     46214467    131.3 ns/op    11 B/op    0 allocs/op
BenchmarkTheineSetGet-8        26940531    235.0 ns/op     4 B/op    0 allocs/op
BenchmarkOtterSetGet-8         49227819    164.6 ns/op     8 B/op    0 allocs/op

PASS
ok      command-line-arguments  119.471s

with zipfian reads (99%) and random writes (1%)

goos: linux
goarch: amd64
cpu: AMD EPYC 7763 64-Core Processor

BenchmarkHashicorpSetGet-8     12568664    447.8 ns/op     0 B/op    0 allocs/op
BenchmarkCloudflareSetGet-8    50304914    132.2 ns/op    16 B/op    1 allocs/op
BenchmarkEcacheSetGet-8        53473498    104.4 ns/op     0 B/op    0 allocs/op
BenchmarkLxzanSetGet-8         61482066    103.9 ns/op     0 B/op    0 allocs/op
BenchmarkFreelruSetGet-8       56053724    115.7 ns/op     0 B/op    0 allocs/op
BenchmarkPhusluSetGet-8        74092269     85.47 ns/op    0 B/op    0 allocs/op
BenchmarkNoTTLSetGet-8         72505081     83.70 ns/op    0 B/op    0 allocs/op
BenchmarkCcacheSetGet-8        20945337    277.7 ns/op    21 B/op    2 allocs/op
BenchmarkRistrettoSetGet-8     59896165    101.1 ns/op     4 B/op    0 allocs/op
BenchmarkTheineSetGet-8        51948990    116.5 ns/op     0 B/op    0 allocs/op
BenchmarkOtterSetGet-8         67258905     94.86 ns/op    1 B/op    0 allocs/op

PASS
ok      command-line-arguments  104.343s

GC scan

The GC scan times are shown below. Check the GitHub gcscan action for more results and details.

GC scan times on keysize=16 (string), valuesize=8 (int), cachesize in (100000, 200000, 400000, 1000000)
// env GODEBUG=gctrace=1 go run gcscan.go phuslu 1000000
package main

import (
    "fmt"
    "os"
    "runtime"
    "runtime/debug"
    "strconv"
    "time"

    theine "github.com/Yiling-J/theine-go"
    "github.com/cespare/xxhash/v2"
    cloudflare "github.com/cloudflare/golibs/lrucache"
    ristretto "github.com/dgraph-io/ristretto/v2"
    freelru "github.com/elastic/go-freelru"
    hashicorp "github.com/hashicorp/golang-lru/v2/expirable"
    ccache "github.com/karlseguin/ccache/v3"
    lxzan "github.com/lxzan/memorycache"
    otter "github.com/maypok86/otter"
    ecache "github.com/orca-zhang/ecache"
    phuslu "github.com/phuslu/lru"
)

const keysize = 16

var repeat, _ = strconv.Atoi(os.Getenv("repeat"))
var keys []string

func main() {
    name := os.Args[1]
    cachesize, _ := strconv.Atoi(os.Args[2])

    keys = make([]string, cachesize)
    for i := range cachesize {
        keys[i] = fmt.Sprintf(fmt.Sprintf("%%0%dd", keysize), i)
    }

    map[string]func(int){
        "nottl":      SetupNottl,
        "phuslu":     SetupPhuslu,
        "freelru":    SetupFreelru,
        "ristretto":  SetupRistretto,
        "otter":      SetupOtter,
        "lxzan":      SetupLxzan,
        "ecache":     SetupEcache,
        "cloudflare": SetupCloudflare,
        "ccache":     SetupCcache,
        "hashicorp":  SetupHashicorp,
        "theine":     SetupTheine,
    }[name](cachesize)
}

func SetupNottl(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := phuslu.NewLRUCache[string, int](cachesize)
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i)
        }
        runtime.GC()
    }
}

func SetupPhuslu(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := phuslu.NewTTLCache[string, int](cachesize)
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i, time.Hour)
        }
        runtime.GC()
    }
}

func SetupFreelru(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache, _ := freelru.NewSharded[string, int](uint32(cachesize), func(s string) uint32 { return uint32(xxhash.Sum64String(s)) })
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.AddWithLifetime(keys[i], i, time.Hour)
        }
        runtime.GC()
    }
}

func SetupOtter(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache, _ := otter.MustBuilder[string, int](cachesize).WithVariableTTL().Build()
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i, time.Hour)
        }
        runtime.GC()
    }
}

func SetupEcache(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := ecache.NewLRUCache(1024, uint16(cachesize/1024), time.Hour)
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Put(keys[i], i)
        }
        runtime.GC()
    }
}

func SetupRistretto(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache, _ := ristretto.NewCache(&ristretto.Config[string, int]{
        NumCounters: int64(10 * cachesize), // number of keys to track frequency of (10M).
        MaxCost:     int64(cachesize),      // maximum cost of cache (1M).
        BufferItems: 64,                    // number of keys per Get buffer.
    })
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.SetWithTTL(keys[i], i, 1, time.Hour)
        }
        runtime.GC()
    }
}

func SetupLxzan(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := lxzan.New[string, int](
        lxzan.WithBucketNum(128),
        lxzan.WithBucketSize(cachesize/128, cachesize/128),
        lxzan.WithInterval(time.Hour, time.Hour),
    )
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i, time.Hour)
        }
        runtime.GC()
    }
}

func SetupTheine(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache, _ := theine.NewBuilder[string, int](int64(cachesize)).Build()
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.SetWithTTL(keys[i], i, 1, time.Hour)
        }
        runtime.GC()
    }
}

func SetupCloudflare(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := cloudflare.NewMultiLRUCache(1024, uint(cachesize/1024))
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i, time.Now().Add(time.Hour))
        }
        runtime.GC()
    }
}

func SetupCcache(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := ccache.New(ccache.Configure[int]().MaxSize(int64(cachesize)).ItemsToPrune(100))
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Set(keys[i], i, time.Hour)
        }
        runtime.GC()
    }
}

func SetupHashicorp(cachesize int) {
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    cache := hashicorp.NewLRU[string, int](cachesize, nil, time.Hour)
    runtime.GC()
    for range repeat {
        for i := range cachesize {
            cache.Add(keys[i], i)
        }
        runtime.GC()
    }
}
GCScan        100000    200000    400000    1000000
nottl         1 ms      3 ms      6 ms      15 ms
phuslu        1 ms      3 ms      6 ms      14 ms
ristretto     2 ms      4 ms      7 ms      13 ms
freelru       2 ms      4 ms      7 ms      16 ms
lxzan         2 ms      5 ms      8 ms      19 ms
cloudflare    5 ms      11 ms     21 ms     56 ms
otter         5 ms      12 ms     22 ms     58 ms
ecache        5 ms      11 ms     23 ms     60 ms
ccache        5 ms      11 ms     24 ms     60 ms
hashicorp     9 ms      17 ms     35 ms     79 ms
theine        7 ms      15 ms     35 ms     83 ms

Memory usage

The memory usage results are shown below. Check the GitHub memory action for more results and details.

memory usage on keysize=16 (string), valuesize=8 (int), cachesize in (100000, 200000, 400000, 1000000, 2000000, 4000000)
// memusage.go
package main

import (
    "fmt"
    "os"
    "runtime"
    "strconv"
    "time"

    theine "github.com/Yiling-J/theine-go"
    "github.com/cespare/xxhash/v2"
    cloudflare "github.com/cloudflare/golibs/lrucache"
    ristretto "github.com/dgraph-io/ristretto/v2"
    freelru "github.com/elastic/go-freelru"
    hashicorp "github.com/hashicorp/golang-lru/v2/expirable"
    ccache "github.com/karlseguin/ccache/v3"
    lxzan "github.com/lxzan/memorycache"
    otter "github.com/maypok86/otter"
    ecache "github.com/orca-zhang/ecache"
    phuslu "github.com/phuslu/lru"
)

const keysize = 16

var keys []string

func main() {
    name := os.Args[1]
    cachesize, _ := strconv.Atoi(os.Args[2])

    keys = make([]string, cachesize)
    for i := range cachesize {
        keys[i] = fmt.Sprintf(fmt.Sprintf("%%0%dd", keysize), i)
    }

    var o runtime.MemStats
    runtime.ReadMemStats(&o)

    map[string]func(int){
        "nottl":      SetupNottl,
        "phuslu":     SetupPhuslu,
        "freelru":    SetupFreelru,
        "ristretto":  SetupRistretto,
        "otter":      SetupOtter,
        "lxzan":      SetupLxzan,
        "ecache":     SetupEcache,
        "cloudflare": SetupCloudflare,
        "ccache":     SetupCcache,
        "hashicorp":  SetupHashicorp,
        "theine":     SetupTheine,
    }[name](cachesize)

    var m runtime.MemStats
    runtime.ReadMemStats(&m)

    fmt.Printf("%s\t%d\t%v MB\t%v MB\t%v MB\n",
        name,
        cachesize,
        (m.Alloc-o.Alloc)/1048576,
        (m.TotalAlloc-o.TotalAlloc)/1048576,
        (m.Sys-o.Sys)/1048576,
    )
}

func SetupNottl(cachesize int) {
    cache := phuslu.NewLRUCache[string, int](cachesize)
    for i := range cachesize {
        cache.Set(keys[i], i)
    }
}

func SetupPhuslu(cachesize int) {
    cache := phuslu.NewTTLCache[string, int](cachesize)
    for i := range cachesize {
        cache.Set(keys[i], i, time.Hour)
    }
}

func SetupFreelru(cachesize int) {
    cache, _ := freelru.NewSharded[string, int](uint32(cachesize), func(s string) uint32 { return uint32(xxhash.Sum64String(s)) })
    for i := range cachesize {
        cache.AddWithLifetime(keys[i], i, time.Hour)
    }
}

func SetupOtter(cachesize int) {
    cache, _ := otter.MustBuilder[string, int](cachesize).WithVariableTTL().Build()
    for i := range cachesize {
        cache.Set(keys[i], i, time.Hour)
    }
}

func SetupEcache(cachesize int) {
    cache := ecache.NewLRUCache(1024, uint16(cachesize/1024), time.Hour)
    for i := range cachesize {
        cache.Put(keys[i], i)
    }
}

func SetupRistretto(cachesize int) {
    cache, _ := ristretto.NewCache(&ristretto.Config[string, int]{
        NumCounters: int64(10 * cachesize), // number of keys to track frequency of (10M).
        MaxCost:     int64(cachesize),      // maximum cost of cache (1M).
        BufferItems: 64,                    // number of keys per Get buffer.
    })
    for i := range cachesize {
        cache.SetWithTTL(keys[i], i, 1, time.Hour)
    }
}

func SetupLxzan(cachesize int) {
    cache := lxzan.New[string, int](
        lxzan.WithBucketNum(128),
        lxzan.WithBucketSize(cachesize/128, cachesize/128),
        lxzan.WithInterval(time.Hour, time.Hour),
    )
    for i := range cachesize {
        cache.Set(keys[i], i, time.Hour)
    }
}

func SetupTheine(cachesize int) {
    cache, _ := theine.NewBuilder[string, int](int64(cachesize)).Build()
    for i := range cachesize {
        cache.SetWithTTL(keys[i], i, 1, time.Hour)
    }
}

func SetupCloudflare(cachesize int) {
    cache := cloudflare.NewMultiLRUCache(1024, uint(cachesize/1024))
    for i := range cachesize {
        cache.Set(keys[i], i, time.Now().Add(time.Hour))
    }
}

func SetupCcache(cachesize int) {
    cache := ccache.New(ccache.Configure[int]().MaxSize(int64(cachesize)).ItemsToPrune(100))
    for i := range cachesize {
        cache.Set(keys[i], i, time.Hour)
    }
}

func SetupHashicorp(cachesize int) {
    cache := hashicorp.NewLRU[string, int](cachesize, nil, time.Hour)
    for i := range cachesize {
        cache.Add(keys[i], i)
    }
}
MemUsage      100000    200000    400000    1000000    2000000    4000000
nottl         3 MB      6 MB      13 MB     39 MB      77 MB      155 MB
phuslu        4 MB      8 MB      16 MB     46 MB      92 MB      185 MB
ristretto     13 MB     12 MB     28 MB     58 MB      150 MB     291 MB
lxzan         8 MB      17 MB     35 MB     95 MB      191 MB     379 MB
otter         13 MB     27 MB     54 MB     104 MB     209 MB     418 MB
freelru       6 MB      13 MB     27 MB     112 MB     224 MB     448 MB
ecache        11 MB     22 MB     44 MB     123 MB     238 MB     468 MB
theine        15 MB     31 MB     62 MB     178 MB     357 MB     714 MB
cloudflare    16 MB     33 MB     64 MB     183 MB     358 MB     716 MB
ccache        16 MB     32 MB     65 MB     182 MB     365 MB     730 MB
hashicorp     18 MB     37 MB     57 MB     241 MB     484 MB     967 MB
  • nottl saves about 20% memory compared to phuslu by removing the TTL functionality, which also gives a slight increase in throughput.
  • ristretto employs a questionable usage pattern due to its rejection of items via a bloom filter, resulting in a lower hit ratio.
  • freelru overcommits the cache size to the next power of 2, leading to higher memory usage, particularly at larger cache sizes (see the sketch after this list).
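
A rough illustration of the power-of-two rounding mentioned in the last bullet; nextPowerOfTwo is a hypothetical helper written for this sketch, not code taken from freelru:

package main

import (
    "fmt"
    "math/bits"
)

// nextPowerOfTwo rounds a requested capacity up to the next power of two.
// This is an illustrative helper, not freelru's internal implementation.
func nextPowerOfTwo(n uint32) uint32 {
    if n <= 1 {
        return 1
    }
    return 1 << bits.Len32(n-1)
}

func main() {
    for _, size := range []uint32{100000, 400000, 1000000, 4000000} {
        // e.g. requested 1000000 -> allocated 1048576 slots
        fmt.Printf("requested %d -> allocated %d slots\n", size, nextPowerOfTwo(size))
    }
}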

Hit ratio

It is a classic sharded LRU implementation, so the hit ratio is comparable to or slightly lower than a regular LRU.
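
To estimate the hit ratio for your own workload, one option is to derive it from the same Stats() counters used in the Getting Started example (GetCalls and Misses); the toy workload below is only an assumption for demonstration:

package main

import (
    "fmt"
    "time"

    "github.com/phuslu/lru"
)

func main() {
    cache := lru.NewTTLCache[string, int](1024)

    // Toy workload (assumption): write 1024 keys, then read 4096 keys from a larger key space.
    for i := 0; i < 1024; i++ {
        cache.Set(fmt.Sprint(i), i, time.Hour)
    }
    for i := 0; i < 4096; i++ {
        cache.Get(fmt.Sprint(i % 2048))
    }

    // Hit ratio derived from the counters shown in the Getting Started example.
    stats := cache.Stats()
    hitRatio := 1 - float64(stats.Misses)/float64(stats.GetCalls)
    fmt.Printf("gets=%d misses=%d hit ratio=%.2f\n", stats.GetCalls, stats.Misses, hitRatio)
}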

License

LRU is licensed under the MIT License. See the LICENSE file for details.

Contact

For inquiries or support, contact phus.lu@gmail.com or raise a GitHub issue.
