BigCache is an efficient cache for keeping gigabytes of data in Go. It is a fast, concurrent, evicting in-memory cache designed to hold a large number of entries without degrading performance. BigCache keeps entries on the heap but omits GC for them. To achieve this it operates on byte slices, so in most use cases entry (de)serialization will be needed in front of the cache.
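Since the cache only stores and returns byte slices, a thin (de)serialization layer usually sits in front of it. The sketch below illustrates this with encoding/json; the User type and the key format are illustrative assumptions, not part of BigCache.

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/allegro/bigcache"
)

// User is a hypothetical application type kept in the cache as JSON bytes.
type User struct {
    ID   int    `json:"id"`
    Name string `json:"name"`
}

func main() {
    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))

    // Serialize before Set: BigCache accepts only []byte values.
    data, _ := json.Marshal(User{ID: 1, Name: "alice"})
    cache.Set("user:1", data)

    // Deserialize after Get to recover the typed value.
    raw, _ := cache.Get("user:1")
    var u User
    json.Unmarshal(raw, &u)
    fmt.Println(u.Name)
}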
Simple initialization
import "github.com/allegro/bigcache"cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))cache.Set("my-unique-key", []byte("value"))entry, _ := cache.Get("my-unique-key")fmt.Println(string(entry))自定义初始化
import ("log""github.com/allegro/bigcache")config := bigcache.Config {// number of shards (must be a power of 2)Shards: 1024,// time after which entry can be evictedLifeWindow: 10 * time.Minute,// rps * lifeWindow, used only in initial memory allocationMaxEntriesInWindow: 1000 * 10 * 60,// max entry size in bytes, used only in initial memory allocationMaxEntrySize: 500,// prints information about additional memory allocationVerbose: true,// cache will not allocate more memory than this limit, value in MB// if value is reached then the oldest entries can be overridden for the new ones// 0 value means no size limitHardMaxCacheSize: 8192,// callback fired when the oldest entry is removed because of its// expiration time or no space left for the new entry. Default value is nil which// means no callback and it prevents from unwrapping the oldest entry.OnRemove: nil,}cache, initErr := bigcache.NewBigCache(config)if initErr != nil {log.Fatal(initErr)}cache.Set("my-unique-key", []byte("value"))if entry, err := cache.Get("my-unique-key"); err == nil {fmt.Println(string(entry))}