cache.go 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798
  1. /*
  2. * SPDX-FileCopyrightText: © Hypermode Inc. <hello@hypermode.com>
  3. * SPDX-License-Identifier: Apache-2.0
  4. */
  5. // Ristretto is a fast, fixed size, in-memory cache with a dual focus on
  6. // throughput and hit ratio performance. You can easily add Ristretto to an
  7. // existing system and keep the most valuable data where you need it.
  8. package ristretto
  9. import (
  10. "bytes"
  11. "errors"
  12. "fmt"
  13. "sync"
  14. "sync/atomic"
  15. "time"
  16. "unsafe"
  17. "github.com/dgraph-io/ristretto/v2/z"
  18. )
var (
	// setBufSize is the capacity of the buffered setBuf channel used to
	// batch Set operations; Sets that cannot be buffered are dropped
	// (see Cache.Set / Cache.SetWithTTL).
	// TODO: find the optimal value for this or make it configurable
	setBufSize = 32 * 1024
)

// itemSize is the internal memory overhead of one stored item; it is added
// to an item's cost unless Config.IgnoreInternalCost is set.
const itemSize = int64(unsafe.Sizeof(storeItem[any]{}))
  24. func zeroValue[T any]() T {
  25. var zero T
  26. return zero
  27. }
// Key is the generic type used for the keys in the cache's key-value pairs.
// It is an alias of z.Key.
type Key = z.Key
// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
// policy and a Sampled LFU eviction policy. You can use the same Cache instance
// from as many goroutines as you want.
type Cache[K Key, V any] struct {
	// storedItems is the central concurrent hashmap where key-value items are stored.
	storedItems store[V]
	// cachePolicy determines what gets let in to the cache and what gets kicked out.
	cachePolicy *defaultPolicy[V]
	// getBuf is a custom ring buffer implementation that gets pushed to when
	// keys are read.
	getBuf *ringBuffer
	// setBuf is a buffer allowing us to batch/drop Sets during times of high
	// contention.
	setBuf chan *Item[V]
	// onEvict is called for item evictions.
	onEvict func(*Item[V])
	// onReject is called when an item is rejected via admission policy.
	onReject func(*Item[V])
	// onExit is called whenever a value goes out of scope from the cache.
	onExit (func(V))
	// keyToHash is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	keyToHash func(K) (uint64, uint64)
	// stop is used to stop the processItems goroutine; done is signaled by
	// that goroutine when it returns (see Close and Clear).
	stop chan struct{}
	done chan struct{}
	// isClosed indicates whether the cache is closed.
	isClosed atomic.Bool
	// cost calculates cost from a value.
	cost func(value V) int64
	// ignoreInternalCost dictates whether to ignore the cost of internally storing
	// the item in the cost calculation.
	ignoreInternalCost bool
	// cleanupTicker is used to periodically check for entries whose TTL has passed.
	cleanupTicker *time.Ticker
	// Metrics contains a running log of important statistics like hits, misses,
	// and dropped items. It is nil unless Config.Metrics was set.
	Metrics *Metrics
}
// Config is passed to NewCache for creating new Cache instances.
type Config[K Key, V any] struct {
	// NumCounters determines the number of counters (keys) to keep that hold
	// access frequency information. It's generally a good idea to have more
	// counters than the max cache capacity, as this will improve eviction
	// accuracy and subsequent hit ratios.
	//
	// For example, if you expect your cache to hold 1,000,000 items when full,
	// NumCounters should be 10,000,000 (10x). Each counter takes up roughly
	// 3 bytes (4 bits for each counter * 4 copies plus about a byte per
	// counter for the bloom filter). Note that the number of counters is
	// internally rounded up to the nearest power of 2, so the space usage
	// may be a little larger than 3 bytes * NumCounters.
	//
	// We've seen good performance in setting this to 10x the number of items
	// you expect to keep in the cache when full.
	NumCounters int64
	// MaxCost is how eviction decisions are made. For example, if MaxCost is
	// 100 and a new item with a cost of 1 increases total cache cost to 101,
	// 1 item will be evicted.
	//
	// MaxCost can be considered as the cache capacity, in whatever units you
	// choose to use.
	//
	// For example, if you want the cache to have a max capacity of 100MB, you
	// would set MaxCost to 100,000,000 and pass an item's number of bytes as
	// the `cost` parameter for calls to Set. If new items are accepted, the
	// eviction process will take care of making room for the new item and not
	// overflowing the MaxCost value.
	//
	// MaxCost could be anything as long as it matches how you're using the cost
	// values when calling Set.
	MaxCost int64
	// BufferItems determines the size of Get buffers.
	//
	// Unless you have a rare use case, using `64` as the BufferItems value
	// results in good performance.
	//
	// If for some reason you see Get performance decreasing with lots of
	// contention (you shouldn't), try increasing this value in increments of 64.
	// This is a fine-tuning mechanism and you probably won't have to touch this.
	BufferItems int64
	// Metrics is true when you want a variety of stats about the cache.
	// There is some overhead to keeping statistics, so you should only set this
	// flag to true when testing or when throughput performance isn't a major factor.
	Metrics bool
	// OnEvict is called for every eviction with the evicted item.
	OnEvict func(item *Item[V])
	// OnReject is called for every rejection done via the policy.
	OnReject func(item *Item[V])
	// OnExit is called whenever a value is removed from cache. This can be
	// used to do manual memory deallocation. It is also called on eviction
	// as well as on rejection of the value.
	OnExit func(val V)
	// ShouldUpdate is called when a value already exists in cache and is being updated.
	// If ShouldUpdate returns true, the cache continues with the update (Set). If the
	// function returns false, no changes are made in the cache. If the value doesn't
	// already exist, the cache continues with setting that value for the given key.
	//
	// In this function, you can check whether the new value is valid. For example, if
	// your value has a timestamp associated with it, you could check whether the new
	// value has the latest timestamp, preventing you from setting an older value.
	ShouldUpdate func(cur, prev V) bool
	// KeyToHash function is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	//
	// Ristretto has a variety of defaults depending on the underlying interface type
	// (https://github.com/hypermodeinc/ristretto/blob/main/z/z.go#L19-L41).
	//
	// Note that if you want 128bit hashes you should use both of the values
	// in the return of the function. If you want to use 64bit hashes, you can
	// just return the first uint64 and return 0 for the second uint64.
	KeyToHash func(key K) (uint64, uint64)
	// Cost evaluates a value and outputs a corresponding cost. This function is run
	// after Set is called for a new item or an item is updated with a cost param of 0.
	//
	// Cost is an optional function you can pass to the Config in order to evaluate
	// item cost at runtime, and only when the Set call isn't going to be dropped. This
	// is useful if calculating item cost is particularly expensive and you don't want to
	// waste time on items that will be dropped anyways.
	//
	// To signal to Ristretto that you'd like to use this Cost function:
	// 1. Set the Cost field to a non-nil function.
	// 2. When calling Set for new items or item updates, use a `cost` of 0.
	Cost func(value V) int64
	// IgnoreInternalCost set to true indicates to the cache that the cost of
	// internally storing the value should be ignored. This is useful when the
	// cost passed to set is not using bytes as units. Keep in mind that setting
	// this to true will increase the memory usage.
	IgnoreInternalCost bool
	// TtlTickerDurationInSec sets the value of time ticker for cleanup keys on TTL expiry.
	TtlTickerDurationInSec int64
}
// itemFlag describes which kind of mutation an Item carries through setBuf.
type itemFlag byte

const (
	// itemNew marks an insert of a key not previously present.
	itemNew itemFlag = iota
	// itemDelete marks a removal of a key.
	itemDelete
	// itemUpdate marks a replacement of an existing key's value.
	itemUpdate
)
// Item is a full representation of what's stored in the cache for each key-value pair.
type Item[V any] struct {
	// flag records the pending mutation type (new/update/delete).
	flag itemFlag
	// Key is the 64-bit hash of the original key.
	Key uint64
	// Conflict is a second hash used to detect collisions on Key.
	Conflict uint64
	Value    V
	// Cost is the item's cost; it may be recomputed via Config.Cost and have
	// itemSize added to it (see processItems).
	Cost int64
	// Expiration is when the item expires; the zero time means no expiration.
	Expiration time.Time
	// wait, when non-nil, marks a sentinel used by Wait: the consumer closes
	// it instead of applying a mutation.
	wait chan struct{}
}
  180. // NewCache returns a new Cache instance and any configuration errors, if any.
  181. func NewCache[K Key, V any](config *Config[K, V]) (*Cache[K, V], error) {
  182. switch {
  183. case config.NumCounters == 0:
  184. return nil, errors.New("NumCounters can't be zero")
  185. case config.NumCounters < 0:
  186. return nil, errors.New("NumCounters can't be negative")
  187. case config.MaxCost == 0:
  188. return nil, errors.New("MaxCost can't be zero")
  189. case config.MaxCost < 0:
  190. return nil, errors.New("MaxCost can't be negative")
  191. case config.BufferItems == 0:
  192. return nil, errors.New("BufferItems can't be zero")
  193. case config.BufferItems < 0:
  194. return nil, errors.New("BufferItems can't be negative")
  195. case config.TtlTickerDurationInSec == 0:
  196. config.TtlTickerDurationInSec = bucketDurationSecs
  197. }
  198. policy := newPolicy[V](config.NumCounters, config.MaxCost)
  199. cache := &Cache[K, V]{
  200. storedItems: newStore[V](),
  201. cachePolicy: policy,
  202. getBuf: newRingBuffer(policy, config.BufferItems),
  203. setBuf: make(chan *Item[V], setBufSize),
  204. keyToHash: config.KeyToHash,
  205. stop: make(chan struct{}),
  206. done: make(chan struct{}),
  207. cost: config.Cost,
  208. ignoreInternalCost: config.IgnoreInternalCost,
  209. cleanupTicker: time.NewTicker(time.Duration(config.TtlTickerDurationInSec) * time.Second / 2),
  210. }
  211. cache.storedItems.SetShouldUpdateFn(config.ShouldUpdate)
  212. cache.onExit = func(val V) {
  213. if config.OnExit != nil {
  214. config.OnExit(val)
  215. }
  216. }
  217. cache.onEvict = func(item *Item[V]) {
  218. if config.OnEvict != nil {
  219. config.OnEvict(item)
  220. }
  221. cache.onExit(item.Value)
  222. }
  223. cache.onReject = func(item *Item[V]) {
  224. if config.OnReject != nil {
  225. config.OnReject(item)
  226. }
  227. cache.onExit(item.Value)
  228. }
  229. if cache.keyToHash == nil {
  230. cache.keyToHash = z.KeyToHash[K]
  231. }
  232. if config.Metrics {
  233. cache.collectMetrics()
  234. }
  235. // NOTE: benchmarks seem to show that performance decreases the more
  236. // goroutines we have running cache.processItems(), so 1 should
  237. // usually be sufficient
  238. go cache.processItems()
  239. return cache, nil
  240. }
  241. // Wait blocks until all buffered writes have been applied. This ensures a call to Set()
  242. // will be visible to future calls to Get().
  243. func (c *Cache[K, V]) Wait() {
  244. if c == nil || c.isClosed.Load() {
  245. return
  246. }
  247. wait := make(chan struct{})
  248. c.setBuf <- &Item[V]{wait: wait}
  249. <-wait
  250. }
  251. // Get returns the value (if any) and a boolean representing whether the
  252. // value was found or not. The value can be nil and the boolean can be true at
  253. // the same time. Get will not return expired items.
  254. func (c *Cache[K, V]) Get(key K) (V, bool) {
  255. if c == nil || c.isClosed.Load() {
  256. return zeroValue[V](), false
  257. }
  258. keyHash, conflictHash := c.keyToHash(key)
  259. c.getBuf.Push(keyHash)
  260. value, ok := c.storedItems.Get(keyHash, conflictHash)
  261. if ok {
  262. c.Metrics.add(hit, keyHash, 1)
  263. } else {
  264. c.Metrics.add(miss, keyHash, 1)
  265. }
  266. return value, ok
  267. }
  268. // Set attempts to add the key-value item to the cache. If it returns false,
  269. // then the Set was dropped and the key-value item isn't added to the cache. If
  270. // it returns true, there's still a chance it could be dropped by the policy if
  271. // its determined that the key-value item isn't worth keeping, but otherwise the
  272. // item will be added and other items will be evicted in order to make room.
  273. //
  274. // To dynamically evaluate the items cost using the Config.Coster function, set
  275. // the cost parameter to 0 and Coster will be ran when needed in order to find
  276. // the items true cost.
  277. //
  278. // Set writes the value of type V as is. If type V is a pointer type, It is ok
  279. // to update the memory pointed to by the pointer. Updating the pointer itself
  280. // will not be reflected in the cache. Be careful when using slice types as the
  281. // value type V. Calling `append` may update the underlined array pointer which
  282. // will not be reflected in the cache.
  283. func (c *Cache[K, V]) Set(key K, value V, cost int64) bool {
  284. return c.SetWithTTL(key, value, cost, 0*time.Second)
  285. }
  286. // SetWithTTL works like Set but adds a key-value pair to the cache that will expire
  287. // after the specified TTL (time to live) has passed. A zero value means the value never
  288. // expires, which is identical to calling Set. A negative value is a no-op and the value
  289. // is discarded.
  290. //
  291. // See Set for more information.
  292. func (c *Cache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
  293. if c == nil || c.isClosed.Load() {
  294. return false
  295. }
  296. var expiration time.Time
  297. switch {
  298. case ttl == 0:
  299. // No expiration.
  300. break
  301. case ttl < 0:
  302. // Treat this a no-op.
  303. return false
  304. default:
  305. expiration = time.Now().Add(ttl)
  306. }
  307. keyHash, conflictHash := c.keyToHash(key)
  308. i := &Item[V]{
  309. flag: itemNew,
  310. Key: keyHash,
  311. Conflict: conflictHash,
  312. Value: value,
  313. Cost: cost,
  314. Expiration: expiration,
  315. }
  316. // cost is eventually updated. The expiration must also be immediately updated
  317. // to prevent items from being prematurely removed from the map.
  318. if prev, ok := c.storedItems.Update(i); ok {
  319. c.onExit(prev)
  320. i.flag = itemUpdate
  321. }
  322. // Attempt to send item to cachePolicy.
  323. select {
  324. case c.setBuf <- i:
  325. return true
  326. default:
  327. if i.flag == itemUpdate {
  328. // Return true if this was an update operation since we've already
  329. // updated the storedItems. For all the other operations (set/delete), we
  330. // return false which means the item was not inserted.
  331. return true
  332. }
  333. c.Metrics.add(dropSets, keyHash, 1)
  334. return false
  335. }
  336. }
  337. // Del deletes the key-value item from the cache if it exists.
  338. func (c *Cache[K, V]) Del(key K) {
  339. if c == nil || c.isClosed.Load() {
  340. return
  341. }
  342. keyHash, conflictHash := c.keyToHash(key)
  343. // Delete immediately.
  344. _, prev := c.storedItems.Del(keyHash, conflictHash)
  345. c.onExit(prev)
  346. // If we've set an item, it would be applied slightly later.
  347. // So we must push the same item to `setBuf` with the deletion flag.
  348. // This ensures that if a set is followed by a delete, it will be
  349. // applied in the correct order.
  350. c.setBuf <- &Item[V]{
  351. flag: itemDelete,
  352. Key: keyHash,
  353. Conflict: conflictHash,
  354. }
  355. }
  356. // GetTTL returns the TTL for the specified key and a bool that is true if the
  357. // item was found and is not expired.
  358. func (c *Cache[K, V]) GetTTL(key K) (time.Duration, bool) {
  359. if c == nil {
  360. return 0, false
  361. }
  362. keyHash, conflictHash := c.keyToHash(key)
  363. if _, ok := c.storedItems.Get(keyHash, conflictHash); !ok {
  364. // not found
  365. return 0, false
  366. }
  367. expiration := c.storedItems.Expiration(keyHash)
  368. if expiration.IsZero() {
  369. // found but no expiration
  370. return 0, true
  371. }
  372. if time.Now().After(expiration) {
  373. // found but expired
  374. return 0, false
  375. }
  376. return time.Until(expiration), true
  377. }
// Close stops all goroutines and closes all channels. The cache is unusable
// afterwards: all exported methods check isClosed and become no-ops.
func (c *Cache[K, V]) Close() {
	if c == nil || c.isClosed.Load() {
		return
	}
	// Clear drains setBuf and restarts processItems, so it must run while
	// the cache is still marked open (Clear itself returns early once
	// isClosed is set).
	c.Clear()
	// Block until the (restarted) processItems goroutine is returned.
	c.stop <- struct{}{}
	<-c.done
	close(c.stop)
	close(c.done)
	close(c.setBuf)
	c.cachePolicy.Close()
	c.cleanupTicker.Stop()
	// Mark closed last; see the ordering note above.
	c.isClosed.Store(true)
}
// Clear empties the hashmap and zeroes all cachePolicy counters. Note that this is
// not an atomic operation (but that shouldn't be a problem as it's assumed that
// Set/Get calls won't be occurring until after this).
func (c *Cache[K, V]) Clear() {
	if c == nil || c.isClosed.Load() {
		return
	}
	// Block until processItems goroutine is returned, so nothing else is
	// consuming setBuf while we drain it below.
	c.stop <- struct{}{}
	<-c.done
	// Clear out the setBuf channel.
loop:
	for {
		select {
		case i := <-c.setBuf:
			if i.wait != nil {
				// Sentinel from Wait: release the waiter instead of
				// treating it as a mutation.
				close(i.wait)
				continue
			}
			if i.flag != itemUpdate {
				// For itemUpdate the value was already placed in storedItems
				// by SetWithTTL, so onEvict must not be called for it here.
				c.onEvict(i)
			}
		default:
			break loop
		}
	}
	// Clear value hashmap and cachePolicy data.
	c.cachePolicy.Clear()
	c.storedItems.Clear(c.onEvict)
	// Only reset metrics if they're enabled.
	if c.Metrics != nil {
		c.Metrics.Clear()
	}
	// Restart processItems goroutine.
	go c.processItems()
}
  432. // MaxCost returns the max cost of the cache.
  433. func (c *Cache[K, V]) MaxCost() int64 {
  434. if c == nil {
  435. return 0
  436. }
  437. return c.cachePolicy.MaxCost()
  438. }
  439. // UpdateMaxCost updates the maxCost of an existing cache.
  440. func (c *Cache[K, V]) UpdateMaxCost(maxCost int64) {
  441. if c == nil {
  442. return
  443. }
  444. c.cachePolicy.UpdateMaxCost(maxCost)
  445. }
  446. // RemainingCost returns the remaining cost capacity (MaxCost - Used) of an existing cache.
  447. func (c *Cache[K, V]) RemainingCost() int64 {
  448. if c == nil {
  449. return 0
  450. }
  451. return c.cachePolicy.Cap()
  452. }
// processItems is run by goroutines processing the Set buffer. It is the sole
// consumer of setBuf, applies buffered mutations to the store and policy,
// performs periodic TTL cleanup, and exits when signaled via c.stop.
func (c *Cache[K, V]) processItems() {
	// startTs records admission times so eviction can report key lifetimes;
	// it is bounded to numToKeep entries by evicting arbitrary map entries.
	startTs := make(map[uint64]time.Time)
	numToKeep := 100000 // TODO: Make this configurable via options.
	trackAdmission := func(key uint64) {
		if c.Metrics == nil {
			return
		}
		startTs[key] = time.Now()
		if len(startTs) > numToKeep {
			// Drop arbitrary entries (map iteration order) until back under
			// the cap.
			for k := range startTs {
				if len(startTs) <= numToKeep {
					break
				}
				delete(startTs, k)
			}
		}
	}
	// onEvict wraps c.onEvict to also record the key's lifetime metric.
	onEvict := func(i *Item[V]) {
		if ts, has := startTs[i.Key]; has {
			c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
			delete(startTs, i.Key)
		}
		if c.onEvict != nil {
			c.onEvict(i)
		}
	}
	for {
		select {
		case i := <-c.setBuf:
			// A non-nil wait channel marks a Wait() sentinel: everything
			// buffered before it has now been applied, so just signal.
			if i.wait != nil {
				close(i.wait)
				continue
			}
			// Calculate item cost value if new or update.
			if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
				i.Cost = c.cost(i.Value)
			}
			if !c.ignoreInternalCost {
				// Add the cost of internally storing the object.
				i.Cost += itemSize
			}
			switch i.flag {
			case itemNew:
				victims, added := c.cachePolicy.Add(i.Key, i.Cost)
				if added {
					c.storedItems.Set(i)
					c.Metrics.add(keyAdd, i.Key, 1)
					trackAdmission(i.Key)
				} else {
					c.onReject(i)
				}
				// Evict whatever the policy displaced to make room.
				for _, victim := range victims {
					victim.Conflict, victim.Value = c.storedItems.Del(victim.Key, 0)
					onEvict(victim)
				}
			case itemUpdate:
				// The store was already updated in SetWithTTL; only the
				// policy's cost bookkeeping needs adjusting.
				c.cachePolicy.Update(i.Key, i.Cost)
			case itemDelete:
				c.cachePolicy.Del(i.Key) // Deals with metrics updates.
				_, val := c.storedItems.Del(i.Key, i.Conflict)
				c.onExit(val)
			}
		case <-c.cleanupTicker.C:
			// Periodic sweep of expired entries.
			c.storedItems.Cleanup(c.cachePolicy, onEvict)
		case <-c.stop:
			// Acknowledge shutdown to the goroutine that sent on c.stop.
			c.done <- struct{}{}
			return
		}
	}
}
  524. // collectMetrics just creates a new *Metrics instance and adds the pointers
  525. // to the cache and policy instances.
  526. func (c *Cache[K, V]) collectMetrics() {
  527. c.Metrics = newMetrics()
  528. c.cachePolicy.CollectMetrics(c.Metrics)
  529. }
// metricType enumerates the counters tracked by Metrics; the constants below
// index into Metrics.all.
type metricType int

const (
	// The following 2 keep track of hits and misses.
	hit = iota
	miss
	// The following 3 keep track of number of keys added, updated and evicted.
	keyAdd
	keyUpdate
	keyEvict
	// The following 2 keep track of cost of keys added and evicted.
	costAdd
	costEvict
	// The following keep track of how many sets were dropped or rejected later.
	dropSets
	rejectSets
	// The following 2 keep track of how many gets were kept and dropped on the
	// floor.
	dropGets
	keepGets
	// This should be the final enum. Other enums should be set before this.
	doNotUse
)
  552. func stringFor(t metricType) string {
  553. switch t {
  554. case hit:
  555. return "hit"
  556. case miss:
  557. return "miss"
  558. case keyAdd:
  559. return "keys-added"
  560. case keyUpdate:
  561. return "keys-updated"
  562. case keyEvict:
  563. return "keys-evicted"
  564. case costAdd:
  565. return "cost-added"
  566. case costEvict:
  567. return "cost-evicted"
  568. case dropSets:
  569. return "sets-dropped"
  570. case rejectSets:
  571. return "sets-rejected" // by policy.
  572. case dropGets:
  573. return "gets-dropped"
  574. case keepGets:
  575. return "gets-kept"
  576. default:
  577. return "unidentified"
  578. }
  579. }
// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
type Metrics struct {
	// all holds, for each metric type, a bank of 256 counters; increments are
	// spread across them (see add) and summed on read (see get).
	all [doNotUse][]*uint64

	// mu guards life.
	mu   sync.RWMutex
	life *z.HistogramData // Tracks the life expectancy of a key.
}
  586. func newMetrics() *Metrics {
  587. s := &Metrics{
  588. life: z.NewHistogramData(z.HistogramBounds(1, 16)),
  589. }
  590. for i := 0; i < doNotUse; i++ {
  591. s.all[i] = make([]*uint64, 256)
  592. slice := s.all[i]
  593. for j := range slice {
  594. slice[j] = new(uint64)
  595. }
  596. }
  597. return s
  598. }
  599. func (p *Metrics) add(t metricType, hash, delta uint64) {
  600. if p == nil {
  601. return
  602. }
  603. valp := p.all[t]
  604. // Avoid false sharing by padding at least 64 bytes of space between two
  605. // atomic counters which would be incremented.
  606. idx := (hash % 25) * 10
  607. atomic.AddUint64(valp[idx], delta)
  608. }
  609. func (p *Metrics) get(t metricType) uint64 {
  610. if p == nil {
  611. return 0
  612. }
  613. valp := p.all[t]
  614. var total uint64
  615. for i := range valp {
  616. total += atomic.LoadUint64(valp[i])
  617. }
  618. return total
  619. }
// Hits is the number of Get calls where a value was found for the corresponding key.
func (p *Metrics) Hits() uint64 {
	return p.get(hit)
}

// Misses is the number of Get calls where a value was not found for the corresponding key.
func (p *Metrics) Misses() uint64 {
	return p.get(miss)
}

// KeysAdded is the total number of Set calls where a new key-value item was added.
func (p *Metrics) KeysAdded() uint64 {
	return p.get(keyAdd)
}

// KeysUpdated is the total number of Set calls where the value was updated.
func (p *Metrics) KeysUpdated() uint64 {
	return p.get(keyUpdate)
}

// KeysEvicted is the total number of keys evicted.
func (p *Metrics) KeysEvicted() uint64 {
	return p.get(keyEvict)
}

// CostAdded is the sum of costs that have been added (successful Set calls).
func (p *Metrics) CostAdded() uint64 {
	return p.get(costAdd)
}

// CostEvicted is the sum of all costs that have been evicted.
func (p *Metrics) CostEvicted() uint64 {
	return p.get(costEvict)
}

// SetsDropped is the number of Set calls that don't make it into internal
// buffers (due to contention or some other reason).
func (p *Metrics) SetsDropped() uint64 {
	return p.get(dropSets)
}

// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
func (p *Metrics) SetsRejected() uint64 {
	return p.get(rejectSets)
}

// GetsDropped is the number of Get counter increments that are dropped
// internally.
func (p *Metrics) GetsDropped() uint64 {
	return p.get(dropGets)
}

// GetsKept is the number of Get counter increments that are kept.
func (p *Metrics) GetsKept() uint64 {
	return p.get(keepGets)
}
  666. // Ratio is the number of Hits over all accesses (Hits + Misses). This is the
  667. // percentage of successful Get calls.
  668. func (p *Metrics) Ratio() float64 {
  669. if p == nil {
  670. return 0.0
  671. }
  672. hits, misses := p.get(hit), p.get(miss)
  673. if hits == 0 && misses == 0 {
  674. return 0.0
  675. }
  676. return float64(hits) / float64(hits+misses)
  677. }
  678. func (p *Metrics) trackEviction(numSeconds int64) {
  679. if p == nil {
  680. return
  681. }
  682. p.mu.Lock()
  683. defer p.mu.Unlock()
  684. p.life.Update(numSeconds)
  685. }
  686. func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
  687. if p == nil {
  688. return nil
  689. }
  690. p.mu.RLock()
  691. defer p.mu.RUnlock()
  692. return p.life.Copy()
  693. }
  694. // Clear resets all the metrics.
  695. func (p *Metrics) Clear() {
  696. if p == nil {
  697. return
  698. }
  699. for i := 0; i < doNotUse; i++ {
  700. for j := range p.all[i] {
  701. atomic.StoreUint64(p.all[i][j], 0)
  702. }
  703. }
  704. p.mu.Lock()
  705. p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
  706. p.mu.Unlock()
  707. }
  708. // String returns a string representation of the metrics.
  709. func (p *Metrics) String() string {
  710. if p == nil {
  711. return ""
  712. }
  713. var buf bytes.Buffer
  714. for i := 0; i < doNotUse; i++ {
  715. t := metricType(i)
  716. fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
  717. }
  718. fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
  719. fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
  720. return buf.String()
  721. }