cache.go 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802
/*
 * Copyright 2019 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Ristretto is a fast, fixed size, in-memory cache with a dual focus on
// throughput and hit ratio performance. You can easily add Ristretto to an
// existing system and keep the most valuable data where you need it.
package ristretto
import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/dgraph-io/ristretto/v2/z"
)
  30. var (
  31. // TODO: find the optimal value for this or make it configurable
  32. setBufSize = 32 * 1024
  33. )
  34. const itemSize = int64(unsafe.Sizeof(storeItem[any]{}))
  35. func zeroValue[T any]() T {
  36. var zero T
  37. return zero
  38. }
  39. // Key is the generic type to represent the keys type in key-value pair of the cache.
  40. type Key = z.Key
  41. // Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
  42. // policy and a Sampled LFU eviction policy. You can use the same Cache instance
  43. // from as many goroutines as you want.
  44. type Cache[K Key, V any] struct {
  45. // storedItems is the central concurrent hashmap where key-value items are stored.
  46. storedItems store[V]
  47. // cachePolicy determines what gets let in to the cache and what gets kicked out.
  48. cachePolicy *defaultPolicy[V]
  49. // getBuf is a custom ring buffer implementation that gets pushed to when
  50. // keys are read.
  51. getBuf *ringBuffer
  52. // setBuf is a buffer allowing us to batch/drop Sets during times of high
  53. // contention.
  54. setBuf chan *Item[V]
  55. // onEvict is called for item evictions.
  56. onEvict func(*Item[V])
  57. // onReject is called when an item is rejected via admission policy.
  58. onReject func(*Item[V])
  59. // onExit is called whenever a value goes out of scope from the cache.
  60. onExit (func(V))
  61. // KeyToHash function is used to customize the key hashing algorithm.
  62. // Each key will be hashed using the provided function. If keyToHash value
  63. // is not set, the default keyToHash function is used.
  64. keyToHash func(K) (uint64, uint64)
  65. // stop is used to stop the processItems goroutine.
  66. stop chan struct{}
  67. done chan struct{}
  68. // indicates whether cache is closed.
  69. isClosed atomic.Bool
  70. // cost calculates cost from a value.
  71. cost func(value V) int64
  72. // ignoreInternalCost dictates whether to ignore the cost of internally storing
  73. // the item in the cost calculation.
  74. ignoreInternalCost bool
  75. // cleanupTicker is used to periodically check for entries whose TTL has passed.
  76. cleanupTicker *time.Ticker
  77. // Metrics contains a running log of important statistics like hits, misses,
  78. // and dropped items.
  79. Metrics *Metrics
  80. }
  81. // Config is passed to NewCache for creating new Cache instances.
  82. type Config[K Key, V any] struct {
  83. // NumCounters determines the number of counters (keys) to keep that hold
  84. // access frequency information. It's generally a good idea to have more
  85. // counters than the max cache capacity, as this will improve eviction
  86. // accuracy and subsequent hit ratios.
  87. //
  88. // For example, if you expect your cache to hold 1,000,000 items when full,
  89. // NumCounters should be 10,000,000 (10x). Each counter takes up roughly
  90. // 3 bytes (4 bits for each counter * 4 copies plus about a byte per
  91. // counter for the bloom filter). Note that the number of counters is
  92. // internally rounded up to the nearest power of 2, so the space usage
  93. // may be a little larger than 3 bytes * NumCounters.
  94. //
  95. // We've seen good performance in setting this to 10x the number of items
  96. // you expect to keep in the cache when full.
  97. NumCounters int64
  98. // MaxCost is how eviction decisions are made. For example, if MaxCost is
  99. // 100 and a new item with a cost of 1 increases total cache cost to 101,
  100. // 1 item will be evicted.
  101. //
  102. // MaxCost can be considered as the cache capacity, in whatever units you
  103. // choose to use.
  104. //
  105. // For example, if you want the cache to have a max capacity of 100MB, you
  106. // would set MaxCost to 100,000,000 and pass an item's number of bytes as
  107. // the `cost` parameter for calls to Set. If new items are accepted, the
  108. // eviction process will take care of making room for the new item and not
  109. // overflowing the MaxCost value.
  110. //
  111. // MaxCost could be anything as long as it matches how you're using the cost
  112. // values when calling Set.
  113. MaxCost int64
  114. // BufferItems determines the size of Get buffers.
  115. //
  116. // Unless you have a rare use case, using `64` as the BufferItems value
  117. // results in good performance.
  118. //
  119. // If for some reason you see Get performance decreasing with lots of
  120. // contention (you shouldn't), try increasing this value in increments of 64.
  121. // This is a fine-tuning mechanism and you probably won't have to touch this.
  122. BufferItems int64
  123. // Metrics is true when you want variety of stats about the cache.
  124. // There is some overhead to keeping statistics, so you should only set this
  125. // flag to true when testing or throughput performance isn't a major factor.
  126. Metrics bool
  127. // OnEvict is called for every eviction with the evicted item.
  128. OnEvict func(item *Item[V])
  129. // OnReject is called for every rejection done via the policy.
  130. OnReject func(item *Item[V])
  131. // OnExit is called whenever a value is removed from cache. This can be
  132. // used to do manual memory deallocation. Would also be called on eviction
  133. // as well as on rejection of the value.
  134. OnExit func(val V)
  135. // ShouldUpdate is called when a value already exists in cache and is being updated.
  136. // If ShouldUpdate returns true, the cache continues with the update (Set). If the
  137. // function returns false, no changes are made in the cache. If the value doesn't
  138. // already exist, the cache continue with setting that value for the given key.
  139. //
  140. // In this function, you can check whether the new value is valid. For example, if
  141. // your value has timestamp assosicated with it, you could check whether the new
  142. // value has the latest timestamp, preventing you from setting an older value.
  143. ShouldUpdate func(cur, prev V) bool
  144. // KeyToHash function is used to customize the key hashing algorithm.
  145. // Each key will be hashed using the provided function. If keyToHash value
  146. // is not set, the default keyToHash function is used.
  147. //
  148. // Ristretto has a variety of defaults depending on the underlying interface type
  149. // https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
  150. //
  151. // Note that if you want 128bit hashes you should use the both the values
  152. // in the return of the function. If you want to use 64bit hashes, you can
  153. // just return the first uint64 and return 0 for the second uint64.
  154. KeyToHash func(key K) (uint64, uint64)
  155. // Cost evaluates a value and outputs a corresponding cost. This function is ran
  156. // after Set is called for a new item or an item is updated with a cost param of 0.
  157. //
  158. // Cost is an optional function you can pass to the Config in order to evaluate
  159. // item cost at runtime, and only whentthe Set call isn't going to be dropped. This
  160. // is useful if calculating item cost is particularly expensive and you don't want to
  161. // waste time on items that will be dropped anyways.
  162. //
  163. // To signal to Ristretto that you'd like to use this Cost function:
  164. // 1. Set the Cost field to a non-nil function.
  165. // 2. When calling Set for new items or item updates, use a `cost` of 0.
  166. Cost func(value V) int64
  167. // IgnoreInternalCost set to true indicates to the cache that the cost of
  168. // internally storing the value should be ignored. This is useful when the
  169. // cost passed to set is not using bytes as units. Keep in mind that setting
  170. // this to true will increase the memory usage.
  171. IgnoreInternalCost bool
  172. // TtlTickerDurationInSec sets the value of time ticker for cleanup keys on TTL expiry.
  173. TtlTickerDurationInSec int64
  174. }
  175. type itemFlag byte
  176. const (
  177. itemNew itemFlag = iota
  178. itemDelete
  179. itemUpdate
  180. )
  181. // Item is a full representation of what's stored in the cache for each key-value pair.
  182. type Item[V any] struct {
  183. flag itemFlag
  184. Key uint64
  185. Conflict uint64
  186. Value V
  187. Cost int64
  188. Expiration time.Time
  189. wg *sync.WaitGroup
  190. }
  191. // NewCache returns a new Cache instance and any configuration errors, if any.
  192. func NewCache[K Key, V any](config *Config[K, V]) (*Cache[K, V], error) {
  193. switch {
  194. case config.NumCounters == 0:
  195. return nil, errors.New("NumCounters can't be zero")
  196. case config.NumCounters < 0:
  197. return nil, errors.New("NumCounters can't be negative number")
  198. case config.MaxCost == 0:
  199. return nil, errors.New("MaxCost can't be zero")
  200. case config.MaxCost < 0:
  201. return nil, errors.New("MaxCost can't be be negative number")
  202. case config.BufferItems == 0:
  203. return nil, errors.New("BufferItems can't be zero")
  204. case config.BufferItems < 0:
  205. return nil, errors.New("BufferItems can't be be negative number")
  206. case config.TtlTickerDurationInSec == 0:
  207. config.TtlTickerDurationInSec = bucketDurationSecs
  208. }
  209. policy := newPolicy[V](config.NumCounters, config.MaxCost)
  210. cache := &Cache[K, V]{
  211. storedItems: newStore[V](),
  212. cachePolicy: policy,
  213. getBuf: newRingBuffer(policy, config.BufferItems),
  214. setBuf: make(chan *Item[V], setBufSize),
  215. keyToHash: config.KeyToHash,
  216. stop: make(chan struct{}),
  217. done: make(chan struct{}),
  218. cost: config.Cost,
  219. ignoreInternalCost: config.IgnoreInternalCost,
  220. cleanupTicker: time.NewTicker(time.Duration(config.TtlTickerDurationInSec) * time.Second / 2),
  221. }
  222. cache.storedItems.SetShouldUpdateFn(config.ShouldUpdate)
  223. cache.onExit = func(val V) {
  224. if config.OnExit != nil {
  225. config.OnExit(val)
  226. }
  227. }
  228. cache.onEvict = func(item *Item[V]) {
  229. if config.OnEvict != nil {
  230. config.OnEvict(item)
  231. }
  232. cache.onExit(item.Value)
  233. }
  234. cache.onReject = func(item *Item[V]) {
  235. if config.OnReject != nil {
  236. config.OnReject(item)
  237. }
  238. cache.onExit(item.Value)
  239. }
  240. if cache.keyToHash == nil {
  241. cache.keyToHash = z.KeyToHash[K]
  242. }
  243. if config.Metrics {
  244. cache.collectMetrics()
  245. }
  246. // NOTE: benchmarks seem to show that performance decreases the more
  247. // goroutines we have running cache.processItems(), so 1 should
  248. // usually be sufficient
  249. go cache.processItems()
  250. return cache, nil
  251. }
  252. // Wait blocks until all buffered writes have been applied. This ensures a call to Set()
  253. // will be visible to future calls to Get().
  254. func (c *Cache[K, V]) Wait() {
  255. if c == nil || c.isClosed.Load() {
  256. return
  257. }
  258. wg := &sync.WaitGroup{}
  259. wg.Add(1)
  260. c.setBuf <- &Item[V]{wg: wg}
  261. wg.Wait()
  262. }
  263. // Get returns the value (if any) and a boolean representing whether the
  264. // value was found or not. The value can be nil and the boolean can be true at
  265. // the same time. Get will not return expired items.
  266. func (c *Cache[K, V]) Get(key K) (V, bool) {
  267. if c == nil || c.isClosed.Load() {
  268. return zeroValue[V](), false
  269. }
  270. keyHash, conflictHash := c.keyToHash(key)
  271. c.getBuf.Push(keyHash)
  272. value, ok := c.storedItems.Get(keyHash, conflictHash)
  273. if ok {
  274. c.Metrics.add(hit, keyHash, 1)
  275. } else {
  276. c.Metrics.add(miss, keyHash, 1)
  277. }
  278. return value, ok
  279. }
  280. // Set attempts to add the key-value item to the cache. If it returns false,
  281. // then the Set was dropped and the key-value item isn't added to the cache. If
  282. // it returns true, there's still a chance it could be dropped by the policy if
  283. // its determined that the key-value item isn't worth keeping, but otherwise the
  284. // item will be added and other items will be evicted in order to make room.
  285. //
  286. // To dynamically evaluate the items cost using the Config.Coster function, set
  287. // the cost parameter to 0 and Coster will be ran when needed in order to find
  288. // the items true cost.
  289. //
  290. // Set writes the value of type V as is. If type V is a pointer type, It is ok
  291. // to update the memory pointed to by the pointer. Updating the pointer itself
  292. // will not be reflected in the cache. Be careful when using slice types as the
  293. // value type V. Calling `append` may update the underlined array pointer which
  294. // will not be reflected in the cache.
  295. func (c *Cache[K, V]) Set(key K, value V, cost int64) bool {
  296. return c.SetWithTTL(key, value, cost, 0*time.Second)
  297. }
  298. // SetWithTTL works like Set but adds a key-value pair to the cache that will expire
  299. // after the specified TTL (time to live) has passed. A zero value means the value never
  300. // expires, which is identical to calling Set. A negative value is a no-op and the value
  301. // is discarded.
  302. //
  303. // See Set for more information.
  304. func (c *Cache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
  305. if c == nil || c.isClosed.Load() {
  306. return false
  307. }
  308. var expiration time.Time
  309. switch {
  310. case ttl == 0:
  311. // No expiration.
  312. break
  313. case ttl < 0:
  314. // Treat this a no-op.
  315. return false
  316. default:
  317. expiration = time.Now().Add(ttl)
  318. }
  319. keyHash, conflictHash := c.keyToHash(key)
  320. i := &Item[V]{
  321. flag: itemNew,
  322. Key: keyHash,
  323. Conflict: conflictHash,
  324. Value: value,
  325. Cost: cost,
  326. Expiration: expiration,
  327. }
  328. // cost is eventually updated. The expiration must also be immediately updated
  329. // to prevent items from being prematurely removed from the map.
  330. if prev, ok := c.storedItems.Update(i); ok {
  331. c.onExit(prev)
  332. i.flag = itemUpdate
  333. }
  334. // Attempt to send item to cachePolicy.
  335. select {
  336. case c.setBuf <- i:
  337. return true
  338. default:
  339. if i.flag == itemUpdate {
  340. // Return true if this was an update operation since we've already
  341. // updated the storedItems. For all the other operations (set/delete), we
  342. // return false which means the item was not inserted.
  343. return true
  344. }
  345. c.Metrics.add(dropSets, keyHash, 1)
  346. return false
  347. }
  348. }
  349. // Del deletes the key-value item from the cache if it exists.
  350. func (c *Cache[K, V]) Del(key K) {
  351. if c == nil || c.isClosed.Load() {
  352. return
  353. }
  354. keyHash, conflictHash := c.keyToHash(key)
  355. // Delete immediately.
  356. _, prev := c.storedItems.Del(keyHash, conflictHash)
  357. c.onExit(prev)
  358. // If we've set an item, it would be applied slightly later.
  359. // So we must push the same item to `setBuf` with the deletion flag.
  360. // This ensures that if a set is followed by a delete, it will be
  361. // applied in the correct order.
  362. c.setBuf <- &Item[V]{
  363. flag: itemDelete,
  364. Key: keyHash,
  365. Conflict: conflictHash,
  366. }
  367. }
  368. // GetTTL returns the TTL for the specified key and a bool that is true if the
  369. // item was found and is not expired.
  370. func (c *Cache[K, V]) GetTTL(key K) (time.Duration, bool) {
  371. if c == nil {
  372. return 0, false
  373. }
  374. keyHash, conflictHash := c.keyToHash(key)
  375. if _, ok := c.storedItems.Get(keyHash, conflictHash); !ok {
  376. // not found
  377. return 0, false
  378. }
  379. expiration := c.storedItems.Expiration(keyHash)
  380. if expiration.IsZero() {
  381. // found but no expiration
  382. return 0, true
  383. }
  384. if time.Now().After(expiration) {
  385. // found but expired
  386. return 0, false
  387. }
  388. return time.Until(expiration), true
  389. }
  390. // Close stops all goroutines and closes all channels.
  391. func (c *Cache[K, V]) Close() {
  392. if c == nil || c.isClosed.Load() {
  393. return
  394. }
  395. c.Clear()
  396. // Block until processItems goroutine is returned.
  397. c.stop <- struct{}{}
  398. <-c.done
  399. close(c.stop)
  400. close(c.done)
  401. close(c.setBuf)
  402. c.cachePolicy.Close()
  403. c.cleanupTicker.Stop()
  404. c.isClosed.Store(true)
  405. }
  406. // Clear empties the hashmap and zeroes all cachePolicy counters. Note that this is
  407. // not an atomic operation (but that shouldn't be a problem as it's assumed that
  408. // Set/Get calls won't be occurring until after this).
  409. func (c *Cache[K, V]) Clear() {
  410. if c == nil || c.isClosed.Load() {
  411. return
  412. }
  413. // Block until processItems goroutine is returned.
  414. c.stop <- struct{}{}
  415. <-c.done
  416. // Clear out the setBuf channel.
  417. loop:
  418. for {
  419. select {
  420. case i := <-c.setBuf:
  421. if i.wg != nil {
  422. i.wg.Done()
  423. continue
  424. }
  425. if i.flag != itemUpdate {
  426. // In itemUpdate, the value is already set in the storedItems. So, no need to call
  427. // onEvict here.
  428. c.onEvict(i)
  429. }
  430. default:
  431. break loop
  432. }
  433. }
  434. // Clear value hashmap and cachePolicy data.
  435. c.cachePolicy.Clear()
  436. c.storedItems.Clear(c.onEvict)
  437. // Only reset metrics if they're enabled.
  438. if c.Metrics != nil {
  439. c.Metrics.Clear()
  440. }
  441. // Restart processItems goroutine.
  442. go c.processItems()
  443. }
  444. // MaxCost returns the max cost of the cache.
  445. func (c *Cache[K, V]) MaxCost() int64 {
  446. if c == nil {
  447. return 0
  448. }
  449. return c.cachePolicy.MaxCost()
  450. }
  451. // UpdateMaxCost updates the maxCost of an existing cache.
  452. func (c *Cache[K, V]) UpdateMaxCost(maxCost int64) {
  453. if c == nil {
  454. return
  455. }
  456. c.cachePolicy.UpdateMaxCost(maxCost)
  457. }
  458. // processItems is ran by goroutines processing the Set buffer.
  459. func (c *Cache[K, V]) processItems() {
  460. startTs := make(map[uint64]time.Time)
  461. numToKeep := 100000 // TODO: Make this configurable via options.
  462. trackAdmission := func(key uint64) {
  463. if c.Metrics == nil {
  464. return
  465. }
  466. startTs[key] = time.Now()
  467. if len(startTs) > numToKeep {
  468. for k := range startTs {
  469. if len(startTs) <= numToKeep {
  470. break
  471. }
  472. delete(startTs, k)
  473. }
  474. }
  475. }
  476. onEvict := func(i *Item[V]) {
  477. if ts, has := startTs[i.Key]; has {
  478. c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
  479. delete(startTs, i.Key)
  480. }
  481. if c.onEvict != nil {
  482. c.onEvict(i)
  483. }
  484. }
  485. for {
  486. select {
  487. case i := <-c.setBuf:
  488. if i.wg != nil {
  489. i.wg.Done()
  490. continue
  491. }
  492. // Calculate item cost value if new or update.
  493. if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
  494. i.Cost = c.cost(i.Value)
  495. }
  496. if !c.ignoreInternalCost {
  497. // Add the cost of internally storing the object.
  498. i.Cost += itemSize
  499. }
  500. switch i.flag {
  501. case itemNew:
  502. victims, added := c.cachePolicy.Add(i.Key, i.Cost)
  503. if added {
  504. c.storedItems.Set(i)
  505. c.Metrics.add(keyAdd, i.Key, 1)
  506. trackAdmission(i.Key)
  507. } else {
  508. c.onReject(i)
  509. }
  510. for _, victim := range victims {
  511. victim.Conflict, victim.Value = c.storedItems.Del(victim.Key, 0)
  512. onEvict(victim)
  513. }
  514. case itemUpdate:
  515. c.cachePolicy.Update(i.Key, i.Cost)
  516. case itemDelete:
  517. c.cachePolicy.Del(i.Key) // Deals with metrics updates.
  518. _, val := c.storedItems.Del(i.Key, i.Conflict)
  519. c.onExit(val)
  520. }
  521. case <-c.cleanupTicker.C:
  522. c.storedItems.Cleanup(c.cachePolicy, onEvict)
  523. case <-c.stop:
  524. c.done <- struct{}{}
  525. return
  526. }
  527. }
  528. }
  529. // collectMetrics just creates a new *Metrics instance and adds the pointers
  530. // to the cache and policy instances.
  531. func (c *Cache[K, V]) collectMetrics() {
  532. c.Metrics = newMetrics()
  533. c.cachePolicy.CollectMetrics(c.Metrics)
  534. }
  535. type metricType int
  536. const (
  537. // The following 2 keep track of hits and misses.
  538. hit = iota
  539. miss
  540. // The following 3 keep track of number of keys added, updated and evicted.
  541. keyAdd
  542. keyUpdate
  543. keyEvict
  544. // The following 2 keep track of cost of keys added and evicted.
  545. costAdd
  546. costEvict
  547. // The following keep track of how many sets were dropped or rejected later.
  548. dropSets
  549. rejectSets
  550. // The following 2 keep track of how many gets were kept and dropped on the
  551. // floor.
  552. dropGets
  553. keepGets
  554. // This should be the final enum. Other enums should be set before this.
  555. doNotUse
  556. )
  557. func stringFor(t metricType) string {
  558. switch t {
  559. case hit:
  560. return "hit"
  561. case miss:
  562. return "miss"
  563. case keyAdd:
  564. return "keys-added"
  565. case keyUpdate:
  566. return "keys-updated"
  567. case keyEvict:
  568. return "keys-evicted"
  569. case costAdd:
  570. return "cost-added"
  571. case costEvict:
  572. return "cost-evicted"
  573. case dropSets:
  574. return "sets-dropped"
  575. case rejectSets:
  576. return "sets-rejected" // by policy.
  577. case dropGets:
  578. return "gets-dropped"
  579. case keepGets:
  580. return "gets-kept"
  581. default:
  582. return "unidentified"
  583. }
  584. }
  585. // Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
  586. type Metrics struct {
  587. all [doNotUse][]*uint64
  588. mu sync.RWMutex
  589. life *z.HistogramData // Tracks the life expectancy of a key.
  590. }
  591. func newMetrics() *Metrics {
  592. s := &Metrics{
  593. life: z.NewHistogramData(z.HistogramBounds(1, 16)),
  594. }
  595. for i := 0; i < doNotUse; i++ {
  596. s.all[i] = make([]*uint64, 256)
  597. slice := s.all[i]
  598. for j := range slice {
  599. slice[j] = new(uint64)
  600. }
  601. }
  602. return s
  603. }
  604. func (p *Metrics) add(t metricType, hash, delta uint64) {
  605. if p == nil {
  606. return
  607. }
  608. valp := p.all[t]
  609. // Avoid false sharing by padding at least 64 bytes of space between two
  610. // atomic counters which would be incremented.
  611. idx := (hash % 25) * 10
  612. atomic.AddUint64(valp[idx], delta)
  613. }
  614. func (p *Metrics) get(t metricType) uint64 {
  615. if p == nil {
  616. return 0
  617. }
  618. valp := p.all[t]
  619. var total uint64
  620. for i := range valp {
  621. total += atomic.LoadUint64(valp[i])
  622. }
  623. return total
  624. }
  625. // Hits is the number of Get calls where a value was found for the corresponding key.
  626. func (p *Metrics) Hits() uint64 {
  627. return p.get(hit)
  628. }
  629. // Misses is the number of Get calls where a value was not found for the corresponding key.
  630. func (p *Metrics) Misses() uint64 {
  631. return p.get(miss)
  632. }
  633. // KeysAdded is the total number of Set calls where a new key-value item was added.
  634. func (p *Metrics) KeysAdded() uint64 {
  635. return p.get(keyAdd)
  636. }
  637. // KeysUpdated is the total number of Set calls where the value was updated.
  638. func (p *Metrics) KeysUpdated() uint64 {
  639. return p.get(keyUpdate)
  640. }
  641. // KeysEvicted is the total number of keys evicted.
  642. func (p *Metrics) KeysEvicted() uint64 {
  643. return p.get(keyEvict)
  644. }
  645. // CostAdded is the sum of costs that have been added (successful Set calls).
  646. func (p *Metrics) CostAdded() uint64 {
  647. return p.get(costAdd)
  648. }
  649. // CostEvicted is the sum of all costs that have been evicted.
  650. func (p *Metrics) CostEvicted() uint64 {
  651. return p.get(costEvict)
  652. }
  653. // SetsDropped is the number of Set calls that don't make it into internal
  654. // buffers (due to contention or some other reason).
  655. func (p *Metrics) SetsDropped() uint64 {
  656. return p.get(dropSets)
  657. }
  658. // SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
  659. func (p *Metrics) SetsRejected() uint64 {
  660. return p.get(rejectSets)
  661. }
  662. // GetsDropped is the number of Get counter increments that are dropped
  663. // internally.
  664. func (p *Metrics) GetsDropped() uint64 {
  665. return p.get(dropGets)
  666. }
  667. // GetsKept is the number of Get counter increments that are kept.
  668. func (p *Metrics) GetsKept() uint64 {
  669. return p.get(keepGets)
  670. }
  671. // Ratio is the number of Hits over all accesses (Hits + Misses). This is the
  672. // percentage of successful Get calls.
  673. func (p *Metrics) Ratio() float64 {
  674. if p == nil {
  675. return 0.0
  676. }
  677. hits, misses := p.get(hit), p.get(miss)
  678. if hits == 0 && misses == 0 {
  679. return 0.0
  680. }
  681. return float64(hits) / float64(hits+misses)
  682. }
  683. func (p *Metrics) trackEviction(numSeconds int64) {
  684. if p == nil {
  685. return
  686. }
  687. p.mu.Lock()
  688. defer p.mu.Unlock()
  689. p.life.Update(numSeconds)
  690. }
  691. func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
  692. if p == nil {
  693. return nil
  694. }
  695. p.mu.RLock()
  696. defer p.mu.RUnlock()
  697. return p.life.Copy()
  698. }
  699. // Clear resets all the metrics.
  700. func (p *Metrics) Clear() {
  701. if p == nil {
  702. return
  703. }
  704. for i := 0; i < doNotUse; i++ {
  705. for j := range p.all[i] {
  706. atomic.StoreUint64(p.all[i][j], 0)
  707. }
  708. }
  709. p.mu.Lock()
  710. p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
  711. p.mu.Unlock()
  712. }
  713. // String returns a string representation of the metrics.
  714. func (p *Metrics) String() string {
  715. if p == nil {
  716. return ""
  717. }
  718. var buf bytes.Buffer
  719. for i := 0; i < doNotUse; i++ {
  720. t := metricType(i)
  721. fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
  722. }
  723. fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
  724. fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
  725. return buf.String()
  726. }