|
11 | 11 | namespace BitFaster.Caching.Lru
|
12 | 12 | {
|
13 | 13 | /// <summary>
|
14 |
| - /// Pseudo LRU implementation where LRU list is composed of 3 segments: hot, warm and cold. Cost of maintaining |
15 |
| - /// segments is amortized across requests. Items are only cycled when capacity is exceeded. Pure read does |
16 |
| - /// not cycle items if all segments are within capacity constraints. |
17 |
| - /// There are no global locks. On cache miss, a new item is added. Tail items in each segment are dequeued, |
18 |
| - /// examined, and are either enqueued or discarded. |
19 |
| - /// This scheme of hot, warm and cold is based on the implementation used in MemCached described online here: |
20 |
| - /// https://memcached.org/blog/modern-lru/ |
| 14 | + /// A pseudo LRU based on the TU-Q eviction policy. The LRU list is composed of 3 segments: hot, warm and cold. |
| 15 | + /// Cost of maintaining segments is amortized across requests. Items are only cycled when capacity is exceeded. |
| 16 | + /// Pure read does not cycle items if all segments are within capacity constraints. There are no global locks. |
| 17 | + /// On cache miss, a new item is added. Tail items in each segment are dequeued, examined, and are either enqueued |
| 18 | + /// or discarded. |
| 19 | + /// The TU-Q scheme of hot, warm and cold is similar to that used in MemCached (https://memcached.org/blog/modern-lru/) |
| 20 | + /// and OpenBSD (https://flak.tedunangst.com/post/2Q-buffer-cache-algorithm), but does not use a background thread |
| 21 | + /// to maintain the internal queues. |
21 | 22 | /// </summary>
|
22 | 23 | /// <remarks>
|
23 | 24 | /// Each segment has a capacity. When segment capacity is exceeded, items are moved as follows:
|
24 |
| - /// 1. New items are added to hot, WasAccessed = false |
25 |
| - /// 2. When items are accessed, update WasAccessed = true |
26 |
| - /// 3. When items are moved WasAccessed is set to false. |
27 |
| - /// 4. When hot is full, hot tail is moved to either Warm or Cold depending on WasAccessed. |
28 |
| - /// 5. When warm is full, warm tail is moved to warm head or cold depending on WasAccessed. |
29 |
| - /// 6. When cold is full, cold tail is moved to warm head or removed from dictionary on depending on WasAccessed. |
| 25 | + /// <list type="number"> |
| 26 | + /// <item><description>New items are added to hot, WasAccessed = false.</description></item> |
| 27 | + /// <item><description>When items are accessed, update WasAccessed = true.</description></item> |
| 28 | + /// <item><description>When items are moved WasAccessed is set to false.</description></item> |
| 29 | + /// <item><description>When hot is full, hot tail is moved to either Warm or Cold depending on WasAccessed.</description></item> |
| 30 | + /// <item><description>When warm is full, warm tail is moved to warm head or cold depending on WasAccessed.</description></item> |
| 31 | + /// <item><description>When cold is full, cold tail is moved to warm head or removed from dictionary depending on WasAccessed.</description></item> |
| 32 | + /// </list> |
30 | 33 | /// </remarks>
|
31 | 34 | public class ConcurrentLruCore<K, V, I, P, T> : ICache<K, V>, IAsyncCache<K, V>, IEnumerable<KeyValuePair<K, V>>
|
32 | 35 | where I : LruItem<K, V>
|
|
0 commit comments