
Commit 9150637

docs (#249)

* docs
* hc

1 parent 240de5e commit 9150637

File tree: 3 files changed, +35 -17 lines


BitFaster.Caching/Lfu/CmSketch.cs (1 addition, 1 deletion)

@@ -10,7 +10,7 @@ namespace BitFaster.Caching.Lfu
     /// </summary>
     /// <remarks>
     /// This is a direct C# translation of FrequencySketch in the Caffeine library by [email protected] (Ben Manes).
-    /// http://www.apache.org/licenses/LICENSE-2.0
+    /// https://github.com/ben-manes/caffeine
    /// </remarks>
     public sealed class CmSketch<T>
     {
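For orientation, the class documented by this hunk is a count-min sketch used to estimate how frequently each key has been accessed. A minimal usage sketch follows; the capacity-plus-comparer constructor and the Increment/EstimateFrequency members are assumed to mirror Caffeine's FrequencySketch, so verify them against the actual CmSketch<T> source.

using System.Collections.Generic;
using BitFaster.Caching.Lfu;

// Assumed API: capacity + comparer constructor, Increment, and
// EstimateFrequency, mirroring Caffeine's FrequencySketch. Verify
// against the BitFaster.Caching source before relying on this.
var sketch = new CmSketch<string>(128, EqualityComparer<string>.Default);

// Record accesses. Counters are small and saturate, so the sketch
// stays compact no matter how hot a key becomes.
sketch.Increment("alpha");
sketch.Increment("alpha");
sketch.Increment("beta");

// A count-min sketch can over-estimate (hash collisions) but never
// under-estimate a key's frequency.
int alpha = sketch.EstimateFrequency("alpha"); // likely 2
int beta = sketch.EstimateFrequency("beta");   // likely 1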

BitFaster.Caching/Lfu/ConcurrentLfu.cs (18 additions, 3 deletions)

@@ -20,11 +20,26 @@
 namespace BitFaster.Caching.Lfu
 {
     /// <summary>
-    /// An LFU cache with a W-TinyLfu eviction policy.
+    /// An approximate LFU based on the W-TinyLfu eviction policy. W-TinyLfu tracks items using a window LRU list, and
+    /// a main space LRU divided into protected and probation segments. Reads and writes to the cache are stored in buffers
+    /// and later applied to the policy LRU lists in batches under a lock. Each read and write is tracked using a compact
+    /// popularity sketch to probabilistically estimate item frequency. Items proceed through the LRU lists as follows:
+    /// <list type="number">
+    /// <item><description>New items are added to the window LRU. When accessed, window items move to the window MRU position.</description></item>
+    /// <item><description>When the window is full, candidate items are moved to the probation segment in LRU order.</description></item>
+    /// <item><description>When the main space is full, the access frequency of each window candidate is compared
+    /// to probation victims in LRU order. The item with the lowest frequency is evicted until the cache size is within bounds.</description></item>
+    /// <item><description>When a probation item is accessed, it is moved to the protected segment. If the protected segment is full,
+    /// the LRU protected item is demoted to probation.</description></item>
+    /// <item><description>When a protected item is accessed, it is moved to the protected MRU position.</description></item>
+    /// </list>
+    /// The sizes of the admission window and main space are adapted over time to iteratively improve hit rate using a
+    /// hill climbing algorithm. A larger window favors workloads with high recency bias, whereas a larger main space
+    /// favors workloads with frequency bias.
     /// </summary>
     /// <remarks>
-    /// Based on Caffeine written by Ben Manes.
-    /// https://www.apache.org/licenses/LICENSE-2.0
+    /// Based on the Caffeine library by [email protected] (Ben Manes).
+    /// https://github.com/ben-manes/caffeine
     /// </remarks>
     [DebuggerTypeProxy(typeof(ConcurrentLfu<,>.LfuDebugView))]
     [DebuggerDisplay("Count = {Count}/{Capacity}")]
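To make step 3 of the summary above concrete, here is an illustrative sketch of the TinyLfu admission decision. This is a simplified model, not the library's internal code, and it reuses the assumed CmSketch<T>.EstimateFrequency member from the previous example.

// Simplified TinyLfu admission: when the main space is full, a window
// candidate displaces the probation (LRU) victim only if the sketch
// estimates the candidate to be more popular. Illustrative only.
static bool Admit<T>(CmSketch<T> sketch, T candidate, T victim)
{
    // If the candidate loses, the candidate itself is the one evicted.
    return sketch.EstimateFrequency(candidate) > sketch.EstimateFrequency(victim);
}

Caffeine layers extra safeguards on the same comparison (for example against hash-flooding attacks), so treat this as the core idea rather than the full policy.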

BitFaster.Caching/Lru/ConcurrentLruCore.cs (16 additions, 13 deletions)

@@ -11,22 +11,25 @@
 namespace BitFaster.Caching.Lru
 {
     /// <summary>
-    /// Pseudo LRU implementation where LRU list is composed of 3 segments: hot, warm and cold. Cost of maintaining
-    /// segments is amortized across requests. Items are only cycled when capacity is exceeded. Pure read does
-    /// not cycle items if all segments are within capacity constraints.
-    /// There are no global locks. On cache miss, a new item is added. Tail items in each segment are dequeued,
-    /// examined, and are either enqueued or discarded.
-    /// This scheme of hot, warm and cold is based on the implementation used in MemCached described online here:
-    /// https://memcached.org/blog/modern-lru/
+    /// A pseudo LRU based on the TU-Q eviction policy. The LRU list is composed of 3 segments: hot, warm and cold.
+    /// The cost of maintaining segments is amortized across requests. Items are only cycled when capacity is exceeded.
+    /// A pure read does not cycle items if all segments are within capacity constraints. There are no global locks.
+    /// On cache miss, a new item is added. Tail items in each segment are dequeued, examined, and are either enqueued
+    /// or discarded.
+    /// The TU-Q scheme of hot, warm and cold is similar to that used in MemCached (https://memcached.org/blog/modern-lru/)
+    /// and OpenBSD (https://flak.tedunangst.com/post/2Q-buffer-cache-algorithm), but does not use a background thread
+    /// to maintain the internal queues.
     /// </summary>
     /// <remarks>
     /// Each segment has a capacity. When segment capacity is exceeded, items are moved as follows:
-    /// 1. New items are added to hot, WasAccessed = false
-    /// 2. When items are accessed, update WasAccessed = true
-    /// 3. When items are moved WasAccessed is set to false.
-    /// 4. When hot is full, hot tail is moved to either Warm or Cold depending on WasAccessed.
-    /// 5. When warm is full, warm tail is moved to warm head or cold depending on WasAccessed.
-    /// 6. When cold is full, cold tail is moved to warm head or removed from dictionary on depending on WasAccessed.
+    /// <list type="number">
+    /// <item><description>New items are added to hot, WasAccessed = false.</description></item>
+    /// <item><description>When items are accessed, update WasAccessed = true.</description></item>
+    /// <item><description>When items are moved, WasAccessed is set to false.</description></item>
+    /// <item><description>When hot is full, hot tail is moved to either warm or cold depending on WasAccessed.</description></item>
+    /// <item><description>When warm is full, warm tail is moved to warm head or cold depending on WasAccessed.</description></item>
+    /// <item><description>When cold is full, cold tail is moved to warm head or removed from the dictionary depending on WasAccessed.</description></item>
+    /// </list>
     /// </remarks>
     public class ConcurrentLruCore<K, V, I, P, T> : ICache<K, V>, IAsyncCache<K, V>, IEnumerable<KeyValuePair<K, V>>
         where I : LruItem<K, V>
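As a usage note for the class this diff documents: ConcurrentLruCore is the generic core behind the public ConcurrentLru<K, V> cache. A minimal sketch follows, assuming the standard capacity constructor and the ICache<K, V>.GetOrAdd method; verify both against the library docs.

using BitFaster.Caching.Lru;

// Minimal sketch, assuming ConcurrentLru<K, V> exposes a capacity
// constructor and GetOrAdd via ICache<K, V>.
var lru = new ConcurrentLru<string, string>(capacity: 128);

// Miss: the factory runs and the item enters the hot segment with
// WasAccessed = false (rule 1 above).
string first = lru.GetOrAdd("greeting", key => $"hello, {key}");

// Hit: the cached value is returned and WasAccessed is set to true
// (rule 2 above); no queues are cycled while segments are within capacity.
string second = lru.GetOrAdd("greeting", key => $"hello, {key}");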
