Exercise: Multi-Level Cache Hierarchy

Difficulty - Intermediate

Learning Objectives

  • Implement multi-level caching strategies
  • Handle automatic cache promotion and demotion
  • Manage TTL across cache levels
  • Build thread-safe cache operations
  • Optimize cache hit rates and performance

Problem Statement

Create a hierarchical caching system that supports multiple cache levels with automatic promotion of frequently accessed data to faster levels. Your implementation should be thread-safe, handle TTL expiration, and provide cache statistics for monitoring.

Requirements

1. CacheLevel Interface

Define a generic cache level interface that:

  • Provides Get(key string) (interface{}, bool) for retrieving values and reporting whether the key was found
  • Provides Set(key string, value interface{}, ttl time.Duration) for storing values
  • Supports Delete(key string) for removing entries
  • Includes Size() int to get the number of cached items
  • Implements Clear() to remove all entries

Example Usage:

var cache CacheLevel = NewMemoryCache(1000) // max 1000 entries
cache.Set("user:123", userData, 5*time.Minute)
if val, ok := cache.Get("user:123"); ok {
    // Use cached value
}

2. MemoryCache Implementation

Create an in-memory cache implementation that:

  • Stores data in a map with automatic expiration
  • Uses LRU eviction when capacity is reached
  • Is thread-safe using appropriate synchronization
  • Cleans up expired entries periodically
  • Tracks access count for promotion decisions

Example Usage:

cache := NewMemoryCache(100) // max 100 entries
cache.Set("key1", "value1", 1*time.Minute)
cache.Set("key2", "value2", 5*time.Minute)
// After 1 minute, key1 expires automatically

3. HierarchicalCache Implementation

Build a multi-level cache that:

  • Accepts multiple cache levels in order of speed
  • On cache miss in L1, checks L2, then L3, etc.
  • Automatically promotes values found in lower levels to all higher levels
  • Writes to all levels on Set operations
  • Provides configurable promotion strategies

Example Usage:

l1 := NewMemoryCache(100)   // Fast, small
l2 := NewMemoryCache(1000)  // Medium speed, medium size
l3 := NewMemoryCache(10000) // Slow, large

hierarchical := NewHierarchicalCache(l1, l2, l3)
hierarchical.Set("data", value, 10*time.Minute)
// Stored in all three levels

value, ok := hierarchical.Get("data")
// Checks L1 first, then L2, then L3
// If found in L2, promotes to L1 automatically

4. Cache Statistics

Implement statistics tracking that records:

  • Total hits and misses per level
  • Hit rate percentage for each level
  • Average retrieval time per level
  • Total number of promotions
  • Eviction count per level

Example Usage:

stats := hierarchical.GetStats()
fmt.Printf("L1 Hit Rate: %.2f%%\n", stats.GetHitRate(0))
fmt.Printf("Total Promotions: %d\n", stats.Promotions)

5. Smart Promotion Strategy

Implement an intelligent promotion strategy that:

  • Only promotes items accessed more than N times from lower levels
  • Considers TTL remaining
  • Supports configurable promotion threshold per level
  • Prevents cache pollution by avoiding promoting one-time accesses

Example Usage:

strategy := NewSmartPromotionStrategy(
    3,              // MinAccessCount: promote after 3 accesses
    30*time.Second, // MinTTLRemaining: at least 30s of TTL left
)
hierarchical.SetPromotionStrategy(strategy)

Function Signatures

package cache

import (
    "sync"
    "time"
)

// CacheLevel represents a single level in the cache hierarchy
type CacheLevel interface {
    Get(key string) (interface{}, bool)
    Set(key string, value interface{}, ttl time.Duration)
    Delete(key string)
    Size() int
    Clear()
}

// CacheEntry stores a value with metadata
type CacheEntry struct {
    Value       interface{}
    Expiry      time.Time
    AccessCount int64
    CreatedAt   time.Time
}

// MemoryCache is an in-memory LRU cache implementation
type MemoryCache struct {
    mu       sync.RWMutex
    data     map[string]*CacheEntry
    capacity int
    lru      *lruList
}

// NewMemoryCache creates a new memory cache with the given capacity
func NewMemoryCache(capacity int) *MemoryCache

// HierarchicalCache implements multi-level caching
type HierarchicalCache struct {
    levels   []CacheLevel
    strategy PromotionStrategy
    stats    *CacheStats
}

// NewHierarchicalCache creates a new hierarchical cache
func NewHierarchicalCache(levels ...CacheLevel) *HierarchicalCache

// SetPromotionStrategy replaces the default promotion strategy
func (hc *HierarchicalCache) SetPromotionStrategy(strategy PromotionStrategy)

// PromotionStrategy determines when to promote cache entries
type PromotionStrategy interface {
    ShouldPromote(entry *CacheEntry, fromLevel int) bool
}

// NewSmartPromotionStrategy creates a strategy gated on access count and remaining TTL
func NewSmartPromotionStrategy(minAccess int64, minTTL time.Duration) *SmartPromotionStrategy

// CacheStats tracks cache performance metrics
type CacheStats struct {
    Hits       []int64
    Misses     []int64
    Promotions int64
    Evictions  []int64
}

// GetStats returns current cache statistics
func (hc *HierarchicalCache) GetStats() *CacheStats

// GetHitRate returns the hit rate percentage for a given level (0 = L1)
func (s *CacheStats) GetHitRate(level int) float64

Test Cases

Your implementation should pass these test scenarios:

// Test basic hierarchical cache operations
func TestHierarchicalCacheBasics(t *testing.T) {
    l1 := NewMemoryCache(10)
    l2 := NewMemoryCache(100)
    hc := NewHierarchicalCache(l1, l2)

    hc.Set("key1", "value1", 1*time.Minute)

    // Should be in both levels
    assert.Equal(t, 1, l1.Size())
    assert.Equal(t, 1, l2.Size())

    val, ok := hc.Get("key1")
    assert.True(t, ok)
    assert.Equal(t, "value1", val)
}

// Test automatic promotion from L2 to L1
func TestCachePromotion(t *testing.T) {
    l1 := NewMemoryCache(10)
    l2 := NewMemoryCache(100)
    hc := NewHierarchicalCache(l1, l2)

    // Set only in L2
    l2.Set("key1", "value1", 1*time.Minute)

    // Get from hierarchical cache
    val, ok := hc.Get("key1")
    assert.True(t, ok)
    assert.Equal(t, "value1", val)

    // Should now be promoted to L1
    _, ok = l1.Get("key1")
    assert.True(t, ok)
}

// Test TTL expiration
func TestCacheExpiration(t *testing.T) {
    cache := NewMemoryCache(100)
    cache.Set("key1", "value1", 100*time.Millisecond)

    // Should exist initially
    _, ok := cache.Get("key1")
    assert.True(t, ok)

    // Wait for expiration
    time.Sleep(150 * time.Millisecond)

    // Should be expired
    _, ok = cache.Get("key1")
    assert.False(t, ok)
}

// Test LRU eviction
func TestLRUEviction(t *testing.T) {
    cache := NewMemoryCache(3) // capacity of 3

    cache.Set("key1", "value1", 1*time.Minute)
    cache.Set("key2", "value2", 1*time.Minute)
    cache.Set("key3", "value3", 1*time.Minute)

    // Access key1 and key2 to make key3 least recently used
    cache.Get("key1")
    cache.Get("key2")

    // Add a fourth item, which should evict key3
    cache.Set("key4", "value4", 1*time.Minute)

    _, ok := cache.Get("key3")
    assert.False(t, ok) // key3 should be evicted
}
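
Because the cache is accessed from multiple goroutines, also run the suite with Go's race detector enabled; it catches synchronization bugs the assertions alone won't surface:

go test -race ./...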

Common Pitfalls

⚠️ Watch out for these common mistakes:

  1. Race conditions: Use proper read/write locks when accessing shared cache data
  2. Memory leaks: Expired entries must be cleaned up, not just marked as expired
  3. Promotion storms: Don't promote on every access - use thresholds
  4. Lock contention: Don't hold locks during expensive operations like copying data
  5. Cache stampede: Multiple goroutines might try to populate the same missing key; deduplicate concurrent loads (see the sketch after this list)
  6. TTL inheritance: When promoting, carry over the remaining TTL rather than restarting it
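
For pitfall 5, one common remedy is to collapse concurrent loads of the same key so only the first caller does the expensive work. Below is a minimal, hand-rolled sketch of this in-flight deduplication; the inflight type and do method are illustrative names, not part of the exercise's required API (golang.org/x/sync/singleflight offers a production-ready version):

package cache

import "sync"

// call tracks one in-progress load; waiters block on done.
type call struct {
    done  chan struct{}
    value interface{}
}

// inflight deduplicates concurrent loads of the same missing key.
type inflight struct {
    mu    sync.Mutex
    calls map[string]*call
}

func (f *inflight) do(key string, load func() interface{}) interface{} {
    f.mu.Lock()
    if c, ok := f.calls[key]; ok {
        f.mu.Unlock()
        <-c.done // another goroutine is already loading this key
        return c.value
    }
    if f.calls == nil {
        f.calls = make(map[string]*call)
    }
    c := &call{done: make(chan struct{})}
    f.calls[key] = c
    f.mu.Unlock()

    c.value = load() // only the first caller runs the expensive load
    close(c.done)

    f.mu.Lock()
    delete(f.calls, key)
    f.mu.Unlock()
    return c.value
}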

Hints

💡 Hint 1: LRU Implementation

Use a doubly-linked list combined with a map for O(1) LRU operations:

type lruNode struct {
    key  string
    prev *lruNode
    next *lruNode
}

type lruList struct {
    head *lruNode
    tail *lruNode
}

// moveToFront promotes a node to most recently used
func (l *lruList) moveToFront(node *lruNode) {
    // Remove from current position
    // Add to front
}

💡 Hint 2: Efficient TTL Cleanup

Use a background goroutine to periodically clean expired entries:

func (mc *MemoryCache) startCleanup() {
    ticker := time.NewTicker(1 * time.Minute)
    go func() {
        for range ticker.C {
            mc.removeExpired()
        }
    }()
}

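As written, this goroutine never exits. To avoid leaking it when a cache is discarded, select on a stop channel alongside ticker.C (as the full solution below does) and expose a way to close it:

// Stop terminates the background cleanup goroutine
// (assumes the cleanup loop selects on mc.stopChan, as in the solution).
func (mc *MemoryCache) Stop() {
    close(mc.stopChan)
}
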
💡 Hint 3: Promotion Logic

When promoting from level N to levels 0..N-1, use the remaining TTL:

remaining := time.Until(entry.Expiry)
if remaining > 0 {
    for i := 0; i < fromLevel; i++ {
        hc.levels[i].Set(key, entry.Value, remaining)
    }
}

Passing the remaining TTL, rather than the original one, keeps promotion from extending an entry's lifetime (pitfall 6 above).

Solution

package cache

import (
    "sync"
    "sync/atomic"
    "time"
)

// CacheLevel represents a single level in the cache hierarchy
type CacheLevel interface {
    Get(key string) (interface{}, bool)
    Set(key string, value interface{}, ttl time.Duration)
    Delete(key string)
    Size() int
    Clear()
    GetEntry(key string) (*CacheEntry, bool)
}

// CacheEntry stores a value with metadata
type CacheEntry struct {
    Value       interface{}
    Expiry      time.Time
    AccessCount int64
    CreatedAt   time.Time
}

// lruNode represents a node in the LRU list
type lruNode struct {
    key  string
    prev *lruNode
    next *lruNode
}

// lruList implements a doubly-linked list with sentinel head and tail
// nodes for LRU tracking
type lruList struct {
    head *lruNode
    tail *lruNode
    size int
}

func newLRUList() *lruList {
    head := &lruNode{}
    tail := &lruNode{}
    head.next = tail
    tail.prev = head
    return &lruList{head: head, tail: tail}
}

// moveToFront promotes a node to most recently used
func (l *lruList) moveToFront(node *lruNode) {
    // Remove from current position
    node.prev.next = node.next
    node.next.prev = node.prev

    // Add to front
    node.next = l.head.next
    node.prev = l.head
    l.head.next.prev = node
    l.head.next = node
}

// addToFront inserts a new node as most recently used
func (l *lruList) addToFront(key string) *lruNode {
    node := &lruNode{key: key}
    node.next = l.head.next
    node.prev = l.head
    l.head.next.prev = node
    l.head.next = node
    l.size++
    return node
}

// removeLast evicts and returns the least recently used key
func (l *lruList) removeLast() string {
    if l.size == 0 {
        return ""
    }

    node := l.tail.prev
    node.prev.next = l.tail
    l.tail.prev = node.prev
    l.size--
    return node.key
}

func (l *lruList) remove(node *lruNode) {
    node.prev.next = node.next
    node.next.prev = node.prev
    l.size--
}

// MemoryCache is an in-memory LRU cache implementation
type MemoryCache struct {
    mu       sync.RWMutex
    data     map[string]*CacheEntry
    lruNodes map[string]*lruNode
    capacity int
    lru      *lruList
    stopChan chan struct{}
}

// NewMemoryCache creates a new memory cache with the given capacity
func NewMemoryCache(capacity int) *MemoryCache {
    mc := &MemoryCache{
        data:     make(map[string]*CacheEntry),
        lruNodes: make(map[string]*lruNode),
        capacity: capacity,
        lru:      newLRUList(),
        stopChan: make(chan struct{}),
    }
    mc.startCleanup()
    return mc
}

func (mc *MemoryCache) Get(key string) (interface{}, bool) {
    entry, ok := mc.GetEntry(key)
    if !ok {
        return nil, false
    }
    return entry.Value, true
}

func (mc *MemoryCache) GetEntry(key string) (*CacheEntry, bool) {
    mc.mu.RLock()
    entry, ok := mc.data[key]
    mc.mu.RUnlock()

    if !ok {
        return nil, false
    }

    // Check expiration
    if time.Now().After(entry.Expiry) {
        mc.Delete(key)
        return nil, false
    }

    // Increment access count
    atomic.AddInt64(&entry.AccessCount, 1)

    // Update LRU
    mc.mu.Lock()
    if node, exists := mc.lruNodes[key]; exists {
        mc.lru.moveToFront(node)
    }
    mc.mu.Unlock()

    return entry, true
}

func (mc *MemoryCache) Set(key string, value interface{}, ttl time.Duration) {
    mc.mu.Lock()
    defer mc.mu.Unlock()

    // Evict the LRU item if inserting a new key at capacity
    if len(mc.data) >= mc.capacity {
        if _, exists := mc.data[key]; !exists {
            evictKey := mc.lru.removeLast()
            if evictKey != "" {
                delete(mc.data, evictKey)
                delete(mc.lruNodes, evictKey)
            }
        }
    }

    entry := &CacheEntry{
        Value:       value,
        Expiry:      time.Now().Add(ttl),
        AccessCount: 0,
        CreatedAt:   time.Now(),
    }

    mc.data[key] = entry

    // Update LRU
    if node, exists := mc.lruNodes[key]; exists {
        mc.lru.moveToFront(node)
    } else {
        mc.lruNodes[key] = mc.lru.addToFront(key)
    }
}

func (mc *MemoryCache) Delete(key string) {
    mc.mu.Lock()
    defer mc.mu.Unlock()

    delete(mc.data, key)
    if node, exists := mc.lruNodes[key]; exists {
        mc.lru.remove(node)
        delete(mc.lruNodes, key)
    }
}

func (mc *MemoryCache) Size() int {
    mc.mu.RLock()
    defer mc.mu.RUnlock()
    return len(mc.data)
}

func (mc *MemoryCache) Clear() {
    mc.mu.Lock()
    defer mc.mu.Unlock()
    mc.data = make(map[string]*CacheEntry)
    mc.lruNodes = make(map[string]*lruNode)
    mc.lru = newLRUList()
}

// Stop terminates the background cleanup goroutine
func (mc *MemoryCache) Stop() {
    close(mc.stopChan)
}

func (mc *MemoryCache) startCleanup() {
    ticker := time.NewTicker(1 * time.Minute)
    go func() {
        for {
            select {
            case <-ticker.C:
                mc.removeExpired()
            case <-mc.stopChan:
                ticker.Stop()
                return
            }
        }
    }()
}

func (mc *MemoryCache) removeExpired() {
    mc.mu.Lock()
    defer mc.mu.Unlock()

    now := time.Now()
    for key, entry := range mc.data {
        if now.After(entry.Expiry) {
            delete(mc.data, key)
            if node, exists := mc.lruNodes[key]; exists {
                mc.lru.remove(node)
                delete(mc.lruNodes, key)
            }
        }
    }
}

// PromotionStrategy determines when to promote cache entries
type PromotionStrategy interface {
    ShouldPromote(entry *CacheEntry, fromLevel int) bool
}

// AlwaysPromoteStrategy always promotes on cache hit
type AlwaysPromoteStrategy struct{}

func (s *AlwaysPromoteStrategy) ShouldPromote(entry *CacheEntry, fromLevel int) bool {
    return true
}

// SmartPromotionStrategy promotes based on access count and remaining TTL
type SmartPromotionStrategy struct {
    MinAccessCount  int64
    MinTTLRemaining time.Duration
}

func NewSmartPromotionStrategy(minAccess int64, minTTL time.Duration) *SmartPromotionStrategy {
    return &SmartPromotionStrategy{
        MinAccessCount:  minAccess,
        MinTTLRemaining: minTTL,
    }
}

func (s *SmartPromotionStrategy) ShouldPromote(entry *CacheEntry, fromLevel int) bool {
    // Check access count
    if atomic.LoadInt64(&entry.AccessCount) < s.MinAccessCount {
        return false
    }

    // Check remaining TTL
    if time.Until(entry.Expiry) < s.MinTTLRemaining {
        return false
    }

    return true
}

// CacheStats tracks cache performance metrics
type CacheStats struct {
    Hits       []int64
    Misses     []int64
    Promotions int64
    Evictions  []int64
}

// HierarchicalCache implements multi-level caching
type HierarchicalCache struct {
    levels   []CacheLevel
    strategy PromotionStrategy
    stats    *CacheStats
    mu       sync.RWMutex
}

// NewHierarchicalCache creates a new hierarchical cache
func NewHierarchicalCache(levels ...CacheLevel) *HierarchicalCache {
    stats := &CacheStats{
        Hits:      make([]int64, len(levels)),
        Misses:    make([]int64, len(levels)),
        Evictions: make([]int64, len(levels)),
    }

    return &HierarchicalCache{
        levels:   levels,
        strategy: &AlwaysPromoteStrategy{},
        stats:    stats,
    }
}

func (hc *HierarchicalCache) SetPromotionStrategy(strategy PromotionStrategy) {
    hc.mu.Lock()
    defer hc.mu.Unlock()
    hc.strategy = strategy
}

func (hc *HierarchicalCache) Get(key string) (interface{}, bool) {
    hc.mu.RLock()
    strategy := hc.strategy
    hc.mu.RUnlock()

    for i, level := range hc.levels {
        if entry, ok := level.GetEntry(key); ok {
            atomic.AddInt64(&hc.stats.Hits[i], 1)

            // Promote to higher levels if the strategy allows,
            // preserving the remaining TTL
            if i > 0 && strategy.ShouldPromote(entry, i) {
                remaining := time.Until(entry.Expiry)
                if remaining > 0 {
                    for j := 0; j < i; j++ {
                        hc.levels[j].Set(key, entry.Value, remaining)
                    }
                    atomic.AddInt64(&hc.stats.Promotions, 1)
                }
            }

            return entry.Value, true
        }
        atomic.AddInt64(&hc.stats.Misses[i], 1)
    }

    return nil, false
}

func (hc *HierarchicalCache) Set(key string, value interface{}, ttl time.Duration) {
    // Write through to all levels
    for _, level := range hc.levels {
        level.Set(key, value, ttl)
    }
}

func (hc *HierarchicalCache) Delete(key string) {
    for _, level := range hc.levels {
        level.Delete(key)
    }
}

func (hc *HierarchicalCache) GetStats() *CacheStats {
    return hc.stats
}

func (hc *HierarchicalCache) Size() int {
    if len(hc.levels) > 0 {
        return hc.levels[0].Size()
    }
    return 0
}

func (hc *HierarchicalCache) Clear() {
    for _, level := range hc.levels {
        level.Clear()
    }
}

// GetHitRate returns the hit rate percentage for a specific level
func (s *CacheStats) GetHitRate(level int) float64 {
    if level >= len(s.Hits) {
        return 0
    }

    hits := atomic.LoadInt64(&s.Hits[level])
    misses := atomic.LoadInt64(&s.Misses[level])
    total := hits + misses

    if total == 0 {
        return 0
    }

    return float64(hits) / float64(total) * 100
}
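
Putting it together, a short usage sketch of the solution's API (the variable names are illustrative):

l1 := NewMemoryCache(100)
l2 := NewMemoryCache(10000)
hc := NewHierarchicalCache(l1, l2)
hc.SetPromotionStrategy(NewSmartPromotionStrategy(3, 30*time.Second))

hc.Set("user:123", "profile-data", 10*time.Minute)
if val, ok := hc.Get("user:123"); ok {
    fmt.Println(val) // served from L1
}
fmt.Printf("L1 hit rate: %.2f%%\n", hc.GetStats().GetHitRate(0))

// Stop background cleanup when the caches are no longer needed
l1.Stop()
l2.Stop()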

Key Takeaways

  • Multi-level caches improve performance by keeping hot data in fast storage
  • Automatic promotion reduces latency for frequently accessed items
  • LRU eviction keeps recently used data resident when capacity is limited
  • Thread-safety is critical in concurrent environments
  • TTL management prevents stale data from polluting the cache
  • Smart promotion strategies prevent cache pollution and unnecessary promotions
  • Cache statistics enable monitoring and optimization of cache performance