Exercise: Rate Limiter
Difficulty - Intermediate
Learning Objectives
- Implement rate limiting algorithms
- Use token bucket pattern
- Handle concurrent rate limit checks
- Implement per-user rate limiting
- Build rate limiter middleware
Problem Statement
Create a flexible rate limiter supporting multiple algorithms with per-key limiting.
Core Components
```go
package ratelimiter

import (
	"sync"
	"time"
)

type RateLimiter interface {
	Allow(key string) bool
	Wait(key string) time.Duration
}

type TokenBucketLimiter struct {
	rate    int
	burst   int
	buckets map[string]*bucket
	mu      sync.Mutex
}

type bucket struct {
	tokens     int
	lastRefill time.Time
}

func NewTokenBucket(rate, burst int) *TokenBucketLimiter
func (tb *TokenBucketLimiter) Allow(key string) bool
func (tb *TokenBucketLimiter) Wait(key string) time.Duration

type SlidingWindowLimiter struct {
	limit    int
	window   time.Duration
	counters map[string]*windowCounter
	mu       sync.Mutex
}

func NewSlidingWindow(limit int, window time.Duration) *SlidingWindowLimiter
func (sw *SlidingWindowLimiter) Allow(key string) bool
```
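To make the intended behaviour concrete before looking at the solution, here is a rough usage sketch. The import path is a placeholder for wherever the package lives, and keying on a client ID string is just one possible choice:

```go
package main

import (
	"fmt"

	"example.com/ratelimiter" // placeholder import path for the package above
)

func main() {
	// 5 tokens per second per key, with bursts of up to 10 requests.
	limiter := ratelimiter.NewTokenBucket(5, 10)

	for i := 0; i < 12; i++ {
		if limiter.Allow("client-42") {
			fmt.Println("request", i, "allowed")
		} else {
			// Over the limit: Wait reports roughly how long to back off.
			fmt.Println("request", i, "rejected, retry in", limiter.Wait("client-42"))
		}
	}
}
```

With a burst of 10, roughly the first ten calls succeed immediately and the rest are rejected until tokens refill.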
Solution
```go
package ratelimiter

import (
	"sync"
	"time"
)

type RateLimiter interface {
	Allow(key string) bool
	Wait(key string) time.Duration
}

// Token Bucket Implementation

type TokenBucketLimiter struct {
	rate    int // tokens added per second
	burst   int // maximum tokens a bucket can hold
	buckets map[string]*bucket
	mu      sync.Mutex
}

type bucket struct {
	tokens     int
	lastRefill time.Time
}

func NewTokenBucket(rate, burst int) *TokenBucketLimiter {
	return &TokenBucketLimiter{
		rate:    rate,
		burst:   burst,
		buckets: make(map[string]*bucket),
	}
}

func (tb *TokenBucketLimiter) Allow(key string) bool {
	tb.mu.Lock()
	defer tb.mu.Unlock()

	b, exists := tb.buckets[key]
	if !exists {
		// First request for this key: start with a full burst minus this request.
		b = &bucket{
			tokens:     tb.burst - 1,
			lastRefill: time.Now(),
		}
		tb.buckets[key] = b
		return true
	}

	// Refill for whole elapsed seconds only, and leave lastRefill untouched
	// otherwise so sub-second intervals are not repeatedly discarded.
	now := time.Now()
	elapsed := now.Sub(b.lastRefill)
	refillTokens := int(elapsed.Seconds()) * tb.rate
	if refillTokens > 0 {
		b.tokens = min(b.burst, b.tokens+refillTokens)
		b.lastRefill = now
	}

	if b.tokens > 0 {
		b.tokens--
		return true
	}

	return false
}

func (tb *TokenBucketLimiter) Wait(key string) time.Duration {
	tb.mu.Lock()
	defer tb.mu.Unlock()

	b, exists := tb.buckets[key]
	if !exists || b.tokens > 0 {
		return 0
	}

	// Approximate time until the next token is added.
	return time.Second / time.Duration(tb.rate)
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// Sliding Window Implementation

type SlidingWindowLimiter struct {
	limit    int
	window   time.Duration
	counters map[string]*windowCounter
	mu       sync.Mutex
}

type windowCounter struct {
	requests []time.Time
}

func NewSlidingWindow(limit int, window time.Duration) *SlidingWindowLimiter {
	return &SlidingWindowLimiter{
		limit:    limit,
		window:   window,
		counters: make(map[string]*windowCounter),
	}
}

func (sw *SlidingWindowLimiter) Allow(key string) bool {
	sw.mu.Lock()
	defer sw.mu.Unlock()

	now := time.Now()
	counter, exists := sw.counters[key]
	if !exists {
		counter = &windowCounter{
			requests: []time.Time{now},
		}
		sw.counters[key] = counter
		return true
	}

	// Remove requests that have fallen out of the window.
	cutoff := now.Add(-sw.window)
	valid := make([]time.Time, 0, len(counter.requests))
	for _, t := range counter.requests {
		if t.After(cutoff) {
			valid = append(valid, t)
		}
	}
	counter.requests = valid

	if len(counter.requests) < sw.limit {
		counter.requests = append(counter.requests, now)
		return true
	}

	return false
}
```
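One of the learning objectives is rate limiter middleware. The exercise doesn't prescribe an implementation, but a minimal sketch that wraps any `RateLimiter` around an `http.Handler` could look like this. Keying on `r.RemoteAddr` is an assumption; production services usually key on an API token or authenticated user ID.

```go
package ratelimiter

import "net/http"

// Middleware is an illustrative sketch, not part of the solution above:
// it rejects requests whose key has exhausted its allowance.
func Middleware(rl RateLimiter, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assumption: the remote address identifies the client well enough.
		if !rl.Allow(r.RemoteAddr) {
			http.Error(w, "rate limit exceeded", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```

Wiring it up with the token bucket then looks like `http.ListenAndServe(":8080", Middleware(NewTokenBucket(5, 10), mux))`, where `mux` is whatever handler you already have.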
Key Takeaways
- Rate limiting protects services from overload
- Token bucket allows bursts while controlling average rate
- Sliding window provides accurate limits but uses more memory
- Per-key limiting enables fair resource allocation
- Cleanup prevents memory leaks in long-running limiters
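On that last point: neither limiter above ever deletes idle keys, so the maps grow for as long as new keys keep arriving. A hedged sketch of one way to evict stale entries from the token bucket limiter, treating lastRefill as a rough proxy for last activity (the method name and policy are illustrative, not part of the solution):

```go
// Cleanup deletes buckets whose last refill is older than maxIdle.
// Illustrative sketch: a real limiter would typically call this
// periodically from a background goroutine (e.g. driven by time.Ticker).
func (tb *TokenBucketLimiter) Cleanup(maxIdle time.Duration) {
	tb.mu.Lock()
	defer tb.mu.Unlock()

	cutoff := time.Now().Add(-maxIdle)
	for key, b := range tb.buckets {
		if b.lastRefill.Before(cutoff) {
			delete(tb.buckets, key)
		}
	}
}
```

The same idea applies to SlidingWindowLimiter: drop counters whose newest timestamp is older than the window.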