-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcache.go
More file actions
367 lines (304 loc) Β· 8.11 KB
/
cache.go
File metadata and controls
367 lines (304 loc) Β· 8.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
package matcher
import (
"crypto/md5"
"fmt"
"sort"
"strings"
"sync"
"time"
)
// CacheEntry represents a single cached query result together with the
// bookkeeping (store time + TTL) needed to decide when it expires.
type CacheEntry struct {
	Key       string        `json:"key"`       // plain (pre-hash) key string this entry was stored under
	Result    *MatchResult  `json:"result"`    // the cached query outcome
	Timestamp time.Time     `json:"timestamp"` // when the entry was stored
	TTL       time.Duration `json:"ttl"`       // lifetime, measured from Timestamp
}
// IsExpired reports whether the entry's TTL has elapsed since it was stored.
func (ce *CacheEntry) IsExpired() bool {
	return time.Since(ce.Timestamp) > ce.TTL
}
// QueryCache implements a thread-safe LRU cache for query results.
// Entries carry a TTL and are removed lazily on Get, or in bulk via
// CleanupExpired; when the cache is full, the least recently used
// entry is evicted.
type QueryCache struct {
	entries    map[string]*CacheEntry // hashed key -> cached entry
	accessTime map[string]time.Time   // hashed key -> last access time, for LRU tracking
	maxSize    int                    // capacity threshold that triggers eviction
	defaultTTL time.Duration          // TTL applied by Set
	mu         sync.RWMutex           // guards both maps above
}
// NewQueryCache constructs an empty query cache that holds at most
// maxSize entries, each expiring after defaultTTL unless a custom TTL
// is given via SetWithTTL.
func NewQueryCache(maxSize int, defaultTTL time.Duration) *QueryCache {
	qc := &QueryCache{
		maxSize:    maxSize,
		defaultTTL: defaultTTL,
		entries:    make(map[string]*CacheEntry),
		accessTime: make(map[string]time.Time),
	}
	return qc
}
// generateCacheKey derives a deterministic cache key for a query.
// It returns both the MD5-hashed key (used as the map key) and the
// plain, human-readable string it was derived from.
//
// The key incorporates, in order:
//   - the query's dimension values, sorted so map-iteration order
//     cannot change the generated key between calls
//   - the IncludeAllRules flag
//   - the sorted set of excluded rule IDs (when present)
//   - the tenant and application IDs, to isolate entries per context
//
// MD5 is used only to produce compact, fixed-length keys — this is not
// a security boundary.
func (qc *QueryCache) generateCacheKey(query *QueryRule) (string, string) {
	// Sort dimension key:value pairs for stable key generation.
	keys := make([]string, 0, len(query.Values))
	for dim, value := range query.Values {
		keys = append(keys, fmt.Sprintf("%s:%s", dim, value))
	}
	sort.Strings(keys)
	keyString := strings.Join(keys, "|")
	// Distinguish entries produced with and without IncludeAllRules.
	keyString += fmt.Sprintf("|include_all:%t", query.IncludeAllRules)
	// Fold the excluded-rule set into the key so queries excluding
	// different rules never collide.
	if len(query.ExcludeRules) > 0 {
		excludedRuleIDs := make([]string, 0, len(query.ExcludeRules))
		for ruleID := range query.ExcludeRules {
			excludedRuleIDs = append(excludedRuleIDs, ruleID)
		}
		sort.Strings(excludedRuleIDs) // sort for consistent key generation
		keyString += "|exclude:" + strings.Join(excludedRuleIDs, ",")
	}
	// Tenant/application context guarantees cross-tenant isolation.
	keyString += fmt.Sprintf("|tenant:%s|app:%s", query.TenantID, query.ApplicationID)
	// Hash for a consistent key length.
	hash := md5.Sum([]byte(keyString))
	return fmt.Sprintf("%x", hash), keyString
}
// Get retrieves the cached result for a query, or nil on a miss.
// Expired entries are deleted lazily at lookup time. A write lock is
// taken because a hit mutates accessTime and an expired hit mutates
// both maps.
func (qc *QueryCache) Get(query *QueryRule) *MatchResult {
	qc.mu.Lock()
	defer qc.mu.Unlock()
	key, _ := qc.generateCacheKey(query)
	entry, ok := qc.entries[key]
	if !ok {
		return nil
	}
	if entry.IsExpired() {
		// Lazy expiry: drop the stale entry and report a miss.
		delete(qc.entries, key)
		delete(qc.accessTime, key)
		return nil
	}
	qc.accessTime[key] = time.Now() // refresh LRU position
	return entry.Result
}
// Set stores a result in the cache using the cache's default TTL.
// It delegates to SetWithTTL, which previously duplicated this body
// verbatim except for the TTL value.
func (qc *QueryCache) Set(query *QueryRule, result *MatchResult) {
	qc.SetWithTTL(query, result, qc.defaultTTL)
}
// SetWithTTL stores a result in the cache with a custom TTL.
//
// If inserting a brand-new key would push the cache past maxSize, the
// least recently used entry is evicted first. Overwriting an existing
// key never triggers eviction, since it does not grow the cache.
func (qc *QueryCache) SetWithTTL(query *QueryRule, result *MatchResult, ttl time.Duration) {
	qc.mu.Lock()
	defer qc.mu.Unlock()
	key, plain := qc.generateCacheKey(query)
	now := time.Now()
	// Only evict when this key is new: the original code evicted
	// whenever len(entries) >= maxSize, so overwriting an existing
	// entry at capacity needlessly dropped an unrelated entry and
	// shrank the cache below its maximum.
	if _, exists := qc.entries[key]; !exists && len(qc.entries) >= qc.maxSize {
		qc.evictLRU()
	}
	qc.entries[key] = &CacheEntry{
		Key:       plain,
		Result:    result,
		Timestamp: now,
		TTL:       ttl,
	}
	qc.accessTime[key] = now
}
// evictLRU removes the entry with the oldest access time. It is a
// no-op on an empty cache. The caller must already hold qc.mu.
func (qc *QueryCache) evictLRU() {
	if len(qc.entries) == 0 {
		return
	}
	var (
		victim     string
		victimTime time.Time
		found      bool
	)
	for key, at := range qc.accessTime {
		if !found || at.Before(victimTime) {
			victim, victimTime, found = key, at, true
		}
	}
	if victim != "" {
		delete(qc.entries, victim)
		delete(qc.accessTime, victim)
	}
}
// Clear discards every cached entry by resetting both backing maps.
func (qc *QueryCache) Clear() {
	qc.mu.Lock()
	qc.entries = make(map[string]*CacheEntry)
	qc.accessTime = make(map[string]time.Time)
	qc.mu.Unlock()
}
// Size reports how many entries are currently stored, including any
// that have expired but not yet been cleaned up.
func (qc *QueryCache) Size() int {
	qc.mu.RLock()
	n := len(qc.entries)
	qc.mu.RUnlock()
	return n
}
// CleanupExpired removes every expired entry and returns how many
// entries were removed.
func (qc *QueryCache) CleanupExpired() int {
	qc.mu.Lock()
	defer qc.mu.Unlock()
	removed := 0
	// Deleting keys while ranging over a Go map is well-defined, so a
	// single pass suffices.
	for key, entry := range qc.entries {
		if entry.IsExpired() {
			delete(qc.entries, key)
			delete(qc.accessTime, key)
			removed++
		}
	}
	return removed
}
// Stats returns a snapshot of cache metrics: total entry count, how
// many of those entries are already expired, the configured capacity,
// and the default TTL.
func (qc *QueryCache) Stats() map[string]interface{} {
	qc.mu.RLock()
	defer qc.mu.RUnlock()
	expired := 0
	for _, entry := range qc.entries {
		if entry.IsExpired() {
			expired++
		}
	}
	return map[string]interface{}{
		"total_entries":   len(qc.entries),
		"expired_entries": expired,
		"max_size":        qc.maxSize,
		"default_ttl":     qc.defaultTTL.String(),
	}
}
// StartCleanupWorker launches a background goroutine that purges
// expired entries every interval. Sending any value on (or closing)
// the returned channel stops the worker and its ticker.
func (qc *QueryCache) StartCleanupWorker(interval time.Duration) chan<- bool {
	stop := make(chan bool)
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				qc.CleanupExpired()
			}
		}
	}()
	return stop
}
// MultiLevelCache implements a two-tier cache system: a small L1 in
// front of a larger L2. Hits in L2 are promoted into L1 by Get.
type MultiLevelCache struct {
	l1Cache *QueryCache // Fast, small cache (first tier)
	l2Cache *QueryCache // Larger, slower cache (second tier)
	// Per-level exponential-moving-average hit rates. NOTE(review):
	// updateHitStats only moves these upward on hits and nothing
	// decays them on misses, so they trend toward 1 over time.
	l1HitRate    float64
	l2HitRate    float64
	totalHits    int64        // cumulative hits across both levels
	totalQueries int64        // cumulative Get calls
	mu           sync.RWMutex // guards the statistics fields above
}
// NewMultiLevelCache wires together an L1 and an L2 QueryCache with
// the given sizes and TTLs.
func NewMultiLevelCache(l1Size int, l1TTL time.Duration, l2Size int, l2TTL time.Duration) *MultiLevelCache {
	mlc := &MultiLevelCache{}
	mlc.l1Cache = NewQueryCache(l1Size, l1TTL)
	mlc.l2Cache = NewQueryCache(l2Size, l2TTL)
	return mlc
}
// Get looks the query up in L1, then L2; an L2 hit is promoted into
// L1. Returns nil on a complete miss.
//
// NOTE(review): the per-level hit-rate EMAs are only updated on hits
// and never decayed on misses, so they overstate real hit rates.
func (mlc *MultiLevelCache) Get(query *QueryRule) *MatchResult {
	mlc.mu.Lock()
	mlc.totalQueries++
	mlc.mu.Unlock()
	if result := mlc.l1Cache.Get(query); result != nil {
		mlc.updateHitStats(1)
		return result
	}
	result := mlc.l2Cache.Get(query)
	if result == nil {
		return nil // miss in both levels
	}
	// Promote the hot entry into the faster tier.
	mlc.l1Cache.Set(query, result)
	mlc.updateHitStats(2)
	return result
}
// Set writes the result through to both cache tiers.
func (mlc *MultiLevelCache) Set(query *QueryRule, result *MatchResult) {
	// The two tiers are independent, so write order is immaterial.
	mlc.l2Cache.Set(query, result)
	mlc.l1Cache.Set(query, result)
}
// updateHitStats records a hit at the given level (1 or 2), bumping
// the total hit counter and nudging that level's exponential moving
// average upward.
//
// NOTE(review): nothing ever decays these EMAs on a miss, so both
// rates drift toward 1 regardless of the true hit ratio.
func (mlc *MultiLevelCache) updateHitStats(level int) {
	mlc.mu.Lock()
	defer mlc.mu.Unlock()
	mlc.totalHits++
	const alpha = 0.05 // EMA smoothing factor
	if level == 1 {
		mlc.l1HitRate = mlc.l1HitRate*(1-alpha) + alpha
	} else if level == 2 {
		mlc.l2HitRate = mlc.l2HitRate*(1-alpha) + alpha
	}
}
// Clear empties both cache tiers and resets all statistics.
func (mlc *MultiLevelCache) Clear() {
	mlc.l1Cache.Clear()
	mlc.l2Cache.Clear()
	mlc.mu.Lock()
	defer mlc.mu.Unlock()
	mlc.l1HitRate, mlc.l2HitRate = 0, 0
	mlc.totalHits, mlc.totalQueries = 0, 0
}
// Stats aggregates statistics across both tiers plus overall counters.
// overall_hit_rate is exact (total hits / total queries); the
// per-level rates are the moving averages kept by updateHitStats.
func (mlc *MultiLevelCache) Stats() map[string]interface{} {
	mlc.mu.RLock()
	defer mlc.mu.RUnlock()
	var overall float64
	if mlc.totalQueries > 0 {
		overall = float64(mlc.totalHits) / float64(mlc.totalQueries)
	}
	return map[string]interface{}{
		"overall_hit_rate": overall,
		"l1_hit_rate":      mlc.l1HitRate,
		"l2_hit_rate":      mlc.l2HitRate,
		"total_queries":    mlc.totalQueries,
		"total_hits":       mlc.totalHits,
		"l1_stats":         mlc.l1Cache.Stats(),
		"l2_stats":         mlc.l2Cache.Stats(),
	}
}