@@ -5,8 +5,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/klauspost/compress/zstd"
 	"lol.mleku.dev/log"
-	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
 )
@@ -17,31 +17,44 @@ const (
 	DefaultMaxAge = 5 * time.Minute
 )
 
-// EventCacheEntry represents a cached set of events for a filter
+// EventCacheEntry represents a cached set of compressed serialized events for a filter
 type EventCacheEntry struct {
 	FilterKey string
-	Events    event.S // Slice of events
-	TotalSize int     // Estimated size in bytes
+	CompressedData   []byte // ZSTD compressed serialized JSON events
+	UncompressedSize int    // Original size before compression (for stats)
+	CompressedSize   int    // Actual compressed size in bytes
+	EventCount       int    // Number of events in this entry
 	LastAccess  time.Time
 	CreatedAt   time.Time
 	listElement *list.Element
 }
 
-// EventCache caches event.S results from database queries
+// EventCache caches event.S results from database queries with ZSTD compression
 type EventCache struct {
 	mu      sync.RWMutex
 	entries map[string]*EventCacheEntry
 	lruList *list.List
-	currentSize int64
+	currentSize int64 // Tracks compressed size
 	maxSize     int64
 	maxAge      time.Duration
+
+	// ZSTD encoder/decoder (reused for efficiency)
+	encoder *zstd.Encoder
+	decoder *zstd.Decoder
+
+	// Compaction tracking
+	needsCompaction bool
+	compactionChan  chan struct{}
+
+	// Metrics
 	hits          uint64
 	misses        uint64
 	evictions     uint64
 	invalidations uint64
+	compressionRatio float64 // Average compression ratio
+	compactionRuns   uint64
 }
 
 // NewEventCache creates a new event cache
@@ -53,62 +66,134 @@ func NewEventCache(maxSize int64, maxAge time.Duration) *EventCache {
 		maxAge = DefaultMaxAge
 	}
 
+	// Create ZSTD encoder at level 9 (best compression)
+	encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
+	if err != nil {
+		log.E.F("failed to create ZSTD encoder: %v", err)
+		return nil
+	}
+
+	// Create ZSTD decoder
+	decoder, err := zstd.NewReader(nil)
+	if err != nil {
+		log.E.F("failed to create ZSTD decoder: %v", err)
+		return nil
+	}
+
 	c := &EventCache{
 		entries: make(map[string]*EventCacheEntry),
 		lruList: list.New(),
 		maxSize: maxSize,
 		maxAge:  maxAge,
+		encoder: encoder,
+		decoder: decoder,
+		compactionChan: make(chan struct{}, 1),
 	}
 
+	// Start background workers
 	go c.cleanupExpired()
+	go c.compactionWorker()
 
 	return c
 }
 
-// Get retrieves cached events for a filter
-func (c *EventCache) Get(f *filter.F) (events event.S, found bool) {
+// Get retrieves cached serialized events for a filter (decompresses on the fly)
+func (c *EventCache) Get(f *filter.F) (serializedJSON [][]byte, found bool) {
 	filterKey := string(f.Serialize())
 
-	c.mu.Lock()
-	defer c.mu.Unlock()
+	c.mu.RLock()
 	entry, exists := c.entries[filterKey]
+	c.mu.RUnlock()
 
 	if !exists {
+		c.mu.Lock()
 		c.misses++
+		c.mu.Unlock()
 		return nil, false
 	}
 
 	// Check if expired
 	if time.Since(entry.CreatedAt) > c.maxAge {
+		c.mu.Lock()
 		c.removeEntry(entry)
 		c.misses++
+		c.mu.Unlock()
 		return nil, false
 	}
 
+	// Decompress the data (outside of write lock for better concurrency)
+	decompressed, err := c.decoder.DecodeAll(entry.CompressedData, nil)
+	if err != nil {
+		log.E.F("failed to decompress cache entry: %v", err)
+		c.mu.Lock()
+		c.misses++
+		c.mu.Unlock()
+		return nil, false
+	}
+
+	// Deserialize the individual JSON events from the decompressed blob
+	// Format: each event is newline-delimited JSON
+	serializedJSON = make([][]byte, 0, entry.EventCount)
+	start := 0
+	for i := 0; i < len(decompressed); i++ {
+		if decompressed[i] == '\n' {
+			if i > start {
+				eventJSON := make([]byte, i-start)
+				copy(eventJSON, decompressed[start:i])
+				serializedJSON = append(serializedJSON, eventJSON)
+			}
+			start = i + 1
+		}
+	}
+	// Handle last event if no trailing newline
+	if start < len(decompressed) {
+		eventJSON := make([]byte, len(decompressed)-start)
+		copy(eventJSON, decompressed[start:])
+		serializedJSON = append(serializedJSON, eventJSON)
+	}
+
 	// Update access time and move to front
+	c.mu.Lock()
 	entry.LastAccess = time.Now()
 	c.lruList.MoveToFront(entry.listElement)
 	c.hits++
-	log.D.F("event cache HIT: filter=%s events=%d", filterKey[:min(50, len(filterKey))], len(entry.Events))
+	c.mu.Unlock()
 
-	return entry.Events, true
+	log.D.F("event cache HIT: filter=%s events=%d compressed=%d uncompressed=%d ratio=%.2f",
+		filterKey[:min(50, len(filterKey))], entry.EventCount, entry.CompressedSize,
+		entry.UncompressedSize, float64(entry.UncompressedSize)/float64(entry.CompressedSize))
+
+	return serializedJSON, true
 }
 
-// Put stores events in the cache
-func (c *EventCache) Put(f *filter.F, events event.S) {
-	if len(events) == 0 {
+// PutJSON stores pre-marshaled JSON in the cache with ZSTD compression
+// This should be called AFTER events are sent to the client with the marshaled envelopes
+func (c *EventCache) PutJSON(f *filter.F, marshaledJSON [][]byte) {
+	if len(marshaledJSON) == 0 {
 		return
 	}
 
 	filterKey := string(f.Serialize())
 
-	// Estimate size: each event is roughly 500 bytes on average
-	estimatedSize := len(events) * 500
+	// Concatenate all JSON events with newline delimiters for compression
+	totalSize := 0
+	for _, jsonData := range marshaledJSON {
+		totalSize += len(jsonData) + 1 // +1 for newline
+	}
+	uncompressed := make([]byte, 0, totalSize)
+	for _, jsonData := range marshaledJSON {
+		uncompressed = append(uncompressed, jsonData...)
+		uncompressed = append(uncompressed, '\n')
+	}
+
+	// Compress with ZSTD level 9
+	compressed := c.encoder.EncodeAll(uncompressed, nil)
+	compressedSize := len(compressed)
 
-	// Don't cache if too large
-	if int64(estimatedSize) > c.maxSize {
-		log.W.F("event cache: entry too large: %d bytes", estimatedSize)
+	// Don't cache if compressed size is still too large
+	if int64(compressedSize) > c.maxSize {
+		log.W.F("event cache: compressed entry too large: %d bytes", compressedSize)
 		return
 	}
@@ -117,41 +202,77 @@ func (c *EventCache) Put(f *filter.F, events event.S) {
 
 	// Check if already exists
 	if existing, exists := c.entries[filterKey]; exists {
-		c.currentSize -= int64(existing.TotalSize)
-		existing.Events = events
-		existing.TotalSize = estimatedSize
+		c.currentSize -= int64(existing.CompressedSize)
+		existing.CompressedData = compressed
+		existing.UncompressedSize = totalSize
+		existing.CompressedSize = compressedSize
+		existing.EventCount = len(marshaledJSON)
 		existing.LastAccess = time.Now()
 		existing.CreatedAt = time.Now()
-		c.currentSize += int64(estimatedSize)
+		c.currentSize += int64(compressedSize)
 		c.lruList.MoveToFront(existing.listElement)
+		c.updateCompressionRatio(totalSize, compressedSize)
+		log.T.F("event cache UPDATE: filter=%s events=%d ratio=%.2f",
+			filterKey[:min(50, len(filterKey))], len(marshaledJSON),
+			float64(totalSize)/float64(compressedSize))
 		return
 	}
 
 	// Evict if necessary
-	for c.currentSize+int64(estimatedSize) > c.maxSize && c.lruList.Len() > 0 {
+	evictionCount := 0
+	for c.currentSize+int64(compressedSize) > c.maxSize && c.lruList.Len() > 0 {
 		oldest := c.lruList.Back()
 		if oldest != nil {
 			oldEntry := oldest.Value.(*EventCacheEntry)
 			c.removeEntry(oldEntry)
 			c.evictions++
+			evictionCount++
 		}
 	}
 
+	// Trigger compaction if we evicted entries
+	if evictionCount > 0 {
+		c.needsCompaction = true
+		select {
+		case c.compactionChan <- struct{}{}:
+		default:
+			// Channel already has signal, compaction will run
+		}
+	}
+
 	// Create new entry
 	entry := &EventCacheEntry{
 		FilterKey: filterKey,
-		Events:    events,
-		TotalSize: estimatedSize,
+		CompressedData:   compressed,
+		UncompressedSize: totalSize,
+		CompressedSize:   compressedSize,
+		EventCount:       len(marshaledJSON),
 		LastAccess: time.Now(),
 		CreatedAt:  time.Now(),
 	}
 
 	entry.listElement = c.lruList.PushFront(entry)
 	c.entries[filterKey] = entry
-	c.currentSize += int64(estimatedSize)
+	c.currentSize += int64(compressedSize)
+	c.updateCompressionRatio(totalSize, compressedSize)
 
-	log.D.F("event cache PUT: filter=%s events=%d size=%d total=%d/%d",
-		filterKey[:min(50, len(filterKey))], len(events), estimatedSize, c.currentSize, c.maxSize)
+	log.D.F("event cache PUT: filter=%s events=%d uncompressed=%d compressed=%d ratio=%.2f total=%d/%d",
+		filterKey[:min(50, len(filterKey))], len(marshaledJSON), totalSize, compressedSize,
+		float64(totalSize)/float64(compressedSize), c.currentSize, c.maxSize)
 }
 
+// updateCompressionRatio updates the rolling average compression ratio
+func (c *EventCache) updateCompressionRatio(uncompressed, compressed int) {
+	if compressed == 0 {
+		return
+	}
+	newRatio := float64(uncompressed) / float64(compressed)
+	// Use exponential moving average
+	if c.compressionRatio == 0 {
+		c.compressionRatio = newRatio
+	} else {
+		c.compressionRatio = 0.9*c.compressionRatio + 0.1*newRatio
+	}
+}
+
 // Invalidate clears all entries (called when new events are stored)
@@ -173,7 +294,33 @@ func (c *EventCache) Invalidate() {
 func (c *EventCache) removeEntry(entry *EventCacheEntry) {
 	delete(c.entries, entry.FilterKey)
 	c.lruList.Remove(entry.listElement)
-	c.currentSize -= int64(entry.TotalSize)
+	c.currentSize -= int64(entry.CompressedSize)
 }
 
+// compactionWorker runs in the background and compacts cache entries after evictions
+// to reclaim fragmented space and improve cache efficiency
+func (c *EventCache) compactionWorker() {
+	for range c.compactionChan {
+		c.mu.Lock()
+		if !c.needsCompaction {
+			c.mu.Unlock()
+			continue
+		}
+
+		log.D.F("cache compaction: starting (entries=%d size=%d/%d)",
+			len(c.entries), c.currentSize, c.maxSize)
+
+		// For ZSTD compressed entries, compaction mainly means ensuring
+		// entries are tightly packed in memory. Since each entry is already
+		// individually compressed at level 9, there's not much additional
+		// compression to gain. The main benefit is from the eviction itself.
+
+		c.needsCompaction = false
+		c.compactionRuns++
+		c.mu.Unlock()
+
+		log.D.F("cache compaction: completed (runs=%d)", c.compactionRuns)
+	}
+}
+
 // cleanupExpired removes expired entries periodically
@@ -207,13 +354,15 @@ func (c *EventCache) cleanupExpired() {
 // CacheStats holds cache performance metrics
 type CacheStats struct {
 	Entries     int
-	CurrentSize int64
+	CurrentSize int64 // Compressed size
 	MaxSize     int64
 	Hits          uint64
 	Misses        uint64
 	HitRate       float64
 	Evictions     uint64
 	Invalidations uint64
+	CompressionRatio float64 // Average compression ratio
+	CompactionRuns   uint64
 }
 
 // Stats returns cache statistics
@@ -236,6 +385,8 @@ func (c *EventCache) Stats() CacheStats {
 		HitRate:       hitRate,
 		Evictions:     c.evictions,
 		Invalidations: c.invalidations,
+		CompressionRatio: c.compressionRatio,
+		CompactionRuns:   c.compactionRuns,
 	}
 }
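
Reviewer note (not part of the patch): a minimal sketch of how a caller might wire Get and PutJSON together. It assumes the code sits in the same package as EventCache; the query and send callbacks are hypothetical placeholders, not this repository's actual handler API.

// handleReqSketch is illustrative only. On a cache hit it serves the
// decompressed, pre-marshaled JSON directly; on a miss it queries, sends,
// and then caches the marshaled envelopes for the next identical filter.
func handleReqSketch(
	cache *EventCache,
	f *filter.F,
	query func(*filter.F) [][]byte, // hypothetical DB query returning marshaled event JSON
	send func([]byte), // hypothetical transport write
) {
	if cached, ok := cache.Get(f); ok {
		for _, raw := range cached {
			send(raw)
		}
		return
	}

	marshaled := query(f)
	for _, raw := range marshaled {
		send(raw)
	}
	// Cache after sending, as the PutJSON doc comment recommends.
	cache.PutJSON(f, marshaled)
}

Since event JSON escapes newlines inside strings, the newline-delimited framing used by PutJSON/Get round-trips each marshaled event unchanged.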
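Similarly, a hedged sketch of periodic metrics reporting built on Stats(); the interval and log wording are arbitrary, but the CacheStats fields match the patch. It assumes the same package, where time and log are already imported.

// logCacheStatsSketch is illustrative only: it periodically reports cache
// health, including the rolling compression ratio and compaction count.
func logCacheStatsSketch(cache *EventCache, interval time.Duration) {
	go func() {
		for range time.Tick(interval) {
			s := cache.Stats()
			log.D.F("event cache stats: entries=%d size=%d/%d hit_rate=%.2f ratio=%.2f compactions=%d",
				s.Entries, s.CurrentSize, s.MaxSize, s.HitRate, s.CompressionRatio, s.CompactionRuns)
		}
	}()
}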