14 changed files with 369 additions and 1962 deletions
@@ -1,140 +0,0 @@
|
||||
================================================================ |
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT |
||||
================================================================ |
||||
Generated: 2025-09-20T11:04:39+00:00 |
||||
Benchmark Configuration: |
||||
Events per test: 10000 |
||||
Concurrent workers: 8 |
||||
Test duration: 60s |
||||
|
||||
Relays tested: 6 |
||||
|
||||
================================================================ |
||||
SUMMARY BY RELAY |
||||
================================================================ |
||||
|
||||
Relay: next-orly |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1035.42 |
||||
Events/sec: 659.20 |
||||
Events/sec: 1094.56 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 470.069µs |
||||
Bottom 10% Avg Latency: 750.491µs |
||||
Avg Latency: 190.573µs |
||||
P95 Latency: 693.101µs |
||||
P95 Latency: 289.761µs |
||||
P95 Latency: 22.450848ms |
||||
|
||||
Relay: khatru-sqlite |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1105.61 |
||||
Events/sec: 624.87 |
||||
Events/sec: 1070.10 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 458.035µs |
||||
Bottom 10% Avg Latency: 702.193µs |
||||
Avg Latency: 193.997µs |
||||
P95 Latency: 660.608µs |
||||
P95 Latency: 302.666µs |
||||
P95 Latency: 23.653412ms |
||||
|
||||
Relay: khatru-badger |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1040.11 |
||||
Events/sec: 663.14 |
||||
Events/sec: 1065.58 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 454.784µs |
||||
Bottom 10% Avg Latency: 706.219µs |
||||
Avg Latency: 193.914µs |
||||
P95 Latency: 654.637µs |
||||
P95 Latency: 296.525µs |
||||
P95 Latency: 21.642655ms |
||||
|
||||
Relay: relayer-basic |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1104.88 |
||||
Events/sec: 642.17 |
||||
Events/sec: 1079.27 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 433.89µs |
||||
Bottom 10% Avg Latency: 653.813µs |
||||
Avg Latency: 186.306µs |
||||
P95 Latency: 617.868µs |
||||
P95 Latency: 279.192µs |
||||
P95 Latency: 21.247322ms |
||||
|
||||
Relay: strfry |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1090.49 |
||||
Events/sec: 652.03 |
||||
Events/sec: 1098.57 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 448.058µs |
||||
Bottom 10% Avg Latency: 729.464µs |
||||
Avg Latency: 189.06µs |
||||
P95 Latency: 667.141µs |
||||
P95 Latency: 290.433µs |
||||
P95 Latency: 20.822884ms |
||||
|
||||
Relay: nostr-rs-relay |
||||
---------------------------------------- |
||||
Status: COMPLETED |
||||
Events/sec: 1123.91 |
||||
Events/sec: 647.62 |
||||
Events/sec: 1033.64 |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Success Rate: 100.0% |
||||
Avg Latency: 416.753µs |
||||
Bottom 10% Avg Latency: 638.318µs |
||||
Avg Latency: 185.217µs |
||||
P95 Latency: 597.338µs |
||||
P95 Latency: 273.191µs |
||||
P95 Latency: 22.416221ms |
||||
|
||||
|
||||
================================================================ |
||||
DETAILED RESULTS |
||||
================================================================ |
||||
|
||||
Individual relay reports are available in: |
||||
- /reports/run_20250920_101521/khatru-badger_results.txt |
||||
- /reports/run_20250920_101521/khatru-sqlite_results.txt |
||||
- /reports/run_20250920_101521/next-orly_results.txt |
||||
- /reports/run_20250920_101521/nostr-rs-relay_results.txt |
||||
- /reports/run_20250920_101521/relayer-basic_results.txt |
||||
- /reports/run_20250920_101521/strfry_results.txt |
||||
|
||||
================================================================ |
||||
BENCHMARK COMPARISON TABLE |
||||
================================================================ |
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate |
||||
---- ------ ----------- ----------- ------------ |
||||
next-orly OK 1094.56 470.069µs 100.0% |
||||
khatru-sqlite OK 1105.61 458.035µs 100.0% |
||||
khatru-badger OK 1065.58 454.784µs 100.0% |
||||
relayer-basic OK 1104.88 433.89µs 100.0% |
||||
strfry OK 1098.57 448.058µs 100.0% |
||||
nostr-rs-relay OK 1123.91 416.753µs 100.0% |
||||
|
||||
================================================================ |
||||
End of Report |
||||
================================================================ |
||||
@@ -0,0 +1,247 @@
|
||||
package querycache |
||||
|
||||
import ( |
||||
"container/list" |
||||
"sync" |
||||
"time" |
||||
|
||||
"lol.mleku.dev/log" |
||||
"next.orly.dev/pkg/encoders/event" |
||||
"next.orly.dev/pkg/encoders/filter" |
||||
) |
||||
|
||||
// Cache tuning defaults, applied by NewEventCache when the caller passes
// non-positive values.
const (
	// DefaultMaxSize is the default maximum cache size in bytes (512 MB)
	DefaultMaxSize = 512 * 1024 * 1024
	// DefaultMaxAge is the default maximum age for cache entries
	DefaultMaxAge = 5 * time.Minute
)
||||
|
||||
// EventCacheEntry represents a cached set of events for a filter.
// Each entry is linked into the owning cache's LRU list via listElement.
type EventCacheEntry struct {
	FilterKey   string        // serialized filter, also the map key in EventCache.entries
	Events      event.S       // Slice of events
	TotalSize   int           // Estimated size in bytes
	LastAccess  time.Time     // updated on every cache hit
	CreatedAt   time.Time     // used for max-age expiry
	listElement *list.Element // this entry's node in the cache's LRU list
}
||||
|
||||
// EventCache caches event.S results from database queries, keyed by the
// serialized filter. Capacity is enforced by size-based LRU eviction, and
// entries additionally expire after maxAge (checked on Get and by a
// background sweeper started in NewEventCache).
type EventCache struct {
	mu sync.RWMutex // guards every field below

	entries map[string]*EventCacheEntry // filter key -> entry
	lruList *list.List                  // front = most recently used

	currentSize int64         // sum of all entries' TotalSize estimates
	maxSize     int64         // capacity in bytes; eviction threshold
	maxAge      time.Duration // time-to-live for entries

	// Metrics counters: written under mu (write lock), read in Stats.
	hits          uint64
	misses        uint64
	evictions     uint64
	invalidations uint64
}
||||
|
||||
// NewEventCache creates a new event cache
|
||||
func NewEventCache(maxSize int64, maxAge time.Duration) *EventCache { |
||||
if maxSize <= 0 { |
||||
maxSize = DefaultMaxSize |
||||
} |
||||
if maxAge <= 0 { |
||||
maxAge = DefaultMaxAge |
||||
} |
||||
|
||||
c := &EventCache{ |
||||
entries: make(map[string]*EventCacheEntry), |
||||
lruList: list.New(), |
||||
maxSize: maxSize, |
||||
maxAge: maxAge, |
||||
} |
||||
|
||||
go c.cleanupExpired() |
||||
|
||||
return c |
||||
} |
||||
|
||||
// Get retrieves cached events for a filter
|
||||
func (c *EventCache) Get(f *filter.F) (events event.S, found bool) { |
||||
filterKey := string(f.Serialize()) |
||||
|
||||
c.mu.Lock() |
||||
defer c.mu.Unlock() |
||||
|
||||
entry, exists := c.entries[filterKey] |
||||
if !exists { |
||||
c.misses++ |
||||
return nil, false |
||||
} |
||||
|
||||
// Check if expired
|
||||
if time.Since(entry.CreatedAt) > c.maxAge { |
||||
c.removeEntry(entry) |
||||
c.misses++ |
||||
return nil, false |
||||
} |
||||
|
||||
// Update access time and move to front
|
||||
entry.LastAccess = time.Now() |
||||
c.lruList.MoveToFront(entry.listElement) |
||||
|
||||
c.hits++ |
||||
log.D.F("event cache HIT: filter=%s events=%d", filterKey[:min(50, len(filterKey))], len(entry.Events)) |
||||
|
||||
return entry.Events, true |
||||
} |
||||
|
||||
// Put stores events in the cache
|
||||
func (c *EventCache) Put(f *filter.F, events event.S) { |
||||
if len(events) == 0 { |
||||
return |
||||
} |
||||
|
||||
filterKey := string(f.Serialize()) |
||||
|
||||
// Estimate size: each event is roughly 500 bytes on average
|
||||
estimatedSize := len(events) * 500 |
||||
|
||||
// Don't cache if too large
|
||||
if int64(estimatedSize) > c.maxSize { |
||||
log.W.F("event cache: entry too large: %d bytes", estimatedSize) |
||||
return |
||||
} |
||||
|
||||
c.mu.Lock() |
||||
defer c.mu.Unlock() |
||||
|
||||
// Check if already exists
|
||||
if existing, exists := c.entries[filterKey]; exists { |
||||
c.currentSize -= int64(existing.TotalSize) |
||||
existing.Events = events |
||||
existing.TotalSize = estimatedSize |
||||
existing.LastAccess = time.Now() |
||||
existing.CreatedAt = time.Now() |
||||
c.currentSize += int64(estimatedSize) |
||||
c.lruList.MoveToFront(existing.listElement) |
||||
return |
||||
} |
||||
|
||||
// Evict if necessary
|
||||
for c.currentSize+int64(estimatedSize) > c.maxSize && c.lruList.Len() > 0 { |
||||
oldest := c.lruList.Back() |
||||
if oldest != nil { |
||||
oldEntry := oldest.Value.(*EventCacheEntry) |
||||
c.removeEntry(oldEntry) |
||||
c.evictions++ |
||||
} |
||||
} |
||||
|
||||
// Create new entry
|
||||
entry := &EventCacheEntry{ |
||||
FilterKey: filterKey, |
||||
Events: events, |
||||
TotalSize: estimatedSize, |
||||
LastAccess: time.Now(), |
||||
CreatedAt: time.Now(), |
||||
} |
||||
|
||||
entry.listElement = c.lruList.PushFront(entry) |
||||
c.entries[filterKey] = entry |
||||
c.currentSize += int64(estimatedSize) |
||||
|
||||
log.D.F("event cache PUT: filter=%s events=%d size=%d total=%d/%d", |
||||
filterKey[:min(50, len(filterKey))], len(events), estimatedSize, c.currentSize, c.maxSize) |
||||
} |
||||
|
||||
// Invalidate clears all entries (called when new events are stored)
|
||||
func (c *EventCache) Invalidate() { |
||||
c.mu.Lock() |
||||
defer c.mu.Unlock() |
||||
|
||||
if len(c.entries) > 0 { |
||||
cleared := len(c.entries) |
||||
c.entries = make(map[string]*EventCacheEntry) |
||||
c.lruList = list.New() |
||||
c.currentSize = 0 |
||||
c.invalidations += uint64(cleared) |
||||
log.T.F("event cache INVALIDATE: cleared %d entries", cleared) |
||||
} |
||||
} |
||||
|
||||
// removeEntry removes an entry (must be called with lock held)
|
||||
func (c *EventCache) removeEntry(entry *EventCacheEntry) { |
||||
delete(c.entries, entry.FilterKey) |
||||
c.lruList.Remove(entry.listElement) |
||||
c.currentSize -= int64(entry.TotalSize) |
||||
} |
||||
|
||||
// cleanupExpired removes expired entries periodically
|
||||
func (c *EventCache) cleanupExpired() { |
||||
ticker := time.NewTicker(1 * time.Minute) |
||||
defer ticker.Stop() |
||||
|
||||
for range ticker.C { |
||||
c.mu.Lock() |
||||
now := time.Now() |
||||
var toRemove []*EventCacheEntry |
||||
|
||||
for _, entry := range c.entries { |
||||
if now.Sub(entry.CreatedAt) > c.maxAge { |
||||
toRemove = append(toRemove, entry) |
||||
} |
||||
} |
||||
|
||||
for _, entry := range toRemove { |
||||
c.removeEntry(entry) |
||||
} |
||||
|
||||
if len(toRemove) > 0 { |
||||
log.D.F("event cache cleanup: removed %d expired entries", len(toRemove)) |
||||
} |
||||
|
||||
c.mu.Unlock() |
||||
} |
||||
} |
||||
|
||||
// CacheStats holds cache performance metrics, as snapshotted by Stats.
type CacheStats struct {
	Entries       int     // number of live entries
	CurrentSize   int64   // estimated bytes currently cached
	MaxSize       int64   // configured capacity in bytes
	Hits          uint64  // Get calls served from cache
	Misses        uint64  // Get calls not served (absent or expired)
	HitRate       float64 // Hits / (Hits + Misses); 0 when there were no lookups
	Evictions     uint64  // entries dropped by LRU eviction in Put
	Invalidations uint64  // entries dropped by Invalidate
}
||||
|
||||
// Stats returns cache statistics
|
||||
func (c *EventCache) Stats() CacheStats { |
||||
c.mu.RLock() |
||||
defer c.mu.RUnlock() |
||||
|
||||
total := c.hits + c.misses |
||||
hitRate := 0.0 |
||||
if total > 0 { |
||||
hitRate = float64(c.hits) / float64(total) |
||||
} |
||||
|
||||
return CacheStats{ |
||||
Entries: len(c.entries), |
||||
CurrentSize: c.currentSize, |
||||
MaxSize: c.maxSize, |
||||
Hits: c.hits, |
||||
Misses: c.misses, |
||||
HitRate: hitRate, |
||||
Evictions: c.evictions, |
||||
Invalidations: c.invalidations, |
||||
} |
||||
} |
||||
|
||||
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
||||
Loading…
Reference in new issue