26 changed files with 2715 additions and 184 deletions
@@ -0,0 +1,574 @@

package main

import (
    "context"
    "fmt"
    "sort"
    "sync"
    "time"

    "next.orly.dev/pkg/database"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/encoders/timestamp"
    "next.orly.dev/pkg/interfaces/signer/p8k"
)

// BenchmarkAdapter adapts the database.Database interface to the benchmark tests.
type BenchmarkAdapter struct {
    config  *BenchmarkConfig
    db      database.Database
    results []*BenchmarkResult
    mu      sync.RWMutex
}

// NewBenchmarkAdapter creates a new benchmark adapter.
func NewBenchmarkAdapter(config *BenchmarkConfig, db database.Database) *BenchmarkAdapter {
    return &BenchmarkAdapter{
        config:  config,
        db:      db,
        results: make([]*BenchmarkResult, 0),
    }
}

// RunPeakThroughputTest runs the peak throughput benchmark.
func (ba *BenchmarkAdapter) RunPeakThroughputTest() {
    fmt.Println("\n=== Peak Throughput Test ===")

    start := time.Now()
    var wg sync.WaitGroup
    var totalEvents int64
    var errors []error
    var latencies []time.Duration
    var mu sync.Mutex

    events := ba.generateEvents(ba.config.NumEvents)
    eventChan := make(chan *event.E, len(events))

    // Fill the event channel.
    for _, ev := range events {
        eventChan <- ev
    }
    close(eventChan)

    // Start workers.
    for i := 0; i < ba.config.ConcurrentWorkers; i++ {
        wg.Add(1)
        go func(workerID int) {
            defer wg.Done()

            ctx := context.Background()
            for ev := range eventChan {
                eventStart := time.Now()

                _, err := ba.db.SaveEvent(ctx, ev)
                latency := time.Since(eventStart)

                mu.Lock()
                if err != nil {
                    errors = append(errors, err)
                } else {
                    totalEvents++
                    latencies = append(latencies, latency)
                }
                mu.Unlock()
            }
        }(i)
    }

    wg.Wait()
    duration := time.Since(start)

    // Calculate metrics.
    result := &BenchmarkResult{
        TestName:          "Peak Throughput",
        Duration:          duration,
        TotalEvents:       int(totalEvents),
        EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
        ConcurrentWorkers: ba.config.ConcurrentWorkers,
        MemoryUsed:        getMemUsage(),
    }

    if len(latencies) > 0 {
        sort.Slice(latencies, func(i, j int) bool {
            return latencies[i] < latencies[j]
        })
        result.AvgLatency = calculateAverage(latencies)
        result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
        result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
        result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

        bottom10 := latencies[:int(float64(len(latencies))*0.10)]
        result.Bottom10Avg = calculateAverage(bottom10)
    }

    result.SuccessRate = float64(totalEvents) / float64(ba.config.NumEvents) * 100
    if len(errors) > 0 {
        result.Errors = make([]string, 0, len(errors))
        for _, err := range errors {
            result.Errors = append(result.Errors, err.Error())
        }
    }

    ba.mu.Lock()
    ba.results = append(ba.results, result)
    ba.mu.Unlock()

    ba.printResult(result)
}

// RunBurstPatternTest runs the burst pattern test.
func (ba *BenchmarkAdapter) RunBurstPatternTest() {
    fmt.Println("\n=== Burst Pattern Test ===")

    start := time.Now()
    var totalEvents int64
    var latencies []time.Duration
    var mu sync.Mutex

    ctx := context.Background()
    burstSize := 100
    bursts := ba.config.NumEvents / burstSize

    for i := 0; i < bursts; i++ {
        // Generate a burst of events.
        events := ba.generateEvents(burstSize)

        var wg sync.WaitGroup
        for _, ev := range events {
            wg.Add(1)
            go func(e *event.E) {
                defer wg.Done()

                eventStart := time.Now()
                _, err := ba.db.SaveEvent(ctx, e)
                latency := time.Since(eventStart)

                mu.Lock()
                if err == nil {
                    totalEvents++
                    latencies = append(latencies, latency)
                }
                mu.Unlock()
            }(ev)
        }

        wg.Wait()

        // Short pause between bursts.
        time.Sleep(10 * time.Millisecond)
    }

    duration := time.Since(start)

    result := &BenchmarkResult{
        TestName:          "Burst Pattern",
        Duration:          duration,
        TotalEvents:       int(totalEvents),
        EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
        ConcurrentWorkers: burstSize,
        MemoryUsed:        getMemUsage(),
        SuccessRate:       float64(totalEvents) / float64(ba.config.NumEvents) * 100,
    }

    if len(latencies) > 0 {
        sort.Slice(latencies, func(i, j int) bool {
            return latencies[i] < latencies[j]
        })
        result.AvgLatency = calculateAverage(latencies)
        result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
        result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
        result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

        bottom10 := latencies[:int(float64(len(latencies))*0.10)]
        result.Bottom10Avg = calculateAverage(bottom10)
    }

    ba.mu.Lock()
    ba.results = append(ba.results, result)
    ba.mu.Unlock()

    ba.printResult(result)
}

// RunMixedReadWriteTest runs the mixed read/write test.
func (ba *BenchmarkAdapter) RunMixedReadWriteTest() {
    fmt.Println("\n=== Mixed Read/Write Test ===")

    // First, populate some events.
    fmt.Println("Populating database with initial events...")
    populateEvents := ba.generateEvents(1000)
    ctx := context.Background()

    for _, ev := range populateEvents {
        ba.db.SaveEvent(ctx, ev)
    }

    start := time.Now()
    var writeCount, readCount int64
    var latencies []time.Duration
    var mu sync.Mutex
    var wg sync.WaitGroup

    // Start workers doing mixed reads and writes.
    for i := 0; i < ba.config.ConcurrentWorkers; i++ {
        wg.Add(1)
        go func(workerID int) {
            defer wg.Done()

            events := ba.generateEvents(ba.config.NumEvents / ba.config.ConcurrentWorkers)

            for idx, ev := range events {
                eventStart := time.Now()

                if idx%3 == 0 {
                    // Read operation.
                    f := filter.New()
                    f.Kinds = kind.NewS(kind.TextNote)
                    limit := uint(10)
                    f.Limit = &limit
                    _, _ = ba.db.QueryEvents(ctx, f)

                    mu.Lock()
                    readCount++
                    mu.Unlock()
                } else {
                    // Write operation.
                    _, _ = ba.db.SaveEvent(ctx, ev)

                    mu.Lock()
                    writeCount++
                    mu.Unlock()
                }

                latency := time.Since(eventStart)
                mu.Lock()
                latencies = append(latencies, latency)
                mu.Unlock()
            }
        }(i)
    }

    wg.Wait()
    duration := time.Since(start)

    result := &BenchmarkResult{
        TestName:          fmt.Sprintf("Mixed R/W (R:%d W:%d)", readCount, writeCount),
        Duration:          duration,
        TotalEvents:       int(writeCount + readCount),
        EventsPerSecond:   float64(writeCount+readCount) / duration.Seconds(),
        ConcurrentWorkers: ba.config.ConcurrentWorkers,
        MemoryUsed:        getMemUsage(),
        SuccessRate:       100.0,
    }

    if len(latencies) > 0 {
        sort.Slice(latencies, func(i, j int) bool {
            return latencies[i] < latencies[j]
        })
        result.AvgLatency = calculateAverage(latencies)
        result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
        result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
        result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

        bottom10 := latencies[:int(float64(len(latencies))*0.10)]
        result.Bottom10Avg = calculateAverage(bottom10)
    }

    ba.mu.Lock()
    ba.results = append(ba.results, result)
    ba.mu.Unlock()

    ba.printResult(result)
}

// RunQueryTest runs the query performance test.
func (ba *BenchmarkAdapter) RunQueryTest() {
    fmt.Println("\n=== Query Performance Test ===")

    // Populate with test data.
    fmt.Println("Populating database for query tests...")
    events := ba.generateEvents(5000)
    ctx := context.Background()

    for _, ev := range events {
        ba.db.SaveEvent(ctx, ev)
    }

    start := time.Now()
    var queryCount int64
    var latencies []time.Duration
    var mu sync.Mutex
    var wg sync.WaitGroup

    queryTypes := []func() *filter.F{
        func() *filter.F {
            f := filter.New()
            f.Kinds = kind.NewS(kind.TextNote)
            limit := uint(100)
            f.Limit = &limit
            return f
        },
        func() *filter.F {
            f := filter.New()
            f.Kinds = kind.NewS(kind.TextNote, kind.Repost)
            limit := uint(50)
            f.Limit = &limit
            return f
        },
        func() *filter.F {
            f := filter.New()
            limit := uint(10)
            f.Limit = &limit
            since := time.Now().Add(-1 * time.Hour).Unix()
            f.Since = timestamp.FromUnix(since)
            return f
        },
    }

    // Run concurrent queries.
    iterations := 1000
    for i := 0; i < ba.config.ConcurrentWorkers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            for j := 0; j < iterations/ba.config.ConcurrentWorkers; j++ {
                f := queryTypes[j%len(queryTypes)]()

                queryStart := time.Now()
                _, _ = ba.db.QueryEvents(ctx, f)
                latency := time.Since(queryStart)

                mu.Lock()
                queryCount++
                latencies = append(latencies, latency)
                mu.Unlock()
            }
        }()
    }

    wg.Wait()
    duration := time.Since(start)

    result := &BenchmarkResult{
        TestName:          fmt.Sprintf("Query Performance (%d queries)", queryCount),
        Duration:          duration,
        TotalEvents:       int(queryCount),
        EventsPerSecond:   float64(queryCount) / duration.Seconds(),
        ConcurrentWorkers: ba.config.ConcurrentWorkers,
        MemoryUsed:        getMemUsage(),
        SuccessRate:       100.0,
    }

    if len(latencies) > 0 {
        sort.Slice(latencies, func(i, j int) bool {
            return latencies[i] < latencies[j]
        })
        result.AvgLatency = calculateAverage(latencies)
        result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
        result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
        result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

        bottom10 := latencies[:int(float64(len(latencies))*0.10)]
        result.Bottom10Avg = calculateAverage(bottom10)
    }

    ba.mu.Lock()
    ba.results = append(ba.results, result)
    ba.mu.Unlock()

    ba.printResult(result)
}

// RunConcurrentQueryStoreTest runs the concurrent query-and-store test.
func (ba *BenchmarkAdapter) RunConcurrentQueryStoreTest() {
    fmt.Println("\n=== Concurrent Query+Store Test ===")

    start := time.Now()
    var storeCount, queryCount int64
    var latencies []time.Duration
    var mu sync.Mutex
    var wg sync.WaitGroup

    ctx := context.Background()

    // Half of the workers write, half query.
    halfWorkers := ba.config.ConcurrentWorkers / 2
    if halfWorkers < 1 {
        halfWorkers = 1
    }

    // Writers.
    for i := 0; i < halfWorkers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            events := ba.generateEvents(ba.config.NumEvents / halfWorkers)
            for _, ev := range events {
                eventStart := time.Now()
                ba.db.SaveEvent(ctx, ev)
                latency := time.Since(eventStart)

                mu.Lock()
                storeCount++
                latencies = append(latencies, latency)
                mu.Unlock()
            }
        }()
    }

    // Readers.
    for i := 0; i < halfWorkers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            for j := 0; j < ba.config.NumEvents/halfWorkers; j++ {
                f := filter.New()
                f.Kinds = kind.NewS(kind.TextNote)
                limit := uint(10)
                f.Limit = &limit

                queryStart := time.Now()
                ba.db.QueryEvents(ctx, f)
                latency := time.Since(queryStart)

                mu.Lock()
                queryCount++
                latencies = append(latencies, latency)
                mu.Unlock()

                time.Sleep(1 * time.Millisecond)
            }
        }()
    }

    wg.Wait()
    duration := time.Since(start)

    result := &BenchmarkResult{
        TestName:          fmt.Sprintf("Concurrent Q+S (Q:%d S:%d)", queryCount, storeCount),
        Duration:          duration,
        TotalEvents:       int(storeCount + queryCount),
        EventsPerSecond:   float64(storeCount+queryCount) / duration.Seconds(),
        ConcurrentWorkers: ba.config.ConcurrentWorkers,
        MemoryUsed:        getMemUsage(),
        SuccessRate:       100.0,
    }

    if len(latencies) > 0 {
        sort.Slice(latencies, func(i, j int) bool {
            return latencies[i] < latencies[j]
        })
        result.AvgLatency = calculateAverage(latencies)
        result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
        result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
        result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

        bottom10 := latencies[:int(float64(len(latencies))*0.10)]
        result.Bottom10Avg = calculateAverage(bottom10)
    }

    ba.mu.Lock()
    ba.results = append(ba.results, result)
    ba.mu.Unlock()

    ba.printResult(result)
}

// generateEvents generates test events with proper signatures.
func (ba *BenchmarkAdapter) generateEvents(count int) []*event.E {
    events := make([]*event.E, count)

    // Create a test signer.
    signer := p8k.MustNew()
    if err := signer.Generate(); err != nil {
        panic(fmt.Sprintf("failed to generate test key: %v", err))
    }

    for i := 0; i < count; i++ {
        ev := event.New()
        ev.Kind = kind.TextNote.ToU16()
        ev.CreatedAt = time.Now().Unix()
        ev.Content = []byte(fmt.Sprintf("Benchmark event #%d - Testing Nostr relay performance with automated load generation", i))
        ev.Tags = tag.NewS()

        // Add some tags for variety.
        if i%10 == 0 {
            benchmarkTag := tag.NewFromBytesSlice([]byte("t"), []byte("benchmark"))
            ev.Tags.Append(benchmarkTag)
        }

        // Sign the event (sets Pubkey, ID, and Sig).
        if err := ev.Sign(signer); err != nil {
            panic(fmt.Sprintf("failed to sign event: %v", err))
        }

        events[i] = ev
    }

    return events
}

func (ba *BenchmarkAdapter) printResult(r *BenchmarkResult) {
    fmt.Printf("\nResults for %s:\n", r.TestName)
    fmt.Printf(" Duration: %v\n", r.Duration)
    fmt.Printf(" Total Events: %d\n", r.TotalEvents)
    fmt.Printf(" Events/sec: %.2f\n", r.EventsPerSecond)
    fmt.Printf(" Success Rate: %.2f%%\n", r.SuccessRate)
    fmt.Printf(" Workers: %d\n", r.ConcurrentWorkers)
    fmt.Printf(" Memory Used: %.2f MB\n", float64(r.MemoryUsed)/1024/1024)

    if r.AvgLatency > 0 {
        fmt.Printf(" Avg Latency: %v\n", r.AvgLatency)
        fmt.Printf(" P90 Latency: %v\n", r.P90Latency)
        fmt.Printf(" P95 Latency: %v\n", r.P95Latency)
        fmt.Printf(" P99 Latency: %v\n", r.P99Latency)
        fmt.Printf(" Bottom 10%% Avg: %v\n", r.Bottom10Avg)
    }

    if len(r.Errors) > 0 {
        fmt.Printf(" Errors: %d\n", len(r.Errors))
        // Print the first few errors as samples.
        sampleCount := 3
        if len(r.Errors) < sampleCount {
            sampleCount = len(r.Errors)
        }
        for i := 0; i < sampleCount; i++ {
            fmt.Printf(" Sample %d: %s\n", i+1, r.Errors[i])
        }
    }
}

func (ba *BenchmarkAdapter) GenerateReport() {
    // Print a summary of every result collected so far.
    fmt.Println("\n=== Benchmark Results Summary ===")
    ba.mu.RLock()
    defer ba.mu.RUnlock()

    for _, result := range ba.results {
        ba.printResult(result)
    }
}

func (ba *BenchmarkAdapter) GenerateAsciidocReport() {
    // TODO: Implement asciidoc report generation.
    fmt.Println("Asciidoc report generation not yet implemented for adapter")
}

func calculateAverage(durations []time.Duration) time.Duration {
    if len(durations) == 0 {
        return 0
    }

    var total time.Duration
    for _, d := range durations {
        total += d
    }
    return total / time.Duration(len(durations))
}
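Reviewer note: the sort-and-index percentile block is repeated verbatim in all five test methods above. A minimal consolidation sketch follows (fillLatencyStats is a hypothetical name, not part of this diff; it relies only on calculateAverage and the BenchmarkResult fields already used in this file):

// fillLatencyStats sorts latencies in place (as the inline blocks above do)
// and fills the latency fields of result. Hypothetical helper, not in the diff.
func fillLatencyStats(result *BenchmarkResult, latencies []time.Duration) {
    if len(latencies) == 0 {
        return
    }
    sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
    result.AvgLatency = calculateAverage(latencies)
    result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
    result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
    result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
    result.Bottom10Avg = calculateAverage(latencies[:int(float64(len(latencies))*0.10)])
}

With this helper, each test body would reduce to a single fillLatencyStats(result, latencies) call after wg.Wait().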
@@ -0,0 +1,122 @@

package main

import (
    "context"
    "fmt"
    "log"
    "os"
    "time"

    "next.orly.dev/pkg/database"
    _ "next.orly.dev/pkg/dgraph" // Imported to register the dgraph factory.
)

// DgraphBenchmark wraps a Benchmark with dgraph-specific setup.
type DgraphBenchmark struct {
    config   *BenchmarkConfig
    docker   *DgraphDocker
    database database.Database
    bench    *BenchmarkAdapter
}

// NewDgraphBenchmark creates a new dgraph benchmark instance.
func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) {
    // Create the Docker manager.
    docker := NewDgraphDocker()

    // Start the dgraph containers.
    ctx := context.Background()
    if err := docker.Start(ctx); err != nil {
        return nil, fmt.Errorf("failed to start dgraph: %w", err)
    }

    // Set the environment variable for the dgraph connection.
    os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint())

    // Create a database instance using the dgraph backend.
    cancel := func() {}
    db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn")
    if err != nil {
        docker.Stop()
        return nil, fmt.Errorf("failed to create dgraph database: %w", err)
    }

    // Wait for the database to become ready.
    fmt.Println("Waiting for dgraph database to be ready...")
    select {
    case <-db.Ready():
        fmt.Println("Dgraph database is ready")
    case <-time.After(30 * time.Second):
        db.Close()
        docker.Stop()
        return nil, fmt.Errorf("dgraph database failed to become ready")
    }

    // Create an adapter so the Database interface can drive the benchmark.
    adapter := NewBenchmarkAdapter(config, db)

    dgraphBench := &DgraphBenchmark{
        config:   config,
        docker:   docker,
        database: db,
        bench:    adapter,
    }

    return dgraphBench, nil
}

// Close closes the dgraph benchmark and stops the Docker containers.
func (dgb *DgraphBenchmark) Close() {
    fmt.Println("Closing dgraph benchmark...")

    if dgb.database != nil {
        dgb.database.Close()
    }

    if dgb.docker != nil {
        if err := dgb.docker.Stop(); err != nil {
            log.Printf("Error stopping dgraph Docker: %v", err)
        }
    }
}

// RunSuite runs the benchmark suite on dgraph.
func (dgb *DgraphBenchmark) RunSuite() {
    fmt.Println("\n╔════════════════════════════════════════════════════════╗")
    fmt.Println("║             DGRAPH BACKEND BENCHMARK SUITE             ║")
    fmt.Println("╚════════════════════════════════════════════════════════╝")

    // Run only one round for dgraph to keep benchmark time reasonable.
    fmt.Printf("\n=== Starting dgraph benchmark ===\n")

    fmt.Printf("RunPeakThroughputTest (dgraph)...\n")
    dgb.bench.RunPeakThroughputTest()
    time.Sleep(10 * time.Second)

    fmt.Printf("RunBurstPatternTest (dgraph)...\n")
    dgb.bench.RunBurstPatternTest()
    time.Sleep(10 * time.Second)

    fmt.Printf("RunMixedReadWriteTest (dgraph)...\n")
    dgb.bench.RunMixedReadWriteTest()
    time.Sleep(10 * time.Second)

    fmt.Printf("RunQueryTest (dgraph)...\n")
    dgb.bench.RunQueryTest()
    time.Sleep(10 * time.Second)

    fmt.Printf("RunConcurrentQueryStoreTest (dgraph)...\n")
    dgb.bench.RunConcurrentQueryStoreTest()

    fmt.Printf("\n=== Dgraph benchmark completed ===\n\n")
}

// GenerateReport generates the benchmark report.
func (dgb *DgraphBenchmark) GenerateReport() {
    dgb.bench.GenerateReport()
}

// GenerateAsciidocReport generates the asciidoc-format report.
func (dgb *DgraphBenchmark) GenerateAsciidocReport() {
    dgb.bench.GenerateAsciidocReport()
}
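For context, a minimal usage sketch of the type above (assuming a BenchmarkConfig populated by the benchmark's flag parsing; runDgraph is a hypothetical wrapper, and error handling is abbreviated):

// runDgraph wires together the lifecycle defined above: start containers and
// database, run the suite, report, then tear everything down.
func runDgraph(config *BenchmarkConfig) error {
    bench, err := NewDgraphBenchmark(config)
    if err != nil {
        return err
    }
    defer bench.Close() // Stops the database and the Docker containers.
    bench.RunSuite()
    bench.GenerateReport()
    return nil
}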
@@ -0,0 +1,160 @@

package main

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "time"
)

// DgraphDocker manages a dgraph instance via Docker Compose.
type DgraphDocker struct {
    composeFile string
    projectName string
    running     bool
}

// NewDgraphDocker creates a new dgraph Docker manager.
func NewDgraphDocker() *DgraphDocker {
    // Try to find the docker-compose file in the current directory first.
    composeFile := "docker-compose-dgraph.yml"

    // If not found, try the cmd/benchmark directory (for running from the project root).
    if _, err := os.Stat(composeFile); os.IsNotExist(err) {
        composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml")
    }

    return &DgraphDocker{
        composeFile: composeFile,
        projectName: "orly-benchmark-dgraph",
        running:     false,
    }
}

// Start starts the dgraph Docker containers.
func (d *DgraphDocker) Start(ctx context.Context) error {
    fmt.Println("Starting dgraph Docker containers...")

    // Stop any existing containers first.
    d.Stop()

    // Start the containers.
    cmd := exec.CommandContext(
        ctx,
        "docker-compose",
        "-f", d.composeFile,
        "-p", d.projectName,
        "up", "-d",
    )
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to start dgraph containers: %w", err)
    }

    fmt.Println("Waiting for dgraph to be healthy...")

    // Wait for the health checks to pass.
    if err := d.waitForHealthy(ctx, 60*time.Second); err != nil {
        d.Stop() // Clean up on failure.
        return err
    }

    d.running = true
    fmt.Println("Dgraph is ready!")
    return nil
}

// waitForHealthy waits for dgraph to become healthy.
func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)

    for time.Now().Before(deadline) {
        // Check whether alpha is healthy via the Docker health status.
        cmd := exec.CommandContext(
            ctx,
            "docker",
            "inspect",
            "--format={{.State.Health.Status}}",
            "orly-benchmark-dgraph-alpha",
        )

        output, err := cmd.Output()
        if err == nil && string(output) == "healthy\n" {
            // Additional short wait to ensure full readiness.
            time.Sleep(2 * time.Second)
            return nil
        }

        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(2 * time.Second):
            // Continue waiting.
        }
    }

    return fmt.Errorf("dgraph failed to become healthy within %v", timeout)
}

// Stop stops and removes the dgraph Docker containers.
func (d *DgraphDocker) Stop() error {
    if !d.running {
        // Try to stop anyway in case of untracked state.
        cmd := exec.Command(
            "docker-compose",
            "-f", d.composeFile,
            "-p", d.projectName,
            "down", "-v",
        )
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        _ = cmd.Run() // Ignore errors.
        return nil
    }

    fmt.Println("Stopping dgraph Docker containers...")

    cmd := exec.Command(
        "docker-compose",
        "-f", d.composeFile,
        "-p", d.projectName,
        "down", "-v",
    )
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to stop dgraph containers: %w", err)
    }

    d.running = false
    fmt.Println("Dgraph containers stopped")
    return nil
}

// GetGRPCEndpoint returns the dgraph gRPC endpoint.
func (d *DgraphDocker) GetGRPCEndpoint() string {
    return "localhost:9080"
}

// IsRunning returns whether dgraph is running.
func (d *DgraphDocker) IsRunning() bool {
    return d.running
}

// Logs returns the logs from the dgraph containers.
func (d *DgraphDocker) Logs() error {
    cmd := exec.Command(
        "docker-compose",
        "-f", d.composeFile,
        "-p", d.projectName,
        "logs",
    )
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}
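A short sketch of driving DgraphDocker directly, e.g. when debugging a startup failure; it uses only the methods defined above, and startDgraphOrDump is a hypothetical helper name:

// startDgraphOrDump starts the containers and, on failure, dumps the compose
// logs to stdout/stderr before returning the error.
func startDgraphOrDump(ctx context.Context) (*DgraphDocker, error) {
    docker := NewDgraphDocker()
    if err := docker.Start(ctx); err != nil {
        _ = docker.Logs() // Best-effort log dump to help diagnose the failure.
        return nil, err
    }
    return docker, nil
}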
@@ -0,0 +1,44 @@

version: "3.9"

services:
  dgraph-zero:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-zero
    working_dir: /data/zero
    ports:
      - "5080:5080"
      - "6080:6080"
    command: dgraph zero --my=dgraph-zero:5080
    networks:
      - orly-benchmark
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 3
      start_period: 5s

  dgraph-alpha:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-alpha
    working_dir: /data/alpha
    ports:
      - "8080:8080"
      - "9080:9080"
    command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
    networks:
      - orly-benchmark
    depends_on:
      dgraph-zero:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 6
      start_period: 10s

networks:
  orly-benchmark:
    name: orly-benchmark-network
    driver: bridge
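The health checks above run dgraph version inside the containers, and waitForHealthy polls the resulting Docker health status. As a cross-check from the host, one could instead hit alpha's HTTP health route on the published 8080 port. A standalone sketch, assuming dgraph alpha's /health endpoint on that port (an assumption, not shown in this diff):

// healthprobe.go — standalone sketch, not part of this diff.
package main

import (
    "fmt"
    "net/http"
)

// alphaHealthy reports whether dgraph alpha answers /health with 200 OK.
func alphaHealthy() bool {
    resp, err := http.Get("http://localhost:8080/health")
    if err != nil {
        return false
    }
    defer resp.Body.Close()
    return resp.StatusCode == http.StatusOK
}

func main() {
    fmt.Println("alpha healthy:", alphaHealthy())
}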
@@ -0,0 +1,19 @@

#!/bin/bash
# Run the Badger benchmark with reduced cache sizes to avoid OOM.

# Set reasonable cache sizes for the benchmark.
export ORLY_DB_BLOCK_CACHE_MB=256   # Reduced from 1024MB
export ORLY_DB_INDEX_CACHE_MB=128   # Reduced from 512MB
export ORLY_QUERY_CACHE_SIZE_MB=128 # Reduced from 512MB

# Clean up old data.
rm -rf /tmp/benchmark_db_badger

echo "Running Badger benchmark with reduced cache sizes:"
echo "  Block Cache: ${ORLY_DB_BLOCK_CACHE_MB}MB"
echo "  Index Cache: ${ORLY_DB_INDEX_CACHE_MB}MB"
echo "  Query Cache: ${ORLY_QUERY_CACHE_SIZE_MB}MB"
echo ""

# Run the benchmark.
./benchmark -events "${1:-1000}" -workers "${2:-4}" -datadir /tmp/benchmark_db_badger
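The script takes two optional positional arguments, events and workers, defaulting to 1000 and 4. A hypothetical invocation (the script filename is illustrative; the diff does not show it):

# Run with 5000 events and 8 workers (filename is a placeholder).
bash ./run-badger-benchmark.sh 5000 8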