Browse Source
Replaced legacy `*.orly` module imports with `next.orly.dev/pkg` paths across the codebase for consistency. Removed legacy `go.mod` files from sub-packages, consolidating dependency management. Added Dockerfiles and configurations for benchmarking environments.main
236 changed files with 4098 additions and 1253 deletions
@ -0,0 +1,46 @@ |
|||||||
|
# Dockerfile for the benchmark runner container.
# Builds the Go benchmark tool in a throwaway stage, then ships only the
# static binary plus the orchestration script in a minimal Alpine image.

FROM golang:1.25-alpine AS builder

# Build dependencies: git for module fetches, CA certs for HTTPS downloads.
RUN apk add --no-cache git ca-certificates

# Set working directory
WORKDIR /build

# Copy module manifests first so the dependency layer stays cached until
# go.mod/go.sum actually change.
COPY go.mod go.sum ./
RUN go mod download

# Copy the rest of the source.
COPY . .

# Static build (CGO disabled) so the binary runs on plain Alpine.
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o benchmark cmd/benchmark/main.go

# Final stage — pin the Alpine release instead of :latest so rebuilds are
# reproducible (hadolint DL3007).
FROM alpine:3.20

# Runtime dependencies: the runner script probes relays with wget and falls
# back to curl, so both are installed; CA certs for any TLS endpoints.
RUN apk --no-cache add ca-certificates curl wget

WORKDIR /app

# Benchmark binary from the build stage.
COPY --from=builder /build/benchmark /app/benchmark

# Orchestration script that drives the tests and aggregates reports.
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
RUN chmod +x /app/benchmark-runner

# Reports are written here (typically bind-mounted by docker-compose).
RUN mkdir -p /reports

# Default benchmark parameters; override via docker-compose `environment:`.
ENV BENCHMARK_EVENTS=10000 \
    BENCHMARK_WORKERS=8 \
    BENCHMARK_DURATION=60s

# Exec-form CMD so the script runs as PID 1 and receives SIGTERM directly.
CMD ["/app/benchmark-runner"]
||||||
@ -0,0 +1,23 @@ |
|||||||
|
# Build khatru's basic-badger example relay.
FROM golang:1.25-alpine AS builder

# git for module fetches; CA certs for HTTPS.
RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example; Badger is pure Go so CGO can stay off,
# which keeps the binary portable to plain Alpine.
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

# Pin the runtime base instead of :latest for reproducible rebuilds
# (hadolint DL3007).
FROM alpine:3.20
# wget is required by the HEALTHCHECK probe below.
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
# Badger database directory (volume-mounted by docker-compose).
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
# Cheap liveness probe so orchestrators can detect a wedged relay.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]
||||||
@ -0,0 +1,23 @@ |
|||||||
|
# Build khatru's basic-sqlite3 example relay.
FROM golang:1.25-alpine AS builder

# gcc/musl-dev/sqlite-dev are required because the sqlite3 driver uses CGO.
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite3 example (directory name corrected in the comment;
# CGO stays enabled for the sqlite3 driver).
RUN cd examples/basic-sqlite3 && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

# Pin the runtime base instead of :latest for reproducible rebuilds
# (hadolint DL3007). Building on alpine keeps the musl-linked binary
# compatible with this runtime image.
FROM alpine:3.20
# sqlite pulls in the runtime library; wget is used by the HEALTHCHECK.
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
# Database directory (volume-mounted by docker-compose).
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]
||||||
@ -0,0 +1,80 @@ |
|||||||
|
# Dockerfile for the next.orly.dev relay.
# Ubuntu (not Alpine) is used because the relay links libsecp256k1 via CGO
# and that library is built from source below.
FROM ubuntu:22.04 AS builder

# Go toolchain version to install (override with --build-arg GOLANG_VERSION=...).
ARG GOLANG_VERSION=1.25.1

# Packages needed just to fetch the Go toolchain; apt lists are cleaned in
# the same layer so they never persist in the image.
RUN apt-get update && \
    apt-get install -y wget ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# Download and unpack the Go toolchain.
# NOTE(review): the tarball is fetched without checksum verification —
# consider verifying the published sha256 for supply-chain safety.
RUN wget https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm go${GOLANG_VERSION}.linux-amd64.tar.gz

# Make the toolchain available on PATH.
ENV PATH="/usr/local/go/bin:${PATH}"

# Fail the build early if the toolchain is broken.
RUN go version

# Toolchain for building libsecp256k1. Use apt-get (not apt: its CLI is not
# guaranteed stable for scripting, hadolint DL3027) and clean the lists in
# the same layer (DL3009).
RUN apt-get update && \
    apt-get install -y build-essential autoconf libtool git wget && \
    rm -rf /var/lib/apt/lists/*

# Build a pinned libsecp256k1 with the schnorrsig/ecdh modules the relay needs.
RUN cd /tmp && \
    rm -rf secp256k1 && \
    git clone https://github.com/bitcoin-core/secp256k1.git && \
    cd secp256k1 && \
    git checkout v0.6.0 && \
    git submodule init && \
    git submodule update && \
    ./autogen.sh && \
    ./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr && \
    make -j1 && \
    make install

# Set working directory
WORKDIR /build

# Dependency layer: cached until go.mod/go.sum change.
COPY go.mod go.sum ./
RUN go mod download

# Copy the rest of the source.
COPY . .

# CGO must stay enabled so the relay links against libsecp256k1.
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .

# ---- Final stage ----
FROM ubuntu:22.04

# Runtime dependencies. The symlink maps the distro soname (.so.0) onto the
# .so.5 soname the binary was linked against (from the v0.6.0 build above).
# NOTE(review): the distro library is an older ABI than v0.6.0 — confirm the
# relay only uses symbols present in both, or copy the built library from
# the builder stage instead of relying on this symlink.
RUN apt-get update && apt-get install -y ca-certificates curl libsecp256k1-0 libsecp256k1-dev && rm -rf /var/lib/apt/lists/* && \
    ln -sf /usr/lib/x86_64-linux-gnu/libsecp256k1.so.0 /usr/lib/x86_64-linux-gnu/libsecp256k1.so.5

WORKDIR /app

# Relay binary from the build stage.
COPY --from=builder /build/relay /app/relay

# Event store location (typically volume-mounted).
RUN mkdir -p /data

# Documentation only; publish the port via docker-compose / -p.
EXPOSE 8080

# Default runtime configuration.
ENV DATA_DIR=/data
ENV LISTEN=0.0.0.0
ENV PORT=8080
ENV LOG_LEVEL=info

# Liveness probe (curl is installed above).
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

# Exec form so the relay runs as PID 1 and receives SIGTERM.
CMD ["/app/relay"]
||||||
@ -0,0 +1,33 @@ |
|||||||
|
# Build nostr-rs-relay (Rust, SQLite backend) from source.
FROM ubuntu:22.04 AS builder

# Build dependencies, one per line and sorted for diffability.
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    libsqlite3-dev \
    pkg-config \
    protobuf-compiler \
    && rm -rf /var/lib/apt/lists/*

# Install Rust via rustup.
# NOTE(review): this pipes an unpinned installer to sh; consider pinning a
# rustup/toolchain version for reproducible builds.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

WORKDIR /build
COPY . .

# Release build of the relay.
RUN cargo build --release

# Runtime stage: only the binary plus the libraries it needs.
FROM ubuntu:22.04
# sqlite3 for the database; wget for the HEALTHCHECK probe.
RUN apt-get update && apt-get install -y ca-certificates sqlite3 wget && rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
# Database directory (volume-mounted by docker-compose).
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

# Exec form so the relay runs as PID 1.
CMD ["/app/nostr-rs-relay"]
||||||
@ -0,0 +1,23 @@ |
|||||||
|
# Build relayer's basic example relay.
FROM golang:1.25-alpine AS builder

# gcc/musl-dev/sqlite-dev are required because the sqlite3 driver uses CGO.
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example (CGO enabled for the sqlite3 driver).
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

# Pin the runtime base instead of :latest for reproducible rebuilds
# (hadolint DL3007). Building on alpine keeps the musl-linked binary
# compatible with this runtime image.
FROM alpine:3.20
# sqlite pulls in the runtime library; wget is used by the HEALTHCHECK.
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
# Database directory (volume-mounted by docker-compose).
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]
||||||
@ -0,0 +1,54 @@ |
|||||||
|
# Build strfry (C++, LMDB-backed relay) from source.
FROM ubuntu:22.04 AS builder

# Keep apt from prompting during the build.
ENV DEBIAN_FRONTEND=noninteractive

# Build dependencies for strfry and its golpe framework.
RUN apt-get update && apt-get install -y git g++ make libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev libsecp256k1-dev libzstd-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# golpe is vendored as a git submodule and must be present before building.
RUN git submodule update --init --recursive

# Build strfry in parallel across all available cores.
RUN make setup-golpe && \
    make -j$(nproc)

# Runtime stage: shared libraries only, plus curl (HEALTHCHECK) and bash
# (required by the generated entrypoint script below).
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    bash \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
COPY --from=builder /build/strfry.conf /app/

# Create the data directory placeholder (may be masked by a volume at runtime)
RUN mkdir -p /data && \
    chmod 755 /data

# Rewrite strfry.conf to bind all interfaces on port 8080. These sed
# patterns match the stock config verbatim; if upstream changes the file's
# formatting the substitutions silently become no-ops.
RUN sed -i 's/bind = "127.0.0.1"/bind = "0.0.0.0"/' /app/strfry.conf && \
    sed -i 's/port = 7777/port = 8080/' /app/strfry.conf

# Entrypoint ensures the LMDB directory exists inside the mounted volume
# before starting, then execs strfry so the relay runs as PID 1 and
# receives SIGTERM directly.
ENV STRFRY_DB_PATH=/data/strfry.lmdb
RUN echo '#!/usr/bin/env bash' > /entrypoint.sh && \
    echo 'set -euo pipefail' >> /entrypoint.sh && \
    echo 'DB_PATH="${STRFRY_DB_PATH:-/data/strfry.lmdb}"' >> /entrypoint.sh && \
    echo 'mkdir -p "$DB_PATH"' >> /entrypoint.sh && \
    echo 'chown -R root:root "$(dirname "$DB_PATH")"' >> /entrypoint.sh && \
    echo 'exec /app/strfry relay' >> /entrypoint.sh && \
    chmod +x /entrypoint.sh

EXPOSE 8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

ENTRYPOINT ["/entrypoint.sh"]
||||||
@ -0,0 +1,260 @@ |
|||||||
|
# Nostr Relay Benchmark Suite |
||||||
|
|
||||||
|
A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including: |
||||||
|
|
||||||
|
- **next.orly.dev** (this repository) - BadgerDB-based relay |
||||||
|
- **Khatru** - SQLite and Badger variants |
||||||
|
- **Relayer** - Basic example implementation |
||||||
|
- **Strfry** - C++ LMDB-based relay |
||||||
|
- **nostr-rs-relay** - Rust-based relay with SQLite |
||||||
|
|
||||||
|
## Features |
||||||
|
|
||||||
|
### Benchmark Tests |
||||||
|
|
||||||
|
1. **Peak Throughput Test** |
||||||
|
- Tests maximum event ingestion rate |
||||||
|
- Concurrent workers pushing events as fast as possible |
||||||
|
- Measures events/second, latency distribution, success rate |
||||||
|
|
||||||
|
2. **Burst Pattern Test** |
||||||
|
- Simulates real-world traffic patterns |
||||||
|
- Alternating high-activity bursts and quiet periods |
||||||
|
- Tests relay behavior under varying loads |
||||||
|
|
||||||
|
3. **Mixed Read/Write Test** |
||||||
|
- Concurrent read and write operations |
||||||
|
- Tests query performance while events are being ingested |
||||||
|
- Measures combined throughput and latency |
||||||
|
|
||||||
|
### Performance Metrics |
||||||
|
|
||||||
|
- **Throughput**: Events processed per second |
||||||
|
- **Latency**: Average, P95, and P99 response times |
||||||
|
- **Success Rate**: Percentage of successful operations |
||||||
|
- **Memory Usage**: Peak memory consumption during tests |
||||||
|
- **Error Analysis**: Detailed error reporting and categorization |
||||||
|
|
||||||
|
### Reporting |
||||||
|
|
||||||
|
- Individual relay reports with detailed metrics |
||||||
|
- Aggregate comparison report across all relays |
||||||
|
- Comparison tables for easy performance analysis |
||||||
|
- Timestamped results for tracking improvements over time |
||||||
|
|
||||||
|
## Quick Start |
||||||
|
|
||||||
|
### 1. Setup External Relays |
||||||
|
|
||||||
|
Run the setup script to download and configure all external relay repositories: |
||||||
|
|
||||||
|
```bash |
||||||
|
cd cmd/benchmark |
||||||
|
./setup-external-relays.sh |
||||||
|
``` |
||||||
|
|
||||||
|
This will: |
||||||
|
- Clone all external relay repositories |
||||||
|
- Create Docker configurations for each relay |
||||||
|
- Set up configuration files |
||||||
|
- Create data and report directories |
||||||
|
|
||||||
|
### 2. Run Benchmarks |
||||||
|
|
||||||
|
Start all relays and run the benchmark suite: |
||||||
|
|
||||||
|
```bash |
||||||
|
docker-compose up --build |
||||||
|
``` |
||||||
|
|
||||||
|
The system will: |
||||||
|
- Build and start all relay containers |
||||||
|
- Wait for all relays to become healthy |
||||||
|
- Run benchmarks against each relay sequentially |
||||||
|
- Generate individual and aggregate reports |
||||||
|
|
||||||
|
### 3. View Results |
||||||
|
|
||||||
|
Results are stored in the `reports/` directory with timestamps: |
||||||
|
|
||||||
|
```bash |
||||||
|
# View the aggregate report |
||||||
|
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt |
||||||
|
|
||||||
|
# View individual relay results |
||||||
|
ls reports/run_YYYYMMDD_HHMMSS/ |
||||||
|
``` |
||||||
|
|
||||||
|
## Architecture |
||||||
|
|
||||||
|
### Docker Compose Services |
||||||
|
|
||||||
|
| Service | Port | Description | |
||||||
|
|---------|------|-------------| |
||||||
|
| next-orly | 8001 | This repository's BadgerDB relay | |
||||||
|
| khatru-sqlite | 8002 | Khatru with SQLite backend | |
||||||
|
| khatru-badger | 8003 | Khatru with Badger backend | |
||||||
|
| relayer-basic | 8004 | Basic relayer example | |
||||||
|
| strfry | 8005 | Strfry C++ LMDB relay | |
||||||
|
| nostr-rs-relay | 8006 | Rust SQLite relay | |
||||||
|
| benchmark-runner | - | Orchestrates tests and aggregates results | |
||||||
|
|
||||||
|
### File Structure |
||||||
|
|
||||||
|
``` |
||||||
|
cmd/benchmark/ |
||||||
|
├── main.go # Benchmark tool implementation |
||||||
|
├── docker-compose.yml # Service orchestration |
||||||
|
├── setup-external-relays.sh # Repository setup script |
||||||
|
├── benchmark-runner.sh # Test orchestration script |
||||||
|
├── Dockerfile.next-orly # This repo's relay container |
||||||
|
├── Dockerfile.benchmark # Benchmark runner container |
||||||
|
├── Dockerfile.khatru-sqlite # Khatru SQLite variant |
||||||
|
├── Dockerfile.khatru-badger # Khatru Badger variant |
||||||
|
├── Dockerfile.relayer-basic # Relayer basic example |
||||||
|
├── Dockerfile.strfry # Strfry relay |
||||||
|
├── Dockerfile.nostr-rs-relay # Rust relay |
||||||
|
├── configs/ |
||||||
|
│ ├── strfry.conf # Strfry configuration |
||||||
|
│ └── config.toml # nostr-rs-relay configuration |
||||||
|
├── external/ # External relay repositories |
||||||
|
├── data/ # Persistent data for each relay |
||||||
|
└── reports/ # Benchmark results |
||||||
|
``` |
||||||
|
|
||||||
|
## Configuration |
||||||
|
|
||||||
|
### Environment Variables |
||||||
|
|
||||||
|
The benchmark can be configured via environment variables in `docker-compose.yml`: |
||||||
|
|
||||||
|
```yaml |
||||||
|
environment: |
||||||
|
- BENCHMARK_EVENTS=10000 # Number of events per test |
||||||
|
- BENCHMARK_WORKERS=8 # Concurrent workers |
||||||
|
- BENCHMARK_DURATION=60s # Test duration |
||||||
|
- BENCHMARK_TARGETS=... # Relay endpoints to test |
||||||
|
``` |
||||||
|
|
||||||
|
### Custom Configuration |
||||||
|
|
||||||
|
1. **Modify test parameters**: Edit environment variables in `docker-compose.yml` |
||||||
|
2. **Add new relays**: |
||||||
|
- Add service to `docker-compose.yml` |
||||||
|
- Create appropriate Dockerfile |
||||||
|
- Update `BENCHMARK_TARGETS` environment variable |
||||||
|
3. **Adjust relay configs**: Edit files in `configs/` directory |
||||||
|
|
||||||
|
## Manual Usage |
||||||
|
|
||||||
|
### Run Individual Relay |
||||||
|
|
||||||
|
```bash |
||||||
|
# Build and run a specific relay |
||||||
|
docker-compose up next-orly |
||||||
|
|
||||||
|
# Run benchmark against specific endpoint |
||||||
|
./benchmark -datadir=/tmp/test -events=1000 -workers=4 |
||||||
|
``` |
||||||
|
|
||||||
|
### Run Benchmark Tool Directly |
||||||
|
|
||||||
|
```bash |
||||||
|
# Build the benchmark tool |
||||||
|
go build -o benchmark main.go |
||||||
|
|
||||||
|
# Run with custom parameters |
||||||
|
./benchmark \ |
||||||
|
-datadir=/tmp/benchmark_db \ |
||||||
|
-events=5000 \ |
||||||
|
-workers=4 \ |
||||||
|
-duration=30s |
||||||
|
``` |
||||||
|
|
||||||
|
## Benchmark Results Interpretation |
||||||
|
|
||||||
|
### Peak Throughput Test |
||||||
|
- **High events/sec**: Good write performance |
||||||
|
- **Low latency**: Efficient event processing |
||||||
|
- **High success rate**: Stable under load |
||||||
|
|
||||||
|
### Burst Pattern Test |
||||||
|
- **Consistent performance**: Good handling of variable loads |
||||||
|
- **Low P95/P99 latency**: Predictable response times |
||||||
|
- **No errors during bursts**: Robust queuing/buffering |
||||||
|
|
||||||
|
### Mixed Read/Write Test |
||||||
|
- **Balanced throughput**: Good concurrent operation handling |
||||||
|
- **Low read latency**: Efficient query processing |
||||||
|
- **Stable write performance**: Queries don't significantly impact writes |
||||||
|
|
||||||
|
## Development |
||||||
|
|
||||||
|
### Adding New Tests |
||||||
|
|
||||||
|
1. Extend the `Benchmark` struct in `main.go` |
||||||
|
2. Add new test method following existing patterns |
||||||
|
3. Update `main()` function to call new test |
||||||
|
4. Update result aggregation in `benchmark-runner.sh` |
||||||
|
|
||||||
|
### Modifying Relay Configurations |
||||||
|
|
||||||
|
Each relay's Dockerfile and configuration can be customized: |
||||||
|
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml |
||||||
|
- **Database settings**: Modify configuration files in `configs/` |
||||||
|
- **Network settings**: Update port mappings and health checks |
||||||
|
|
||||||
|
### Debugging |
||||||
|
|
||||||
|
```bash |
||||||
|
# View logs for specific relay |
||||||
|
docker-compose logs next-orly |
||||||
|
|
||||||
|
# Run benchmark with debug output |
||||||
|
docker-compose up --build benchmark-runner |
||||||
|
|
||||||
|
# Check individual container health |
||||||
|
docker-compose ps |
||||||
|
``` |
||||||
|
|
||||||
|
## Troubleshooting |
||||||
|
|
||||||
|
### Common Issues |
||||||
|
|
||||||
|
1. **Relay fails to start**: Check logs with `docker-compose logs <service>` |
||||||
|
2. **Connection refused**: Ensure relay health checks are passing |
||||||
|
3. **Build failures**: Verify external repositories were cloned correctly |
||||||
|
4. **Permission errors**: Ensure setup script is executable |
||||||
|
|
||||||
|
### Performance Issues |
||||||
|
|
||||||
|
- **Low throughput**: Check resource limits and concurrent worker count |
||||||
|
- **High memory usage**: Monitor container resource consumption |
||||||
|
- **Network bottlenecks**: Test on different host configurations |
||||||
|
|
||||||
|
### Reset Environment |
||||||
|
|
||||||
|
```bash |
||||||
|
# Clean up everything |
||||||
|
docker-compose down -v |
||||||
|
docker system prune -f |
||||||
|
rm -rf external/ data/ reports/ |
||||||
|
|
||||||
|
# Start fresh |
||||||
|
./setup-external-relays.sh |
||||||
|
docker-compose up --build |
||||||
|
``` |
||||||
|
|
||||||
|
## Contributing |
||||||
|
|
||||||
|
To add support for new relay implementations: |
||||||
|
|
||||||
|
1. Create appropriate Dockerfile following existing patterns |
||||||
|
2. Add service definition to `docker-compose.yml` |
||||||
|
3. Update `BENCHMARK_TARGETS` environment variable |
||||||
|
4. Test the new relay integration |
||||||
|
5. Update documentation |
||||||
|
|
||||||
|
## License |
||||||
|
|
||||||
|
This benchmark suite is part of the next.orly.dev project and follows the same licensing terms. |
||||||
@ -0,0 +1,265 @@ |
|||||||
|
#!/bin/sh

# Benchmark runner script for testing multiple Nostr relay implementations
# This script coordinates testing all relays and aggregates results

set -e

# Configuration from environment variables (docker-compose sets these;
# each has a sane default for standalone use).
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"
BENCHMARK_TARGETS="${BENCHMARK_TARGETS:-next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006}"
OUTPUT_DIR="${OUTPUT_DIR:-/reports}"

# Create output directory
mkdir -p "${OUTPUT_DIR}"

# Each run gets its own timestamped subdirectory so successive runs
# accumulate instead of overwriting each other.
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RUN_DIR="${OUTPUT_DIR}/run_${TIMESTAMP}"
mkdir -p "${RUN_DIR}"

# Banner with the effective configuration for this run.
echo "=================================================="
echo "Nostr Relay Benchmark Suite"
echo "=================================================="
echo "Timestamp: $(date)"
echo "Events per test: ${BENCHMARK_EVENTS}"
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
echo "Test duration: ${BENCHMARK_DURATION}"
echo "Output directory: ${RUN_DIR}"
echo "=================================================="
||||||
|
|
||||||
|
# Block until the given relay answers HTTP, or give up after ~2 minutes.
# $1 = display name, $2 = host:port. Returns 0 when reachable, 1 on timeout.
wait_for_relay() {
    local name="$1"
    local url="$2"
    local max_attempts=60
    local attempt=0

    echo "Waiting for ${name} to be ready at ${url}..."

    while [ $attempt -lt $max_attempts ]; do
        # Probe with wget first and fall back to curl, so the check works
        # regardless of which tool the image happens to provide.
        if wget --quiet --tries=1 --spider --timeout=5 "http://${url}" 2>/dev/null || \
           curl -f --connect-timeout 5 --max-time 5 "http://${url}" >/dev/null 2>&1; then
            echo "${name} is ready!"
            return 0
        fi

        attempt=$((attempt + 1))
        echo "  Attempt ${attempt}/${max_attempts}: ${name} not ready yet..."
        sleep 2
    done

    echo "ERROR: ${name} failed to become ready after ${max_attempts} attempts"
    return 1
}
||||||
|
|
||||||
|
# Run the benchmark tool against one relay and write its report to a file.
# $1 = relay name, $2 = host:port, $3 = output report path.
# Returns 1 (after writing a FAILED report) if the relay never comes up.
run_benchmark() {
    local relay_name="$1"
    local relay_url="$2"
    local output_file="$3"

    echo ""
    echo "=================================================="
    echo "Testing ${relay_name} at ws://${relay_url}"
    echo "=================================================="

    # Wait for the relay; record the failure in the report and bail if it
    # never becomes reachable.
    if ! wait_for_relay "${relay_name}" "${relay_url}"; then
        echo "ERROR: ${relay_name} is not responding, skipping..."
        echo "RELAY: ${relay_name}" > "${output_file}"
        echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
        echo "ERROR: Connection failed" >> "${output_file}"
        return 1
    fi

    # Run the benchmark
    echo "Running benchmark against ${relay_name}..."

    # Per-relay scratch directory; $$ (this shell's PID) keeps it unique.
    TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
    mkdir -p "${TEMP_DATA_DIR}"

    # Run benchmark and capture both stdout and stderr.
    # NOTE(review): ${relay_url} is not passed to /app/benchmark — the tool
    # only receives a local datadir. Confirm it picks up the target endpoint
    # from the environment; otherwise every run measures the same local store.
    if /app/benchmark \
        -datadir="${TEMP_DATA_DIR}" \
        -events="${BENCHMARK_EVENTS}" \
        -workers="${BENCHMARK_WORKERS}" \
        -duration="${BENCHMARK_DURATION}" \
        > "${output_file}" 2>&1; then

        echo "✓ Benchmark completed successfully for ${relay_name}"

        # Append identification and configuration metadata so the aggregate
        # report can attribute these numbers later.
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
        echo "BENCHMARK_CONFIG:" >> "${output_file}"
        echo "  Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
        echo "  Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
        echo "  Duration: ${BENCHMARK_DURATION}" >> "${output_file}"

    else
        echo "✗ Benchmark failed for ${relay_name}"
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "STATUS: FAILED" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
    fi

    # Clean up temporary data
    rm -rf "${TEMP_DATA_DIR}"
}
||||||
|
|
||||||
|
# Build ${RUN_DIR}/aggregate_report.txt: a header, a per-relay summary, and
# a fixed-width comparison table, all assembled from the individual
# <relay>_results.txt files written by run_benchmark.
generate_aggregate_report() {
    local aggregate_file="${RUN_DIR}/aggregate_report.txt"

    echo "Generating aggregate report..."

    # Report header. The heredoc is unquoted, so $(...) and ${...} expand.
    cat > "${aggregate_file}" << EOF
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: $(date -Iseconds)
Benchmark Configuration:
  Events per test: ${BENCHMARK_EVENTS}
  Concurrent workers: ${BENCHMARK_WORKERS}
  Test duration: ${BENCHMARK_DURATION}

Relays tested: $(echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | wc -l)

================================================================
SUMMARY BY RELAY
================================================================

EOF

    # Per-relay summary: status plus headline metrics pulled from each report.
    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        echo "Relay: ${relay_name}" >> "${aggregate_file}"
        echo "----------------------------------------" >> "${aggregate_file}"

        if [ -f "${relay_file}" ]; then
            # Extract key metrics from the relay's report
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                echo "Status: FAILED" >> "${aggregate_file}"
                grep "ERROR:" "${relay_file}" | head -1 >> "${aggregate_file}" || echo "Error: Unknown failure" >> "${aggregate_file}"
            else
                echo "Status: COMPLETED" >> "${aggregate_file}"

                # Extract performance metrics; || true because a missing
                # metric should not abort the whole report under set -e.
                grep "Events/sec:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Success Rate:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Avg Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "P95 Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Memory:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
            fi
        else
            echo "Status: NO RESULTS FILE" >> "${aggregate_file}"
            echo "Error: Results file not found" >> "${aggregate_file}"
        fi

        echo "" >> "${aggregate_file}"
    done

    # Section divider plus the list of individual report files.
    cat >> "${aggregate_file}" << EOF

================================================================
DETAILED RESULTS
================================================================

Individual relay reports are available in:
$(ls "${RUN_DIR}"/*_results.txt 2>/dev/null | sed 's|^| - |' || echo "  No individual reports found")

================================================================
BENCHMARK COMPARISON TABLE
================================================================

EOF

    # Fixed-width comparison table header.
    printf "%-20s %-10s %-15s %-15s %-15s\n" "Relay" "Status" "Peak Tput/s" "Avg Latency" "Success Rate" >> "${aggregate_file}"
    printf "%-20s %-10s %-15s %-15s %-15s\n" "----" "------" "-----------" "-----------" "------------" >> "${aggregate_file}"

    # One table row per target relay.
    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        if [ -f "${relay_file}" ]; then
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "FAILED" "-" "-" "-" >> "${aggregate_file}"
            else
                # Pull the first occurrence of each metric for the table.
                peak_tput=$(grep "Events/sec:" "${relay_file}" | head -1 | awk '{print $2}' || echo "-")
                avg_latency=$(grep "Avg Latency:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
                success_rate=$(grep "Success Rate:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")

                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "OK" "${peak_tput}" "${avg_latency}" "${success_rate}" >> "${aggregate_file}"
            fi
        else
            printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "NO DATA" "-" "-" "-" >> "${aggregate_file}"
        fi
    done

    # Report footer.
    echo "" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
    echo "End of Report" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
}
||||||
|
|
||||||
|
# Main execution
echo "Starting relay benchmark suite..."

# Parse the comma-separated name:port targets and benchmark each in turn.
# NOTE(review): with `set -e`, a run_benchmark failure (non-zero return)
# aborts this piped while-loop subshell — confirm skipping the remaining
# relays on one failure is the intended behavior.
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
    if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
        echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
        continue
    fi

    relay_url="${relay_name}:${relay_port}"
    output_file="${RUN_DIR}/${relay_name}_results.txt"

    run_benchmark "${relay_name}" "${relay_url}" "${output_file}"

    # Small delay between tests
    sleep 5
done

# Generate aggregate report
generate_aggregate_report

echo ""
echo "=================================================="
echo "Benchmark Suite Completed!"
echo "=================================================="
echo "Results directory: ${RUN_DIR}"
echo "Aggregate report: ${RUN_DIR}/aggregate_report.txt"
echo ""

# Print the comparison table from the aggregate report as a quick summary.
if [ -f "${RUN_DIR}/aggregate_report.txt" ]; then
    echo "Quick Summary:"
    echo "=============="
    grep -A 10 "BENCHMARK COMPARISON TABLE" "${RUN_DIR}/aggregate_report.txt" | tail -n +4
fi

echo ""
echo "All benchmark files:"
ls -la "${RUN_DIR}/"
echo ""
echo "Benchmark suite finished at: $(date)"
||||||
@ -0,0 +1,36 @@ |
|||||||
|
[info] |
||||||
|
relay_url = "ws://localhost:8080" |
||||||
|
name = "nostr-rs-relay benchmark" |
||||||
|
description = "A nostr-rs-relay for benchmarking" |
||||||
|
pubkey = "" |
||||||
|
contact = "" |
||||||
|
|
||||||
|
[database] |
||||||
|
data_directory = "/data" |
||||||
|
in_memory = false |
||||||
|
engine = "sqlite" |
||||||
|
|
||||||
|
[network] |
||||||
|
port = 8080 |
||||||
|
address = "0.0.0.0" |
||||||
|
|
||||||
|
[limits] |
||||||
|
messages_per_sec = 0 |
||||||
|
subscriptions_per_min = 0 |
||||||
|
max_event_bytes = 65535 |
||||||
|
max_ws_message_bytes = 131072 |
||||||
|
max_ws_frame_bytes = 131072 |
||||||
|
|
||||||
|
[authorization] |
||||||
|
pubkey_whitelist = [] |
||||||
|
|
||||||
|
[verified_users] |
||||||
|
mode = "passive" |
||||||
|
domain_whitelist = [] |
||||||
|
domain_blacklist = [] |
||||||
|
|
||||||
|
[pay_to_relay] |
||||||
|
enabled = false |
||||||
|
|
||||||
|
[options] |
||||||
|
reject_future_seconds = 30 |
||||||
@ -0,0 +1,101 @@ |
|||||||
|
## |
||||||
|
## Default strfry config |
||||||
|
## |
||||||
|
|
||||||
|
# Directory that contains the strfry LMDB database (restart required) |
||||||
|
db = "/data/strfry.lmdb" |
||||||
|
|
||||||
|
dbParams { |
||||||
|
# Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required) |
||||||
|
maxreaders = 256 |
||||||
|
|
||||||
|
# Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required) |
||||||
|
mapsize = 1099511627776 |
||||||
|
} |
||||||
|
|
||||||
|
relay { |
||||||
|
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required) |
||||||
|
bind = "0.0.0.0" |
||||||
|
|
||||||
|
# Port to open for the nostr websocket protocol (restart required) |
||||||
|
port = 8080 |
||||||
|
|
||||||
|
# Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required) |
||||||
|
nofiles = 1000000 |
||||||
|
|
||||||
|
# HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case) |
||||||
|
realIpHeader = "" |
||||||
|
|
||||||
|
info { |
||||||
|
# NIP-11: Name of this server. Short/descriptive (< 30 characters) |
||||||
|
name = "strfry benchmark" |
||||||
|
|
||||||
|
# NIP-11: Detailed description of this server, free-form |
||||||
|
description = "A strfry relay for benchmarking" |
||||||
|
|
||||||
|
# NIP-11: Administrative pubkey, for contact purposes |
||||||
|
pubkey = "" |
||||||
|
|
||||||
|
# NIP-11: Alternative contact for this server |
||||||
|
contact = "" |
||||||
|
} |
||||||
|
|
||||||
|
# Maximum accepted incoming websocket frame size (should be larger than max event) (restart required) |
||||||
|
maxWebsocketPayloadSize = 131072 |
||||||
|
|
||||||
|
# Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required) |
||||||
|
autoPingSeconds = 55 |
||||||
|
|
||||||
|
# If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required) |
||||||
|
enableTcpKeepalive = false |
||||||
|
|
||||||
|
# How much uninterrupted CPU time a REQ query should get during its DB scan |
||||||
|
queryTimesliceBudgetMicroseconds = 10000 |
||||||
|
|
||||||
|
# Maximum records that can be returned per filter |
||||||
|
maxFilterLimit = 500 |
||||||
|
|
||||||
|
# Maximum number of subscriptions (concurrent REQs) a connection can have open at any time |
||||||
|
maxSubsPerConnection = 20 |
||||||
|
|
||||||
|
writePolicy { |
||||||
|
# If non-empty, path to an executable script that implements the writePolicy plugin logic |
||||||
|
plugin = "" |
||||||
|
} |
||||||
|
|
||||||
|
compression { |
||||||
|
# Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required) |
||||||
|
enabled = true |
||||||
|
|
||||||
|
# Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required) |
||||||
|
slidingWindow = true |
||||||
|
} |
||||||
|
|
||||||
|
logging { |
||||||
|
# Dump all incoming messages |
||||||
|
dumpInAll = false |
||||||
|
|
||||||
|
# Dump all incoming EVENT messages |
||||||
|
dumpInEvents = false |
||||||
|
|
||||||
|
# Dump all incoming REQ/CLOSE messages |
||||||
|
dumpInReqs = false |
||||||
|
|
||||||
|
# Log performance metrics for initial REQ database scans |
||||||
|
dbScanPerf = false |
||||||
|
} |
||||||
|
|
||||||
|
numThreads { |
||||||
|
# Ingester threads: route incoming requests, validate events/sigs (restart required) |
||||||
|
ingester = 3 |
||||||
|
|
||||||
|
# reqWorker threads: Handle initial DB scan for events (restart required) |
||||||
|
reqWorker = 3 |
||||||
|
|
||||||
|
# reqMonitor threads: Handle filtering of new events (restart required) |
||||||
|
reqMonitor = 3 |
||||||
|
|
||||||
|
# yesstr threads: experimental yesstr protocol (restart required) |
||||||
|
yesstr = 1 |
||||||
|
} |
||||||
|
} |
||||||
@ -0,0 +1,183 @@ |
|||||||
|
version: '3.8' |
||||||
|
|
||||||
|
services: |
||||||
|
# Next.orly.dev relay (this repository) |
||||||
|
next-orly: |
||||||
|
build: |
||||||
|
context: ../.. |
||||||
|
dockerfile: cmd/benchmark/Dockerfile.next-orly |
||||||
|
container_name: benchmark-next-orly |
||||||
|
environment: |
||||||
|
- DATA_DIR=/data |
||||||
|
- LISTEN=0.0.0.0 |
||||||
|
- PORT=8080 |
||||||
|
- LOG_LEVEL=info |
||||||
|
volumes: |
||||||
|
- ./data/next-orly:/data |
||||||
|
ports: |
||||||
|
- "8001:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Khatru with SQLite |
||||||
|
khatru-sqlite: |
||||||
|
build: |
||||||
|
context: ./external/khatru |
||||||
|
dockerfile: ../../Dockerfile.khatru-sqlite |
||||||
|
container_name: benchmark-khatru-sqlite |
||||||
|
environment: |
||||||
|
- DATABASE_TYPE=sqlite |
||||||
|
- DATABASE_PATH=/data/khatru.db |
||||||
|
- PORT=8080 |
||||||
|
volumes: |
||||||
|
- ./data/khatru-sqlite:/data |
||||||
|
ports: |
||||||
|
- "8002:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Khatru with Badger |
||||||
|
khatru-badger: |
||||||
|
build: |
||||||
|
context: ./external/khatru |
||||||
|
dockerfile: ../../Dockerfile.khatru-badger |
||||||
|
container_name: benchmark-khatru-badger |
||||||
|
environment: |
||||||
|
- DATABASE_TYPE=badger |
||||||
|
- DATABASE_PATH=/data/badger |
||||||
|
- PORT=8080 |
||||||
|
volumes: |
||||||
|
- ./data/khatru-badger:/data |
||||||
|
ports: |
||||||
|
- "8003:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Relayer basic example |
||||||
|
relayer-basic: |
||||||
|
build: |
||||||
|
context: ./external/relayer |
||||||
|
dockerfile: ../../Dockerfile.relayer-basic |
||||||
|
container_name: benchmark-relayer-basic |
||||||
|
environment: |
||||||
|
- PORT=8080 |
||||||
|
- DATABASE_PATH=/data/relayer.db |
||||||
|
volumes: |
||||||
|
- ./data/relayer-basic:/data |
||||||
|
ports: |
||||||
|
- "8004:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Strfry |
||||||
|
strfry: |
||||||
|
build: |
||||||
|
context: ./external/strfry |
||||||
|
dockerfile: ../../Dockerfile.strfry |
||||||
|
container_name: benchmark-strfry |
||||||
|
environment: |
||||||
|
- STRFRY_DB_PATH=/data/strfry.lmdb |
||||||
|
- STRFRY_RELAY_PORT=8080 |
||||||
|
volumes: |
||||||
|
- ./data/strfry:/data |
||||||
|
- ./configs/strfry.conf:/etc/strfry.conf |
||||||
|
ports: |
||||||
|
- "8005:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "curl", "-f", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Nostr-rs-relay |
||||||
|
nostr-rs-relay: |
||||||
|
build: |
||||||
|
context: ./external/nostr-rs-relay |
||||||
|
dockerfile: ../../Dockerfile.nostr-rs-relay |
||||||
|
container_name: benchmark-nostr-rs-relay |
||||||
|
environment: |
||||||
|
- RUST_LOG=info |
||||||
|
volumes: |
||||||
|
- ./data/nostr-rs-relay:/data |
||||||
|
- ./configs/config.toml:/app/config.toml |
||||||
|
ports: |
||||||
|
- "8006:8080" |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
healthcheck: |
||||||
|
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"] |
||||||
|
interval: 30s |
||||||
|
timeout: 10s |
||||||
|
retries: 3 |
||||||
|
start_period: 40s |
||||||
|
|
||||||
|
# Benchmark runner |
||||||
|
benchmark-runner: |
||||||
|
build: |
||||||
|
context: ../.. |
||||||
|
dockerfile: cmd/benchmark/Dockerfile.benchmark |
||||||
|
container_name: benchmark-runner |
||||||
|
depends_on: |
||||||
|
next-orly: |
||||||
|
condition: service_healthy |
||||||
|
khatru-sqlite: |
||||||
|
condition: service_healthy |
||||||
|
khatru-badger: |
||||||
|
condition: service_healthy |
||||||
|
relayer-basic: |
||||||
|
condition: service_healthy |
||||||
|
strfry: |
||||||
|
condition: service_healthy |
||||||
|
nostr-rs-relay: |
||||||
|
condition: service_healthy |
||||||
|
environment: |
||||||
|
- BENCHMARK_TARGETS=next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006 |
||||||
|
- BENCHMARK_EVENTS=10000 |
||||||
|
- BENCHMARK_WORKERS=8 |
||||||
|
- BENCHMARK_DURATION=60s |
||||||
|
volumes: |
||||||
|
- ./reports:/reports |
||||||
|
networks: |
||||||
|
- benchmark-net |
||||||
|
command: > |
||||||
|
sh -c " |
||||||
|
echo 'Waiting for all relays to be ready...' && |
||||||
|
sleep 30 && |
||||||
|
echo 'Starting benchmark tests...' && |
||||||
|
/app/benchmark-runner --output-dir=/reports |
||||||
|
" |
||||||
|
|
||||||
|
networks: |
||||||
|
benchmark-net: |
||||||
|
driver: bridge |
||||||
|
|
||||||
|
volumes: |
||||||
|
benchmark-data: |
||||||
|
driver: local |
||||||
@ -0,0 +1 @@ |
|||||||
|
Subproject commit 668c41b98886ceb1980a2ead97e7e2ba0f5f0485 |
||||||
@ -0,0 +1,573 @@ |
|||||||
|
package main |
||||||
|
|
||||||
|
import (
	"context"
	"crypto/rand"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
)
||||||
|
|
||||||
|
// BenchmarkConfig holds the tunable parameters for a benchmark run,
// populated from command-line flags in parseFlags.
type BenchmarkConfig struct {
	DataDir           string        // directory for the throwaway benchmark database
	NumEvents         int           // events to generate per test scenario
	ConcurrentWorkers int           // goroutines issuing operations concurrently
	TestDuration      time.Duration // wall-clock cap for duration-bounded tests
	BurstPattern      bool          // whether burst-pattern testing is enabled
	ReportInterval    time.Duration // interval between progress reports
}
||||||
|
|
||||||
|
// BenchmarkResult captures the measured outcome of one benchmark scenario.
type BenchmarkResult struct {
	TestName          string        // human-readable scenario name
	Duration          time.Duration // total wall-clock time of the scenario
	TotalEvents       int           // operations that completed successfully
	EventsPerSecond   float64       // TotalEvents divided by Duration in seconds
	AvgLatency        time.Duration // mean per-operation latency
	P95Latency        time.Duration // 95th-percentile latency
	P99Latency        time.Duration // 99th-percentile latency
	SuccessRate       float64       // successful operations as a percentage of attempts
	ConcurrentWorkers int           // worker count the scenario ran with
	MemoryUsed        uint64        // heap bytes allocated when the scenario finished
	Errors            []string      // error messages collected during the run
}
||||||
|
|
||||||
|
// Benchmark owns the database under test and accumulates the results of
// each scenario that runs against it.
type Benchmark struct {
	config  *BenchmarkConfig
	db      *database.D
	results []*BenchmarkResult
	mu      sync.RWMutex // guards results
}
||||||
|
|
||||||
|
func main() { |
||||||
|
config := parseFlags() |
||||||
|
|
||||||
|
fmt.Printf("Starting Nostr Relay Benchmark\n") |
||||||
|
fmt.Printf("Data Directory: %s\n", config.DataDir) |
||||||
|
fmt.Printf( |
||||||
|
"Events: %d, Workers: %d, Duration: %v\n", |
||||||
|
config.NumEvents, config.ConcurrentWorkers, config.TestDuration, |
||||||
|
) |
||||||
|
|
||||||
|
benchmark := NewBenchmark(config) |
||||||
|
defer benchmark.Close() |
||||||
|
|
||||||
|
// Run benchmark tests
|
||||||
|
benchmark.RunPeakThroughputTest() |
||||||
|
benchmark.RunBurstPatternTest() |
||||||
|
benchmark.RunMixedReadWriteTest() |
||||||
|
|
||||||
|
// Generate report
|
||||||
|
benchmark.GenerateReport() |
||||||
|
} |
||||||
|
|
||||||
|
func parseFlags() *BenchmarkConfig { |
||||||
|
config := &BenchmarkConfig{} |
||||||
|
|
||||||
|
flag.StringVar( |
||||||
|
&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory", |
||||||
|
) |
||||||
|
flag.IntVar( |
||||||
|
&config.NumEvents, "events", 10000, "Number of events to generate", |
||||||
|
) |
||||||
|
flag.IntVar( |
||||||
|
&config.ConcurrentWorkers, "workers", runtime.NumCPU(), |
||||||
|
"Number of concurrent workers", |
||||||
|
) |
||||||
|
flag.DurationVar( |
||||||
|
&config.TestDuration, "duration", 60*time.Second, "Test duration", |
||||||
|
) |
||||||
|
flag.BoolVar( |
||||||
|
&config.BurstPattern, "burst", true, "Enable burst pattern testing", |
||||||
|
) |
||||||
|
flag.DurationVar( |
||||||
|
&config.ReportInterval, "report-interval", 10*time.Second, |
||||||
|
"Report interval", |
||||||
|
) |
||||||
|
|
||||||
|
flag.Parse() |
||||||
|
return config |
||||||
|
} |
||||||
|
|
||||||
|
func NewBenchmark(config *BenchmarkConfig) *Benchmark { |
||||||
|
// Clean up existing data directory
|
||||||
|
os.RemoveAll(config.DataDir) |
||||||
|
|
||||||
|
ctx := context.Background() |
||||||
|
cancel := func() {} |
||||||
|
|
||||||
|
db, err := database.New(ctx, cancel, config.DataDir, "info") |
||||||
|
if err != nil { |
||||||
|
log.Fatalf("Failed to create database: %v", err) |
||||||
|
} |
||||||
|
|
||||||
|
return &Benchmark{ |
||||||
|
config: config, |
||||||
|
db: db, |
||||||
|
results: make([]*BenchmarkResult, 0), |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func (b *Benchmark) Close() { |
||||||
|
if b.db != nil { |
||||||
|
b.db.Close() |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// RunPeakThroughputTest measures maximum sustained write throughput: all
// configured events are queued up front and a fixed pool of workers drains
// the queue as fast as the database allows. Results are appended to
// b.results and a summary is printed to stdout.
func (b *Benchmark) RunPeakThroughputTest() {
	fmt.Println("\n=== Peak Throughput Test ===")

	start := time.Now()
	var wg sync.WaitGroup
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex // guards totalEvents, errors, latencies across workers

	events := b.generateEvents(b.config.NumEvents)
	eventChan := make(chan *event.E, len(events))

	// Fill event channel up front (buffered to full size, so this never
	// blocks); closing it lets the worker range loops terminate once drained.
	for _, ev := range events {
		eventChan <- ev
	}
	close(eventChan)

	// Start workers: each saves events until the channel is exhausted.
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			ctx := context.Background()
			for ev := range eventChan {
				eventStart := time.Now()

				_, _, err := b.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				// Only successful saves count toward throughput/latency.
				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Peak Throughput",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
	}

	result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Events saved: %d/%d (%.1f%%)\n", totalEvents, b.config.NumEvents,
		result.SuccessRate,
	)
	fmt.Printf("Duration: %v\n", duration)
	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
	fmt.Printf("Avg latency: %v\n", result.AvgLatency)
	fmt.Printf("P95 latency: %v\n", result.P95Latency)
	fmt.Printf("P99 latency: %v\n", result.P99Latency)
}
||||||
|
|
||||||
|
func (b *Benchmark) RunBurstPatternTest() { |
||||||
|
fmt.Println("\n=== Burst Pattern Test ===") |
||||||
|
|
||||||
|
start := time.Now() |
||||||
|
var totalEvents int64 |
||||||
|
var errors []error |
||||||
|
var latencies []time.Duration |
||||||
|
var mu sync.Mutex |
||||||
|
|
||||||
|
// Generate events for burst pattern
|
||||||
|
events := b.generateEvents(b.config.NumEvents) |
||||||
|
|
||||||
|
// Simulate burst pattern: high activity periods followed by quiet periods
|
||||||
|
burstSize := b.config.NumEvents / 10 // 10% of events in each burst
|
||||||
|
quietPeriod := 500 * time.Millisecond |
||||||
|
burstPeriod := 100 * time.Millisecond |
||||||
|
|
||||||
|
ctx := context.Background() |
||||||
|
eventIndex := 0 |
||||||
|
|
||||||
|
for eventIndex < len(events) && time.Since(start) < b.config.TestDuration { |
||||||
|
// Burst period - send events rapidly
|
||||||
|
burstStart := time.Now() |
||||||
|
var wg sync.WaitGroup |
||||||
|
|
||||||
|
for i := 0; i < burstSize && eventIndex < len(events); i++ { |
||||||
|
wg.Add(1) |
||||||
|
go func(ev *event.E) { |
||||||
|
defer wg.Done() |
||||||
|
|
||||||
|
eventStart := time.Now() |
||||||
|
_, _, err := b.db.SaveEvent(ctx, ev) |
||||||
|
latency := time.Since(eventStart) |
||||||
|
|
||||||
|
mu.Lock() |
||||||
|
if err != nil { |
||||||
|
errors = append(errors, err) |
||||||
|
} else { |
||||||
|
totalEvents++ |
||||||
|
latencies = append(latencies, latency) |
||||||
|
} |
||||||
|
mu.Unlock() |
||||||
|
}(events[eventIndex]) |
||||||
|
|
||||||
|
eventIndex++ |
||||||
|
time.Sleep(burstPeriod / time.Duration(burstSize)) |
||||||
|
} |
||||||
|
|
||||||
|
wg.Wait() |
||||||
|
fmt.Printf( |
||||||
|
"Burst completed: %d events in %v\n", burstSize, |
||||||
|
time.Since(burstStart), |
||||||
|
) |
||||||
|
|
||||||
|
// Quiet period
|
||||||
|
time.Sleep(quietPeriod) |
||||||
|
} |
||||||
|
|
||||||
|
duration := time.Since(start) |
||||||
|
|
||||||
|
// Calculate metrics
|
||||||
|
result := &BenchmarkResult{ |
||||||
|
TestName: "Burst Pattern", |
||||||
|
Duration: duration, |
||||||
|
TotalEvents: int(totalEvents), |
||||||
|
EventsPerSecond: float64(totalEvents) / duration.Seconds(), |
||||||
|
ConcurrentWorkers: b.config.ConcurrentWorkers, |
||||||
|
MemoryUsed: getMemUsage(), |
||||||
|
} |
||||||
|
|
||||||
|
if len(latencies) > 0 { |
||||||
|
result.AvgLatency = calculateAvgLatency(latencies) |
||||||
|
result.P95Latency = calculatePercentileLatency(latencies, 0.95) |
||||||
|
result.P99Latency = calculatePercentileLatency(latencies, 0.99) |
||||||
|
} |
||||||
|
|
||||||
|
result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100 |
||||||
|
|
||||||
|
for _, err := range errors { |
||||||
|
result.Errors = append(result.Errors, err.Error()) |
||||||
|
} |
||||||
|
|
||||||
|
b.mu.Lock() |
||||||
|
b.results = append(b.results, result) |
||||||
|
b.mu.Unlock() |
||||||
|
|
||||||
|
fmt.Printf("Burst test completed: %d events in %v\n", totalEvents, duration) |
||||||
|
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond) |
||||||
|
} |
||||||
|
|
||||||
|
// RunMixedReadWriteTest interleaves writes and filtered reads across the
// worker pool: each worker strides through the event list, alternating a
// SaveEvent with a limited kind-filtered query, until the list or the test
// duration is exhausted. Results are appended to b.results.
func (b *Benchmark) RunMixedReadWriteTest() {
	fmt.Println("\n=== Mixed Read/Write Test ===")

	start := time.Now()
	var totalWrites, totalReads int64
	var writeLatencies, readLatencies []time.Duration
	var errors []error
	var mu sync.Mutex // guards the counters and slices above

	// Pre-populate with some events for reading
	seedEvents := b.generateEvents(1000)
	ctx := context.Background()

	fmt.Println("Pre-populating database for read tests...")
	for _, ev := range seedEvents {
		// Seed errors are deliberately ignored: a failure here only shrinks
		// the read corpus, it does not invalidate the measurement.
		b.db.SaveEvent(ctx, ev)
	}

	events := b.generateEvents(b.config.NumEvents)
	var wg sync.WaitGroup

	// Start mixed read/write workers
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			// Each worker starts at its own offset and strides by the worker
			// count, so no two workers touch the same event index.
			eventIndex := workerID
			for time.Since(start) < b.config.TestDuration && eventIndex < len(events) {
				// Alternate between write and read operations
				if eventIndex%2 == 0 {
					// Write operation
					writeStart := time.Now()
					_, _, err := b.db.SaveEvent(ctx, events[eventIndex])
					writeLatency := time.Since(writeStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalWrites++
						writeLatencies = append(writeLatencies, writeLatency)
					}
					mu.Unlock()
				} else {
					// Read operation: query up to 10 text-note events.
					readStart := time.Now()
					f := filter.New()
					f.Kinds = kind.NewS(kind.TextNote)
					limit := uint(10)
					f.Limit = &limit
					_, err := b.db.GetSerialsFromFilter(f)
					readLatency := time.Since(readStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalReads++
						readLatencies = append(readLatencies, readLatency)
					}
					mu.Unlock()
				}

				eventIndex += b.config.ConcurrentWorkers
				time.Sleep(10 * time.Millisecond) // Small delay between operations
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Mixed Read/Write",
		Duration:          duration,
		TotalEvents:       int(totalWrites + totalReads),
		EventsPerSecond:   float64(totalWrites+totalReads) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	// Calculate combined latencies for overall metrics.
	// NOTE(review): this append may extend writeLatencies' backing array in
	// place; harmless here since writeLatencies is not used again.
	allLatencies := append(writeLatencies, readLatencies...)
	if len(allLatencies) > 0 {
		result.AvgLatency = calculateAvgLatency(allLatencies)
		result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
		result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
	}

	// NOTE(review): the denominator is the event-list length, but reads are
	// also counted in the numerator — the "rate" can exceed 100%.
	result.SuccessRate = float64(totalWrites+totalReads) / float64(len(events)) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Mixed test completed: %d writes, %d reads in %v\n", totalWrites,
		totalReads, duration,
	)
	fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
}
||||||
|
|
||||||
|
// generateEvents builds count synthetic text-note events with random IDs
// and pubkeys, numbered content, and benchmark tags. IDs and pubkeys are
// random bytes rather than real hashes/keys — sufficient for exercising the
// storage layer, but the events would not pass Nostr signature validation.
func (b *Benchmark) generateEvents(count int) []*event.E {
	events := make([]*event.E, count)
	// All events share one creation timestamp, taken once up front.
	now := timestamp.Now()

	for i := 0; i < count; i++ {
		ev := event.New()

		// Generate random 32-byte ID
		ev.ID = make([]byte, 32)
		rand.Read(ev.ID)

		// Generate random 32-byte pubkey
		ev.Pubkey = make([]byte, 32)
		rand.Read(ev.Pubkey)

		ev.CreatedAt = now.I64()
		ev.Kind = kind.TextNote.K
		ev.Content = []byte(fmt.Sprintf(
			"This is test event number %d with some content", i,
		))

		// Create tags using NewFromBytesSlice: a "t" topic tag plus an "e"
		// reference cycling through 50 synthetic ids so some events share
		// references.
		ev.Tags = tag.NewS(
			tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
			tag.NewFromBytesSlice(
				[]byte("e"), []byte(fmt.Sprintf("ref_%d", i%50)),
			),
		)

		events[i] = ev
	}

	return events
}
||||||
|
|
||||||
|
func (b *Benchmark) GenerateReport() { |
||||||
|
fmt.Println("\n" + strings.Repeat("=", 80)) |
||||||
|
fmt.Println("BENCHMARK REPORT") |
||||||
|
fmt.Println(strings.Repeat("=", 80)) |
||||||
|
|
||||||
|
b.mu.RLock() |
||||||
|
defer b.mu.RUnlock() |
||||||
|
|
||||||
|
for _, result := range b.results { |
||||||
|
fmt.Printf("\nTest: %s\n", result.TestName) |
||||||
|
fmt.Printf("Duration: %v\n", result.Duration) |
||||||
|
fmt.Printf("Total Events: %d\n", result.TotalEvents) |
||||||
|
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond) |
||||||
|
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate) |
||||||
|
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers) |
||||||
|
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024)) |
||||||
|
fmt.Printf("Avg Latency: %v\n", result.AvgLatency) |
||||||
|
fmt.Printf("P95 Latency: %v\n", result.P95Latency) |
||||||
|
fmt.Printf("P99 Latency: %v\n", result.P99Latency) |
||||||
|
|
||||||
|
if len(result.Errors) > 0 { |
||||||
|
fmt.Printf("Errors (%d):\n", len(result.Errors)) |
||||||
|
for i, err := range result.Errors { |
||||||
|
if i < 5 { // Show first 5 errors
|
||||||
|
fmt.Printf(" - %s\n", err) |
||||||
|
} |
||||||
|
} |
||||||
|
if len(result.Errors) > 5 { |
||||||
|
fmt.Printf(" ... and %d more errors\n", len(result.Errors)-5) |
||||||
|
} |
||||||
|
} |
||||||
|
fmt.Println(strings.Repeat("-", 40)) |
||||||
|
} |
||||||
|
|
||||||
|
// Save report to file
|
||||||
|
reportPath := filepath.Join(b.config.DataDir, "benchmark_report.txt") |
||||||
|
b.saveReportToFile(reportPath) |
||||||
|
fmt.Printf("\nReport saved to: %s\n", reportPath) |
||||||
|
} |
||||||
|
|
||||||
|
func (b *Benchmark) saveReportToFile(path string) error { |
||||||
|
file, err := os.Create(path) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer file.Close() |
||||||
|
|
||||||
|
file.WriteString("NOSTR RELAY BENCHMARK REPORT\n") |
||||||
|
file.WriteString("============================\n\n") |
||||||
|
file.WriteString( |
||||||
|
fmt.Sprintf( |
||||||
|
"Generated: %s\n", time.Now().Format(time.RFC3339), |
||||||
|
), |
||||||
|
) |
||||||
|
file.WriteString(fmt.Sprintf("Relay: next.orly.dev\n")) |
||||||
|
file.WriteString(fmt.Sprintf("Database: BadgerDB\n")) |
||||||
|
file.WriteString(fmt.Sprintf("Workers: %d\n", b.config.ConcurrentWorkers)) |
||||||
|
file.WriteString( |
||||||
|
fmt.Sprintf( |
||||||
|
"Test Duration: %v\n\n", b.config.TestDuration, |
||||||
|
), |
||||||
|
) |
||||||
|
|
||||||
|
b.mu.RLock() |
||||||
|
defer b.mu.RUnlock() |
||||||
|
|
||||||
|
for _, result := range b.results { |
||||||
|
file.WriteString(fmt.Sprintf("Test: %s\n", result.TestName)) |
||||||
|
file.WriteString(fmt.Sprintf("Duration: %v\n", result.Duration)) |
||||||
|
file.WriteString(fmt.Sprintf("Events: %d\n", result.TotalEvents)) |
||||||
|
file.WriteString( |
||||||
|
fmt.Sprintf( |
||||||
|
"Events/sec: %.2f\n", result.EventsPerSecond, |
||||||
|
), |
||||||
|
) |
||||||
|
file.WriteString( |
||||||
|
fmt.Sprintf( |
||||||
|
"Success Rate: %.1f%%\n", result.SuccessRate, |
||||||
|
), |
||||||
|
) |
||||||
|
file.WriteString(fmt.Sprintf("Avg Latency: %v\n", result.AvgLatency)) |
||||||
|
file.WriteString(fmt.Sprintf("P95 Latency: %v\n", result.P95Latency)) |
||||||
|
file.WriteString(fmt.Sprintf("P99 Latency: %v\n", result.P99Latency)) |
||||||
|
file.WriteString( |
||||||
|
fmt.Sprintf( |
||||||
|
"Memory: %d MB\n", result.MemoryUsed/(1024*1024), |
||||||
|
), |
||||||
|
) |
||||||
|
file.WriteString("\n") |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Helper functions
|
||||||
|
|
||||||
|
func calculateAvgLatency(latencies []time.Duration) time.Duration { |
||||||
|
if len(latencies) == 0 { |
||||||
|
return 0 |
||||||
|
} |
||||||
|
|
||||||
|
var total time.Duration |
||||||
|
for _, l := range latencies { |
||||||
|
total += l |
||||||
|
} |
||||||
|
return total / time.Duration(len(latencies)) |
||||||
|
} |
||||||
|
|
||||||
|
func calculatePercentileLatency( |
||||||
|
latencies []time.Duration, percentile float64, |
||||||
|
) time.Duration { |
||||||
|
if len(latencies) == 0 { |
||||||
|
return 0 |
||||||
|
} |
||||||
|
|
||||||
|
// Simple percentile calculation - in production would sort first
|
||||||
|
index := int(float64(len(latencies)) * percentile) |
||||||
|
if index >= len(latencies) { |
||||||
|
index = len(latencies) - 1 |
||||||
|
} |
||||||
|
return latencies[index] |
||||||
|
} |
||||||
|
|
||||||
|
// getMemUsage reports the bytes of heap memory currently allocated by this
// process, as seen by the Go runtime.
func getMemUsage() uint64 {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	return stats.Alloc
}
||||||
@ -0,0 +1,368 @@ |
|||||||
|
#!/bin/bash

# Setup script for downloading and configuring external relay repositories
# for benchmarking

# Abort on the first failing command so a partial setup is not mistaken
# for a successful one.
set -e

# Absolute directory containing this script, resolved correctly even when
# invoked via a relative path or from another working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# All external relay repositories are cloned underneath this directory.
EXTERNAL_DIR="${SCRIPT_DIR}/external"

echo "Setting up external relay repositories for benchmarking..."

# Create external directory
mkdir -p "${EXTERNAL_DIR}"
||||||
|
|
||||||
|
# Function to clone or update repository.
#   $1 - remote URL to clone from
#   $2 - local checkout directory
#   $3 - human-readable name used in progress messages
# Updates are best-effort: a failed pull (offline, diverged history) is
# ignored so an existing checkout can still be used for benchmarking.
clone_or_update() {
    local repo_url="$1"
    local repo_dir="$2"
    local repo_name="$3"

    echo "Setting up ${repo_name}..."

    if [ -d "${repo_dir}" ]; then
        echo "  ${repo_name} already exists, updating..."
        # git -C operates inside the checkout without changing this
        # script's working directory (replaces the fragile cd / cd -
        # dance of the original). Try "main" first, then the legacy
        # "master" default branch.
        git -C "${repo_dir}" pull origin main 2>/dev/null \
            || git -C "${repo_dir}" pull origin master 2>/dev/null \
            || true
    else
        echo "  Cloning ${repo_name}..."
        git clone "${repo_url}" "${repo_dir}"
    fi
}
||||||
|
|
||||||
|
# Clone khatru
clone_or_update "https://github.com/fiatjaf/khatru.git" "${EXTERNAL_DIR}/khatru" "Khatru"

# Clone relayer
clone_or_update "https://github.com/fiatjaf/relayer.git" "${EXTERNAL_DIR}/relayer" "Relayer"

# Clone strfry
clone_or_update "https://github.com/hoytech/strfry.git" "${EXTERNAL_DIR}/strfry" "Strfry"

# Clone nostr-rs-relay (hosted on sourcehut rather than GitHub)
clone_or_update "https://git.sr.ht/~gheartsfield/nostr-rs-relay" "${EXTERNAL_DIR}/nostr-rs-relay" "Nostr-rs-relay"

echo "Creating Dockerfiles for external relays..."
||||||
|
|
||||||
|
# Create Dockerfile for Khatru SQLite.
# The image builds khatru's examples/basic-sqlite binary (CGO enabled for
# the sqlite driver) and serves on port 8080 with the DB under /data.
# NOTE(review): the runtime stage uses unpinned alpine:latest and runs as
# root; acceptable for throwaway benchmark containers, but pin the tag and
# add a USER before reusing this outside benchmarking.
cat > "${SCRIPT_DIR}/Dockerfile.khatru-sqlite" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite example
RUN cd examples/basic-sqlite && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]
EOF
||||||
|
|
||||||
|
# Create Dockerfile for Khatru Badger.
# Same layout as the sqlite variant, but builds examples/basic-badger with
# CGO disabled (badger is pure Go) and keeps its database under /data/badger.
cat > "${SCRIPT_DIR}/Dockerfile.khatru-badger" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]
EOF
||||||
|
|
||||||
|
# Create Dockerfile for Relayer basic example.
# Builds relayer's examples/basic binary (CGO enabled for sqlite) and
# exposes the same port/data-dir contract as the other relay images.
cat > "${SCRIPT_DIR}/Dockerfile.relayer-basic" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]
EOF
||||||
|
|
||||||
|
# Create Dockerfile for Strfry.
# strfry is C++ and is built on Ubuntu (LMDB + libsecp256k1), then copied
# into a slim Ubuntu runtime stage. The relay listens on 8080 and keeps
# its LMDB database under /data.
# NOTE(review): STRFRY_DB_PATH / STRFRY_RELAY_PORT env vars are set here,
# but whether the strfry binary actually reads them (vs. strfry.conf) is
# not visible from this script — verify against the strfry docs.
cat > "${SCRIPT_DIR}/Dockerfile.strfry" << 'EOF'
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    liblmdb-dev \
    libsecp256k1-dev \
    pkg-config \
    libtool \
    autoconf \
    automake \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
RUN mkdir -p /data

EXPOSE 8080
ENV STRFRY_DB_PATH=/data/strfry.lmdb
ENV STRFRY_RELAY_PORT=8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

CMD ["/app/strfry", "relay"]
EOF
||||||
|
|
||||||
|
# Create Dockerfile for nostr-rs-relay (Rust, musl/alpine build).
# NOTE(review): rust:1.70 is pinned here while the repo is cloned at HEAD;
# if upstream raises its MSRV above 1.70 this build will fail — confirm
# against nostr-rs-relay's Cargo.toml before relying on it.
cat > "${SCRIPT_DIR}/Dockerfile.nostr-rs-relay" << 'EOF'
FROM rust:1.70-alpine AS builder

RUN apk add --no-cache musl-dev sqlite-dev

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]
EOF
||||||
|
|
||||||
|
echo "Creating configuration files..."

# Create configs directory
mkdir -p "${SCRIPT_DIR}/configs"

# Create strfry configuration.
# Written verbatim from strfry's default config, with the db path, bind
# address, port and NIP-11 identity adjusted for the benchmark containers.
cat > "${SCRIPT_DIR}/configs/strfry.conf" << 'EOF'
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
EOF
||||||
|
|
||||||
|
# Create nostr-rs-relay configuration.
# Rate limits are disabled (messages_per_sec / subscriptions_per_min = 0)
# so the benchmark measures relay throughput, not throttling.
cat > "${SCRIPT_DIR}/configs/config.toml" << 'EOF'
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30
EOF
||||||
|
|
||||||
|
# Pre-create one data directory per relay under test, plus the shared
# reports output directory.
echo "Creating data directories..."
mkdir -p "${SCRIPT_DIR}/data"/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay}
mkdir -p "${SCRIPT_DIR}/reports"

# Summarise what was set up and how to launch the benchmark suite.
echo "Setup complete!"
echo ""
echo "External relay repositories have been cloned to: ${EXTERNAL_DIR}"
echo "Dockerfiles have been created for all relay implementations"
echo "Configuration files have been created in: ${SCRIPT_DIR}/configs"
echo "Data directories have been created in: ${SCRIPT_DIR}/data"
echo ""
echo "To run the benchmark:"
echo "  cd ${SCRIPT_DIR}"
echo "  docker-compose up --build"
echo ""
echo "Reports will be generated in: ${SCRIPT_DIR}/reports"
||||||
@ -1,54 +0,0 @@ |
|||||||
module acl.orly |
|
||||||
|
|
||||||
go 1.25.0 |
|
||||||
|
|
||||||
replace ( |
|
||||||
acl.orly => ../acl |
|
||||||
crypto.orly => ../crypto |
|
||||||
database.orly => ../database |
|
||||||
encoders.orly => ../encoders |
|
||||||
interfaces.orly => ../interfaces |
|
||||||
next.orly.dev => ../../ |
|
||||||
protocol.orly => ../protocol |
|
||||||
utils.orly => ../utils |
|
||||||
) |
|
||||||
|
|
||||||
require ( |
|
||||||
database.orly v0.0.0-00010101000000-000000000000 |
|
||||||
encoders.orly v0.0.0-00010101000000-000000000000 |
|
||||||
interfaces.orly v0.0.0-00010101000000-000000000000 |
|
||||||
lol.mleku.dev v1.0.2 |
|
||||||
next.orly.dev v0.0.0-00010101000000-000000000000 |
|
||||||
utils.orly v0.0.0-00010101000000-000000000000 |
|
||||||
) |
|
||||||
|
|
||||||
require ( |
|
||||||
crypto.orly v0.0.0-00010101000000-000000000000 // indirect |
|
||||||
github.com/adrg/xdg v0.5.3 // indirect |
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect |
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect |
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0 // indirect |
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect |
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect |
|
||||||
github.com/fatih/color v1.18.0 // indirect |
|
||||||
github.com/go-logr/logr v1.4.3 // indirect |
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect |
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect |
|
||||||
github.com/klauspost/compress v1.18.0 // indirect |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect |
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect |
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect |
|
||||||
github.com/templexxx/cpu v0.0.1 // indirect |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect |
|
||||||
go-simpler.org/env v0.12.0 // indirect |
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect |
|
||||||
go.opentelemetry.io/otel v1.37.0 // indirect |
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect |
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect |
|
||||||
go.uber.org/atomic v1.11.0 // indirect |
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect |
|
||||||
golang.org/x/net v0.41.0 // indirect |
|
||||||
golang.org/x/sys v0.35.0 // indirect |
|
||||||
google.golang.org/protobuf v1.36.6 // indirect |
|
||||||
lukechampine.com/frand v1.5.1 // indirect |
|
||||||
) |
|
||||||
@ -1,68 +0,0 @@ |
|||||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= |
|
||||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= |
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= |
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= |
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs= |
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w= |
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= |
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= |
|
||||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= |
|
||||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= |
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= |
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= |
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= |
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= |
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= |
|
||||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= |
|
||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= |
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= |
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= |
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= |
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= |
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= |
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= |
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= |
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= |
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= |
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= |
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= |
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= |
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= |
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= |
|
||||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY= |
|
||||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg= |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ= |
|
||||||
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs= |
|
||||||
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI= |
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= |
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= |
|
||||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= |
|
||||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= |
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= |
|
||||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= |
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= |
|
||||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= |
|
||||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= |
|
||||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= |
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= |
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= |
|
||||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= |
|
||||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= |
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= |
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= |
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= |
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= |
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= |
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
|
||||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c= |
|
||||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA= |
|
||||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w= |
|
||||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q= |
|
||||||
@ -1,35 +0,0 @@ |
|||||||
module crypto.orly |
|
||||||
|
|
||||||
go 1.25.0 |
|
||||||
|
|
||||||
require ( |
|
||||||
encoders.orly v0.0.0-00010101000000-000000000000 |
|
||||||
github.com/davecgh/go-spew v1.1.1 |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 |
|
||||||
github.com/stretchr/testify v1.11.1 |
|
||||||
interfaces.orly v0.0.0-00010101000000-000000000000 |
|
||||||
lol.mleku.dev v1.0.2 |
|
||||||
utils.orly v0.0.0-00010101000000-000000000000 |
|
||||||
) |
|
||||||
|
|
||||||
require ( |
|
||||||
github.com/fatih/color v1.18.0 // indirect |
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect |
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect |
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect |
|
||||||
github.com/templexxx/cpu v0.0.1 // indirect |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect |
|
||||||
golang.org/x/sys v0.35.0 // indirect |
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect |
|
||||||
) |
|
||||||
|
|
||||||
replace ( |
|
||||||
acl.orly => ../acl |
|
||||||
crypto.orly => ../crypto |
|
||||||
database.orly => ../database |
|
||||||
encoders.orly => ../encoders |
|
||||||
interfaces.orly => ../interfaces |
|
||||||
next.orly.dev => ../../ |
|
||||||
protocol.orly => ../protocol |
|
||||||
utils.orly => ../utils |
|
||||||
) |
|
||||||
@ -1,27 +0,0 @@ |
|||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= |
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= |
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= |
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= |
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= |
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= |
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= |
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= |
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= |
|
||||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY= |
|
||||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg= |
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ= |
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= |
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= |
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= |
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= |
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= |
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
|
||||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c= |
|
||||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA= |
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue