Compare commits

...

5 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| woikos | e7bc9a4a97 | Add progressive throttle for follows ACL mode (v0.48.10) | 2 days ago |
| woikos | 41a3b5c0a5 | Fix OOM crash from corrupt compact event data | 2 days ago |
| woikos | d41c332d06 | Add NRC (Nostr Relay Connect) protocol and web UI (v0.48.9) | 5 days ago |
| woikos | 0dac41e35e | Add documentation and improve BBolt import memory efficiency (v0.48.8) | 5 days ago |
| woikos | 2480be3a73 | Fix OOM in BuildIndexes by processing in chunks (v0.48.6) | 6 days ago |
  1. .claude/settings.local.json (8)
  2. README.md (69)
  3. app/config/config.go (83)
  4. app/handle-event.go (13)
  5. app/handle-nrc.go (448)
  6. app/listener.go (16)
  7. app/main.go (84)
  8. app/server.go (9)
  9. app/web/dist/bundle.css (1)
  10. app/web/dist/bundle.js (28)
  11. app/web/dist/bundle.js.map (2)
  12. app/web/src/App.svelte (29)
  13. app/web/src/RelayConnectView.svelte (744)
  14. app/web/src/api.js (104)
  15. cmd/wasmdb/main.go (62)
  16. docs/CURATION_MODE_GUIDE.md (411)
  17. docs/NIP-NRC.md (229)
  18. go.mod (1)
  19. go.sum (2)
  20. main.go (182)
  21. pkg/acl/follows.go (80)
  22. pkg/acl/follows_throttle.go (126)
  23. pkg/bbolt/import-export.go (140)
  24. pkg/bbolt/import-minimal.go (232)
  25. pkg/bbolt/save-event-bulk.go (96)
  26. pkg/bbolt/save-event.go (6)
  27. pkg/bbolt/stubs.go (6)
  28. pkg/bunker/acl_adapter.go (1)
  29. pkg/cashu/token/token.go (1)
  30. pkg/database/bufpool/pool_wasm.go (49)
  31. pkg/database/compact_event.go (40)
  32. pkg/database/nrc.go (206)
  33. pkg/database/tokenize_wasm.go (202)
  34. pkg/database/unicode_normalize_wasm.go (135)
  35. pkg/protocol/nrc/bridge.go (623)
  36. pkg/protocol/nrc/client.go (513)
  37. pkg/protocol/nrc/errors.go (24)
  38. pkg/protocol/nrc/nrc_test.go (371)
  39. pkg/protocol/nrc/session.go (322)
  40. pkg/protocol/nrc/uri.go (206)
  41. pkg/version/version (2)
  42. pkg/wasmdb/jsbridge.go (712)
  43. scripts/build-wasm.sh (65)
  44. wasm_exec.js (575)

8
.claude/settings.local.json

@@ -3,12 +3,8 @@
    "allow": [],
    "deny": [],
    "ask": [],
-   "additionalDirectories": [
-     "/home/mleku/smesh",
-     "/home/mleku/Tourmaline",
-     "/home/mleku/Amber"
-   ]
+   "additionalDirectories": []
  },
  "outputStyle": "Default",
- "MAX_THINKING_TOKENS": "8000"
+ "MAX_THINKING_TOKENS": "16000"
}

69
README.md

@@ -12,6 +12,36 @@ zap me: mlekudev@getalby.com
follow me on [nostr](https://jumble.social/users/npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku)
## Table of Contents
- [Bug Reports & Feature Requests](#%EF%B8%8F-bug-reports--feature-requests)
- [System Requirements](#%EF%B8%8F-system-requirements)
- [About](#about)
- [Performance & Cryptography](#performance--cryptography)
- [Building](#building)
- [Prerequisites](#prerequisites)
- [Basic Build](#basic-build)
- [Building with Web UI](#building-with-web-ui)
- [Core Features](#core-features)
- [Web UI](#web-ui)
- [Sprocket Event Processing](#sprocket-event-processing)
- [Policy System](#policy-system)
- [Deployment](#deployment)
- [Automated Deployment](#automated-deployment)
- [TLS Configuration](#tls-configuration)
- [systemd Service Management](#systemd-service-management)
- [Remote Deployment](#remote-deployment)
- [Configuration](#configuration)
- [Firewall Configuration](#firewall-configuration)
- [Monitoring](#monitoring)
- [Testing](#testing)
- [Command-Line Tools](#command-line-tools)
- [Access Control](#access-control)
- [Follows ACL](#follows-acl)
- [Curation ACL](#curation-acl)
- [Cluster Replication](#cluster-replication)
- [Developer Notes](#developer-notes)
## ⚠ Bug Reports & Feature Requests
**Bug reports and feature requests that do not follow the protocol will not be accepted.**
@@ -566,9 +596,22 @@ go run ./cmd/subscription-test-simple -url ws://localhost:3334 -duration 120
## Access Control
ORLY provides four ACL (Access Control List) modes to control who can publish events to your relay:
| Mode | Description | Best For |
|------|-------------|----------|
| `none` | Open relay, anyone can write | Public relays |
| `follows` | Write access based on admin follow lists | Personal/community relays |
| `managed` | Explicit allow/deny lists via NIP-86 API | Private relays |
| `curating` | Three-tier classification with rate limiting | Curated community relays |
```bash
export ORLY_ACL_MODE=follows # or: none, managed, curating
```
### Follows ACL
-The follows ACL (Access Control List) system provides flexible relay access control based on social relationships in the Nostr network.
+The follows ACL system provides flexible relay access control based on social relationships in the Nostr network.
```bash
export ORLY_ACL_MODE=follows
@@ -578,6 +621,30 @@ export ORLY_ADMINS=npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku
The system grants write access to users followed by designated admins, with read-only access for others. Follow lists update dynamically as admins modify their relationships.
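This change set also adds an optional progressive throttle for follows mode (`ORLY_FOLLOWS_THROTTLE`, `ORLY_FOLLOWS_THROTTLE_INCREMENT`, `ORLY_FOLLOWS_THROTTLE_MAX` in `app/config/config.go`, applied in `app/handle-event.go`): non-followed users can still write, but each event incurs a growing delay up to a cap. The sketch below only illustrates that idea, assuming a linear per-event accumulation; the real `pkg/acl/follows_throttle.go` may track state differently.

```go
// Illustrative sketch only: a linear per-event delay with a cap, mirroring
// ORLY_FOLLOWS_THROTTLE_INCREMENT (default 200ms) and
// ORLY_FOLLOWS_THROTTLE_MAX (default 60s). Not the actual relay code.
package main

import (
	"fmt"
	"time"
)

type followsThrottle struct {
	increment time.Duration  // delay added per event
	maxDelay  time.Duration  // cap on the total delay
	counts    map[string]int // events seen per non-followed pubkey
}

func (t *followsThrottle) delayFor(pubkey string) time.Duration {
	t.counts[pubkey]++
	d := time.Duration(t.counts[pubkey]) * t.increment
	if d > t.maxDelay {
		d = t.maxDelay
	}
	return d
}

func main() {
	t := &followsThrottle{
		increment: 200 * time.Millisecond,
		maxDelay:  60 * time.Second,
		counts:    make(map[string]int),
	}
	for i := 0; i < 3; i++ {
		// hypothetical pubkey used only for illustration
		fmt.Println(t.delayFor("hypothetical-pubkey")) // 200ms, 400ms, 600ms
	}
}
```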
### Curation ACL
The curation ACL mode provides sophisticated content curation with a three-tier publisher classification system:
- **Trusted**: Unlimited publishing, bypass rate limits
- **Blacklisted**: Blocked from publishing, invisible to regular users
- **Unclassified**: Rate-limited publishing (default 50 events/day)
Key features:
- **Kind whitelisting**: Only allow specific event kinds (e.g., social, DMs, longform)
- **IP-based flood protection**: Auto-ban IPs that exceed rate limits
- **Spam flagging**: Mark events as spam without deleting
- **Web UI management**: Configure via the built-in curation interface
```bash
export ORLY_ACL_MODE=curating
export ORLY_OWNERS=npub1your_owner_key
./orly
```
After starting, publish a configuration event (kind 30078) to enable the relay. The web UI at `/#curation` provides a complete management interface.
For detailed configuration and API documentation, see the [Curation Mode Guide](docs/CURATION_MODE_GUIDE.md).
### Cluster Replication
ORLY supports distributed relay clusters using active replication. When configured with peer relays, ORLY will automatically synchronize events between cluster members using efficient HTTP polling.

83
app/config/config.go

@@ -67,6 +67,11 @@ type C struct {
ClusterAdmins []string `env:"ORLY_CLUSTER_ADMINS" usage:"comma-separated list of npubs authorized to manage cluster membership"`
FollowListFrequency time.Duration `env:"ORLY_FOLLOW_LIST_FREQUENCY" usage:"how often to fetch admin follow lists (default: 1h)" default:"1h"`
// Progressive throttle for follows ACL mode - allows non-followed users to write with increasing delay
FollowsThrottleEnabled bool `env:"ORLY_FOLLOWS_THROTTLE" default:"false" usage:"enable progressive delay for non-followed users in follows ACL mode"`
FollowsThrottlePerEvent time.Duration `env:"ORLY_FOLLOWS_THROTTLE_INCREMENT" default:"200ms" usage:"delay added per event for non-followed users"`
FollowsThrottleMaxDelay time.Duration `env:"ORLY_FOLLOWS_THROTTLE_MAX" default:"60s" usage:"maximum throttle delay cap"`
// Blossom blob storage service level settings
BlossomServiceLevels string `env:"ORLY_BLOSSOM_SERVICE_LEVELS" usage:"comma-separated list of service levels in format: name:storage_mb_per_sat_per_month (e.g., basic:1,premium:10)"`
@@ -172,6 +177,13 @@ type C struct {
CashuScopes string `env:"ORLY_CASHU_SCOPES" default:"relay,nip46" usage:"comma-separated list of allowed token scopes"`
CashuReauthorize bool `env:"ORLY_CASHU_REAUTHORIZE" default:"true" usage:"re-check ACL on each token verification for stateless revocation"`
// Nostr Relay Connect (NRC) configuration - tunnel private relay through public relay
NRCEnabled bool `env:"ORLY_NRC_ENABLED" default:"false" usage:"enable NRC bridge to expose this relay through a public rendezvous relay"`
NRCRendezvousURL string `env:"ORLY_NRC_RENDEZVOUS_URL" usage:"WebSocket URL of the public relay to use as rendezvous point (e.g., wss://relay.example.com)"`
NRCAuthorizedKeys string `env:"ORLY_NRC_AUTHORIZED_KEYS" usage:"comma-separated list of authorized client pubkeys (hex) for secret-based auth"`
NRCUseCashu bool `env:"ORLY_NRC_USE_CASHU" default:"false" usage:"use Cashu access tokens for NRC authentication instead of static secrets"`
NRCSessionTimeout string `env:"ORLY_NRC_SESSION_TIMEOUT" default:"30m" usage:"inactivity timeout for NRC sessions"`
// Cluster replication configuration
ClusterPropagatePrivilegedEvents bool `env:"ORLY_CLUSTER_PROPAGATE_PRIVILEGED_EVENTS" default:"true" usage:"propagate privileged events (DMs, gift wraps, etc.) to relay peers for replication"`
@@ -404,6 +416,29 @@ func MigrateRequested() (requested bool, fromType, toType, targetPath string) {
return
}
// NRCRequested checks if the first command line argument is "nrc" and returns
// the NRC subcommand parameters.
//
// Return Values
// - requested: true if the 'nrc' subcommand was provided
// - subcommand: the NRC subcommand (generate, list, revoke)
// - args: additional arguments for the subcommand
func NRCRequested() (requested bool, subcommand string, args []string) {
if len(os.Args) > 1 {
switch strings.ToLower(os.Args[1]) {
case "nrc":
requested = true
if len(os.Args) > 2 {
subcommand = strings.ToLower(os.Args[2])
if len(os.Args) > 3 {
args = os.Args[3:]
}
}
}
}
return
}
// KV is a key/value pair.
type KV struct{ Key, Value string }
@@ -775,3 +810,51 @@ func (cfg *C) GetBboltConfigValues() (
cfg.BboltNoSync,
cfg.BboltMmapSizeMB * 1024 * 1024
}
// GetNRCConfigValues returns the NRC (Nostr Relay Connect) configuration values.
// This avoids circular imports with pkg/protocol/nrc while allowing main.go to construct
// the NRC bridge configuration.
func (cfg *C) GetNRCConfigValues() (
enabled bool,
rendezvousURL string,
authorizedKeys []string,
useCashu bool,
sessionTimeout time.Duration,
) {
// Parse session timeout
sessionTimeout = 30 * time.Minute // Default
if cfg.NRCSessionTimeout != "" {
if d, err := time.ParseDuration(cfg.NRCSessionTimeout); err == nil {
sessionTimeout = d
}
}
// Parse authorized keys
if cfg.NRCAuthorizedKeys != "" {
keys := strings.Split(cfg.NRCAuthorizedKeys, ",")
for _, k := range keys {
k = strings.TrimSpace(k)
if k != "" {
authorizedKeys = append(authorizedKeys, k)
}
}
}
return cfg.NRCEnabled,
cfg.NRCRendezvousURL,
authorizedKeys,
cfg.NRCUseCashu,
sessionTimeout
}
// GetFollowsThrottleConfigValues returns the progressive throttle configuration values
// for the follows ACL mode. This allows non-followed users to write with increasing delay.
func (cfg *C) GetFollowsThrottleConfigValues() (
enabled bool,
perEvent time.Duration,
maxDelay time.Duration,
) {
return cfg.FollowsThrottleEnabled,
cfg.FollowsThrottlePerEvent,
cfg.FollowsThrottleMaxDelay
}

13
app/handle-event.go

@@ -2,6 +2,7 @@ package app
import (
"context"
"time"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
@@ -254,6 +255,18 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
}
log.I.F("HandleEvent: authorized with access level %s", decision.AccessLevel)
// Progressive throttle for follows ACL mode (delays non-followed users)
if delay := l.getFollowsThrottleDelay(env.E); delay > 0 {
log.D.F("HandleEvent: applying progressive throttle delay of %v for %0x from %s",
delay, env.E.Pubkey, l.remote)
select {
case <-l.ctx.Done():
return l.ctx.Err()
case <-time.After(delay):
// Delay completed, continue processing
}
}
// Route special event kinds (ephemeral, etc.) - use routing service
if routeResult := l.eventRouter.Route(env.E, l.authedPubkey.Load()); routeResult.Action != routing.Continue {
if routeResult.Action == routing.Handled {

448
app/handle-nrc.go

@@ -0,0 +1,448 @@
package app
import (
"encoding/json"
"net/http"
"strings"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"git.mleku.dev/mleku/nostr/crypto/keys"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/httpauth"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/database"
)
// getCashuMintURL returns the Cashu mint URL based on relay configuration.
// Returns empty string if Cashu is not enabled.
func (s *Server) getCashuMintURL() string {
if !s.Config.CashuEnabled || s.CashuIssuer == nil {
return ""
}
// Use configured relay URL with /cashu/mint path
relayURL := strings.TrimSuffix(s.Config.RelayURL, "/")
if relayURL == "" {
return ""
}
return relayURL + "/cashu/mint"
}
// NRCConnectionResponse is the response structure for NRC connection API.
type NRCConnectionResponse struct {
ID string `json:"id"`
Label string `json:"label"`
CreatedAt int64 `json:"created_at"`
LastUsed int64 `json:"last_used"`
UseCashu bool `json:"use_cashu"`
URI string `json:"uri,omitempty"` // Only included when specifically requested
}
// NRCConnectionsResponse is the response for listing all connections.
type NRCConnectionsResponse struct {
Connections []NRCConnectionResponse `json:"connections"`
Config NRCConfigResponse `json:"config"`
}
// NRCConfigResponse contains NRC configuration status.
type NRCConfigResponse struct {
Enabled bool `json:"enabled"`
RendezvousURL string `json:"rendezvous_url"`
MintURL string `json:"mint_url,omitempty"`
RelayPubkey string `json:"relay_pubkey"`
}
// NRCCreateRequest is the request body for creating a connection.
type NRCCreateRequest struct {
Label string `json:"label"`
UseCashu bool `json:"use_cashu"`
}
// handleNRCConnections handles GET /api/nrc/connections
func (s *Server) handleNRCConnections(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check permissions - require owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
}
// Get database (must be Badger)
badgerDB, ok := s.DB.(*database.D)
if !ok {
http.Error(w, "NRC requires Badger database backend", http.StatusServiceUnavailable)
return
}
// Get all connections
conns, err := badgerDB.GetAllNRCConnections()
if chk.E(err) {
http.Error(w, "Failed to get connections", http.StatusInternalServerError)
return
}
// Get relay identity for config
relaySecretKey, err := s.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
http.Error(w, "Failed to get relay identity", http.StatusInternalServerError)
return
}
relayPubkey, _ := keys.SecretBytesToPubKeyBytes(relaySecretKey)
// Get NRC config values
nrcEnabled, nrcRendezvousURL, _, nrcUseCashu, _ := s.Config.GetNRCConfigValues()
// Build response
response := NRCConnectionsResponse{
Connections: make([]NRCConnectionResponse, 0, len(conns)),
Config: NRCConfigResponse{
Enabled: nrcEnabled,
RendezvousURL: nrcRendezvousURL,
RelayPubkey: string(hex.Enc(relayPubkey)),
},
}
// Add mint URL if Cashu is enabled
mintURL := s.getCashuMintURL()
if nrcUseCashu && mintURL != "" {
response.Config.MintURL = mintURL
}
for _, conn := range conns {
response.Connections = append(response.Connections, NRCConnectionResponse{
ID: conn.ID,
Label: conn.Label,
CreatedAt: conn.CreatedAt,
LastUsed: conn.LastUsed,
UseCashu: conn.UseCashu,
})
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}
// handleNRCCreate handles POST /api/nrc/connections
func (s *Server) handleNRCCreate(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check permissions - require owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
}
// Get database (must be Badger)
badgerDB, ok := s.DB.(*database.D)
if !ok {
http.Error(w, "NRC requires Badger database backend", http.StatusServiceUnavailable)
return
}
// Parse request body
var req NRCCreateRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, "Invalid request body", http.StatusBadRequest)
return
}
// Validate label
req.Label = strings.TrimSpace(req.Label)
if req.Label == "" {
http.Error(w, "Label is required", http.StatusBadRequest)
return
}
// Create the connection
conn, err := badgerDB.CreateNRCConnection(req.Label, req.UseCashu)
if chk.E(err) {
http.Error(w, "Failed to create connection", http.StatusInternalServerError)
return
}
// Get relay identity for URI generation
relaySecretKey, err := s.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
http.Error(w, "Failed to get relay identity", http.StatusInternalServerError)
return
}
relayPubkey, _ := keys.SecretBytesToPubKeyBytes(relaySecretKey)
// Get NRC config values
_, nrcRendezvousURL, _, nrcUseCashu, _ := s.Config.GetNRCConfigValues()
// Get mint URL if Cashu enabled
mintURL := ""
if nrcUseCashu {
mintURL = s.getCashuMintURL()
}
// Generate URI
uri, err := badgerDB.GetNRCConnectionURI(conn, relayPubkey, nrcRendezvousURL, mintURL)
if chk.E(err) {
log.W.F("failed to generate URI for new connection: %v", err)
}
// Update bridge authorized secrets if bridge is running
s.updateNRCBridgeSecrets(badgerDB)
// Build response with URI
response := NRCConnectionResponse{
ID: conn.ID,
Label: conn.Label,
CreatedAt: conn.CreatedAt,
LastUsed: conn.LastUsed,
UseCashu: conn.UseCashu,
URI: uri,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(response)
}
// handleNRCDelete handles DELETE /api/nrc/connections/{id}
func (s *Server) handleNRCDelete(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodDelete {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check permissions - require owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
}
// Get database (must be Badger)
badgerDB, ok := s.DB.(*database.D)
if !ok {
http.Error(w, "NRC requires Badger database backend", http.StatusServiceUnavailable)
return
}
// Extract connection ID from URL path
// URL format: /api/nrc/connections/{id}
path := strings.TrimPrefix(r.URL.Path, "/api/nrc/connections/")
connID := strings.TrimSpace(path)
if connID == "" {
http.Error(w, "Connection ID required", http.StatusBadRequest)
return
}
// Delete the connection
if err := badgerDB.DeleteNRCConnection(connID); chk.E(err) {
http.Error(w, "Failed to delete connection", http.StatusInternalServerError)
return
}
// Update bridge authorized secrets if bridge is running
s.updateNRCBridgeSecrets(badgerDB)
log.I.F("deleted NRC connection: %s", connID)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
}
// handleNRCGetURI handles GET /api/nrc/connections/{id}/uri
func (s *Server) handleNRCGetURI(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check permissions - require owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
}
// Get database (must be Badger)
badgerDB, ok := s.DB.(*database.D)
if !ok {
http.Error(w, "NRC requires Badger database backend", http.StatusServiceUnavailable)
return
}
// Extract connection ID from URL path
// URL format: /api/nrc/connections/{id}/uri
path := strings.TrimPrefix(r.URL.Path, "/api/nrc/connections/")
path = strings.TrimSuffix(path, "/uri")
connID := strings.TrimSpace(path)
if connID == "" {
http.Error(w, "Connection ID required", http.StatusBadRequest)
return
}
// Get the connection
conn, err := badgerDB.GetNRCConnection(connID)
if err != nil {
http.Error(w, "Connection not found", http.StatusNotFound)
return
}
// Get relay identity
relaySecretKey, err := s.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
http.Error(w, "Failed to get relay identity", http.StatusInternalServerError)
return
}
relayPubkey, _ := keys.SecretBytesToPubKeyBytes(relaySecretKey)
// Get NRC config values
_, nrcRendezvousURL, _, nrcUseCashu, _ := s.Config.GetNRCConfigValues()
// Get mint URL if Cashu enabled
mintURL := ""
if nrcUseCashu {
mintURL = s.getCashuMintURL()
}
// Generate URI
uri, err := badgerDB.GetNRCConnectionURI(conn, relayPubkey, nrcRendezvousURL, mintURL)
if chk.E(err) {
http.Error(w, "Failed to generate URI", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{"uri": uri})
}
// updateNRCBridgeSecrets updates the NRC bridge with current authorized secrets from database.
func (s *Server) updateNRCBridgeSecrets(badgerDB *database.D) {
if s.nrcBridge == nil {
return
}
secrets, err := badgerDB.GetNRCAuthorizedSecrets()
if chk.E(err) {
log.W.F("failed to get NRC authorized secrets: %v", err)
return
}
s.nrcBridge.UpdateAuthorizedSecrets(secrets)
log.D.F("updated NRC bridge with %d authorized secrets", len(secrets))
}
// handleNRCConnectionsRouter routes NRC connection requests.
func (s *Server) handleNRCConnectionsRouter(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
// Exact match for /api/nrc/connections
if path == "/api/nrc/connections" {
switch r.Method {
case http.MethodGet:
s.handleNRCConnections(w, r)
case http.MethodPost:
s.handleNRCCreate(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
return
}
// Check for /api/nrc/connections/{id}/uri
if strings.HasSuffix(path, "/uri") {
s.handleNRCGetURI(w, r)
return
}
// Otherwise it's /api/nrc/connections/{id}
s.handleNRCDelete(w, r)
}
// handleNRCConfig returns NRC configuration status.
func (s *Server) handleNRCConfig(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Get NRC config values
nrcEnabled, nrcRendezvousURL, _, nrcUseCashu, _ := s.Config.GetNRCConfigValues()
// Check if Badger is available (NRC requires Badger)
_, badgerAvailable := s.DB.(*database.D)
response := struct {
Enabled bool `json:"enabled"`
BadgerRequired bool `json:"badger_required"`
RendezvousURL string `json:"rendezvous_url,omitempty"`
UseCashu bool `json:"use_cashu"`
MintURL string `json:"mint_url,omitempty"`
}{
Enabled: nrcEnabled && badgerAvailable,
BadgerRequired: !badgerAvailable,
RendezvousURL: nrcRendezvousURL,
UseCashu: nrcUseCashu,
}
// Add mint URL if Cashu is enabled
if nrcUseCashu {
mintURL := s.getCashuMintURL()
if mintURL != "" {
response.MintURL = mintURL
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}

16
app/listener.go

@@ -301,6 +301,22 @@ func (l *Listener) getManagedACL() *database.ManagedACL {
return nil
}
// getFollowsThrottleDelay returns the progressive throttle delay for follows ACL mode.
// Returns 0 if not in follows mode, throttle is disabled, or user is exempt.
func (l *Listener) getFollowsThrottleDelay(ev *event.E) time.Duration {
// Only applies to follows ACL mode
if acl.Registry.Active.Load() != "follows" {
return 0
}
// Find the Follows ACL instance and get the throttle delay
for _, aclInstance := range acl.Registry.ACL {
if follows, ok := aclInstance.(*acl.Follows); ok {
return follows.GetThrottleDelay(ev.Pubkey, l.remote)
}
}
return 0
}
// QueryEvents queries events using the database QueryEvents method
func (l *Listener) QueryEvents(ctx context.Context, f *filter.F) (event.S, error) {
return l.DB.QueryEvents(ctx, f)

84
app/main.go

@@ -6,6 +6,7 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
@@ -17,6 +18,7 @@ import (
"git.mleku.dev/mleku/nostr/crypto/keys"
"next.orly.dev/pkg/database"
"git.mleku.dev/mleku/nostr/encoders/bech32encoding"
"git.mleku.dev/mleku/nostr/encoders/hex"
"next.orly.dev/pkg/neo4j"
"next.orly.dev/pkg/policy"
"next.orly.dev/pkg/protocol/graph"
@@ -26,6 +28,7 @@ import (
"next.orly.dev/pkg/cashu/issuer"
"next.orly.dev/pkg/cashu/keyset"
"next.orly.dev/pkg/cashu/verifier"
"next.orly.dev/pkg/protocol/nrc"
cashuiface "next.orly.dev/pkg/interfaces/cashu"
"next.orly.dev/pkg/ratelimit"
"next.orly.dev/pkg/spider"
@@ -199,6 +202,81 @@ func Run(
}
}
// Initialize NRC (Nostr Relay Connect) bridge if enabled
nrcEnabled, nrcRendezvousURL, nrcAuthorizedKeys, nrcUseCashu, nrcSessionTimeout := cfg.GetNRCConfigValues()
if nrcEnabled && nrcRendezvousURL != "" {
// Get relay identity for signing NRC responses
relaySecretKey, err := db.GetOrCreateRelayIdentitySecret()
if err != nil {
log.E.F("failed to get relay identity for NRC bridge: %v", err)
} else {
// Create signer from secret key
relaySigner, sigErr := p8k.New()
if sigErr != nil {
log.E.F("failed to create signer for NRC bridge: %v", sigErr)
} else if sigErr = relaySigner.InitSec(relaySecretKey); sigErr != nil {
log.E.F("failed to init signer for NRC bridge: %v", sigErr)
} else {
// Parse authorized secrets (format: secret:name,secret:name,...)
authorizedSecrets := make(map[string]string)
for _, entry := range nrcAuthorizedKeys {
parts := strings.SplitN(entry, ":", 2)
if len(parts) >= 1 {
secretHex := parts[0]
name := ""
if len(parts) == 2 {
name = parts[1]
}
// Derive pubkey from secret
secretBytes, decErr := hex.Dec(secretHex)
if decErr != nil || len(secretBytes) != 32 {
log.W.F("NRC: skipping invalid secret key: %s", secretHex[:8])
continue
}
derivedSigner, signerErr := p8k.New()
if signerErr != nil {
log.W.F("NRC: failed to create signer: %v", signerErr)
continue
}
if signerErr = derivedSigner.InitSec(secretBytes); signerErr != nil {
log.W.F("NRC: failed to init signer: %v", signerErr)
continue
}
derivedPubkeyHex := string(hex.Enc(derivedSigner.Pub()))
authorizedSecrets[derivedPubkeyHex] = name
}
}
// Construct local relay URL
localRelayURL := fmt.Sprintf("ws://localhost:%d", cfg.Port)
// Create bridge config
bridgeConfig := &nrc.BridgeConfig{
RendezvousURL: nrcRendezvousURL,
LocalRelayURL: localRelayURL,
Signer: relaySigner,
AuthorizedSecrets: authorizedSecrets,
SessionTimeout: nrcSessionTimeout,
}
// Add Cashu verifier if enabled
if nrcUseCashu && l.CashuVerifier != nil {
bridgeConfig.CashuVerifier = l.CashuVerifier
}
// Create and start the bridge
l.nrcBridge = nrc.NewBridge(bridgeConfig)
if err := l.nrcBridge.Start(); err != nil {
log.E.F("failed to start NRC bridge: %v", err)
l.nrcBridge = nil
} else {
log.I.F("NRC bridge started (rendezvous: %s, authorized: %d, cashu: %v)",
nrcRendezvousURL, len(authorizedSecrets), nrcUseCashu && l.CashuVerifier != nil)
}
}
}
}
// Initialize spider manager based on mode (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok && cfg.SpiderMode != "none" {
if l.spiderManager, err = spider.New(ctx, badgerDB, l.publishers, cfg.SpiderMode); chk.E(err) {
@@ -720,6 +798,12 @@ func Run(
log.I.F("bunker server stopped")
}
// Stop NRC bridge if running
if l.nrcBridge != nil {
l.nrcBridge.Stop()
log.I.F("NRC bridge stopped")
}
// Stop WireGuard server if running
if l.wireguardServer != nil {
l.wireguardServer.Stop()

9
app/server.go

@@ -36,6 +36,7 @@ import (
"next.orly.dev/pkg/bunker"
"next.orly.dev/pkg/cashu/issuer"
"next.orly.dev/pkg/cashu/verifier"
"next.orly.dev/pkg/protocol/nrc"
"next.orly.dev/pkg/ratelimit"
"next.orly.dev/pkg/spider"
"next.orly.dev/pkg/storage"
@@ -95,6 +96,9 @@ type Server struct {
CashuIssuer *issuer.Issuer
CashuVerifier *verifier.Verifier
// NRC (Nostr Relay Connect) bridge for remote relay access
nrcBridge *nrc.Bridge
// Archive relay and storage management
archiveManager *archive.Manager
accessTracker *storage.AccessTracker
@@ -376,6 +380,11 @@ func (s *Server) UserInterface() {
if s.CashuIssuer != nil {
log.Printf("Cashu access token API enabled at /cashu")
}
// NRC (Nostr Relay Connect) management endpoints
s.mux.HandleFunc("/api/nrc/connections", s.handleNRCConnectionsRouter)
s.mux.HandleFunc("/api/nrc/connections/", s.handleNRCConnectionsRouter)
s.mux.HandleFunc("/api/nrc/config", s.handleNRCConfig)
}
// handleFavicon serves favicon.png as favicon.ico

1
app/web/dist/bundle.css vendored

File diff suppressed because one or more lines are too long

28
app/web/dist/bundle.js vendored

File diff suppressed because one or more lines are too long

2
app/web/dist/bundle.js.map vendored

File diff suppressed because one or more lines are too long

29
app/web/src/App.svelte

@@ -14,6 +14,7 @@
import CurationView from "./CurationView.svelte";
import BlossomView from "./BlossomView.svelte";
import LogView from "./LogView.svelte";
import RelayConnectView from "./RelayConnectView.svelte";
import SearchResultsView from "./SearchResultsView.svelte";
import FilterDisplay from "./FilterDisplay.svelte";
@@ -115,6 +116,9 @@
let policyValidationErrors = [];
let policyFollows = [];
// NRC (Nostr Relay Connect) state
let nrcEnabled = false;
// ACL mode
let aclMode = "";
@@ -862,6 +866,9 @@
// Load sprocket configuration
loadSprocketConfig();
// Load NRC configuration
loadNRCConfig();
// Load policy configuration
loadPolicyConfig();
@@ -985,6 +992,15 @@
}
}
async function loadNRCConfig() {
try {
const config = await api.fetchNRCConfig();
nrcEnabled = config.enabled;
} catch (error) {
console.error("Error loading NRC config:", error);
}
}
async function loadPolicyConfig() {
try {
const response = await fetch("/api/policy/config", {
@@ -1666,6 +1682,7 @@
},
{ id: "sprocket", icon: "⚙", label: "Sprocket", requiresOwner: true },
{ id: "policy", icon: "📜", label: "Policy", requiresOwner: true },
{ id: "relay-connect", icon: "🔗", label: "Relay Connect", requiresOwner: true },
{ id: "logs", icon: "📋", label: "Logs", requiresOwner: true },
];
@@ -1694,6 +1711,10 @@
if (tab.id === "policy" && !policyEnabled) {
return false;
}
// Hide relay-connect tab if NRC is not enabled
if (tab.id === "relay-connect" && !nrcEnabled) {
return false;
}
// Hide managed ACL tab if not in managed mode
if (tab.id === "managed-acl" && aclMode !== "managed") {
return false;
@@ -2948,6 +2969,14 @@
on:refreshFollows={refreshFollows}
on:openLoginModal={openLoginModal}
/>
{:else if selectedTab === "relay-connect"}
<RelayConnectView
{isLoggedIn}
{userRole}
{userSigner}
{userPubkey}
on:openLoginModal={openLoginModal}
/>
{:else if selectedTab === "logs"}
<LogView
{isLoggedIn}

744
app/web/src/RelayConnectView.svelte

@@ -0,0 +1,744 @@
<script>
export let isLoggedIn = false;
export let userRole = "";
export let userSigner = null;
export let userPubkey = "";
import { createEventDispatcher, onMount } from "svelte";
import * as api from "./api.js";
import { copyToClipboard, showCopyFeedback } from "./utils.js";
const dispatch = createEventDispatcher();
// State
let nrcEnabled = false;
let badgerRequired = false;
let connections = [];
let config = {};
let isLoading = false;
let message = "";
let messageType = "info";
// New connection form
let newLabel = "";
let newUseCashu = false;
// URI display modal
let showURIModal = false;
let currentURI = "";
let currentLabel = "";
onMount(async () => {
await loadNRCConfig();
});
async function loadNRCConfig() {
try {
const result = await api.fetchNRCConfig();
nrcEnabled = result.enabled;
badgerRequired = result.badger_required;
if (nrcEnabled && isLoggedIn && userRole === "owner") {
await loadConnections();
}
} catch (error) {
console.error("Failed to load NRC config:", error);
}
}
async function loadConnections() {
if (!isLoggedIn || !userSigner || !userPubkey) return;
isLoading = true;
try {
const result = await api.fetchNRCConnections(userSigner, userPubkey);
connections = result.connections || [];
config = result.config || {};
} catch (error) {
setMessage(`Failed to load connections: ${error.message}`, "error");
} finally {
isLoading = false;
}
}
async function createConnection() {
if (!newLabel.trim()) {
setMessage("Please enter a label for the connection", "error");
return;
}
isLoading = true;
try {
const result = await api.createNRCConnection(userSigner, userPubkey, newLabel.trim(), newUseCashu);
// Show the URI modal with the new connection
currentURI = result.uri;
currentLabel = result.label;
showURIModal = true;
// Reset form
newLabel = "";
newUseCashu = false;
// Reload connections
await loadConnections();
setMessage(`Connection "${result.label}" created successfully`, "success");
} catch (error) {
setMessage(`Failed to create connection: ${error.message}`, "error");
} finally {
isLoading = false;
}
}
async function deleteConnection(connId, label) {
if (!confirm(`Are you sure you want to delete the connection "${label}"? This will revoke access for any device using this connection.`)) {
return;
}
isLoading = true;
try {
await api.deleteNRCConnection(userSigner, userPubkey, connId);
await loadConnections();
setMessage(`Connection "${label}" deleted`, "success");
} catch (error) {
setMessage(`Failed to delete connection: ${error.message}`, "error");
} finally {
isLoading = false;
}
}
async function showConnectionURI(connId, label) {
isLoading = true;
try {
const result = await api.getNRCConnectionURI(userSigner, userPubkey, connId);
currentURI = result.uri;
currentLabel = label;
showURIModal = true;
} catch (error) {
setMessage(`Failed to get URI: ${error.message}`, "error");
} finally {
isLoading = false;
}
}
async function copyURIToClipboard(event) {
const success = await copyToClipboard(currentURI);
const button = event.target.closest("button");
showCopyFeedback(button, success);
if (!success) {
setMessage("Failed to copy to clipboard", "error");
}
}
function closeURIModal() {
showURIModal = false;
currentURI = "";
currentLabel = "";
}
function setMessage(msg, type = "info") {
message = msg;
messageType = type;
// Auto-clear after 5 seconds
setTimeout(() => {
if (message === msg) {
message = "";
}
}, 5000);
}
function formatTimestamp(ts) {
if (!ts) return "Never";
return new Date(ts * 1000).toLocaleString();
}
function openLoginModal() {
dispatch("openLoginModal");
}
// Reload when login state changes
$: if (isLoggedIn && userRole === "owner" && nrcEnabled) {
loadConnections();
}
</script>
<div class="relay-connect-view">
<h2>Relay Connect</h2>
<p class="description">
Nostr Relay Connect (NRC) allows remote access to this relay through a public relay tunnel.
Create connection strings for your devices to sync securely.
</p>
{#if !nrcEnabled}
<div class="not-enabled">
{#if badgerRequired}
<p>NRC requires the Badger database backend.</p>
<p>Set <code>ORLY_DB_TYPE=badger</code> to enable NRC functionality.</p>
{:else}
<p>NRC is not enabled on this relay.</p>
<p>Set <code>ORLY_NRC_ENABLED=true</code> and configure <code>ORLY_NRC_RENDEZVOUS_URL</code> to enable.</p>
{/if}
</div>
{:else if !isLoggedIn}
<div class="login-prompt">
<p>Please log in to manage relay connections.</p>
<button class="login-btn" on:click={openLoginModal}>Log In</button>
</div>
{:else if userRole !== "owner"}
<div class="permission-denied">
<p>Owner permission required for relay connection management.</p>
<p>Current role: <strong>{userRole || "none"}</strong></p>
</div>
{:else}
<!-- Config status -->
<div class="config-status">
<div class="status-item">
<span class="status-label">Status:</span>
<span class="status-value enabled">Enabled</span>
</div>
<div class="status-item">
<span class="status-label">Rendezvous:</span>
<span class="status-value">{config.rendezvous_url || "Not configured"}</span>
</div>
{#if config.mint_url}
<div class="status-item">
<span class="status-label">Cashu Mint:</span>
<span class="status-value">{config.mint_url}</span>
</div>
{/if}
</div>
<!-- Create new connection -->
<div class="section">
<h3>Create New Connection</h3>
<div class="create-form">
<div class="form-group">
<label for="new-label">Device Label</label>
<input
type="text"
id="new-label"
bind:value={newLabel}
placeholder="e.g., Phone, Laptop, Tablet"
disabled={isLoading}
/>
</div>
<div class="form-group checkbox-group">
<label>
<input
type="checkbox"
bind:checked={newUseCashu}
disabled={isLoading || !config.mint_url}
/>
Include CAT (Cashu Access Token)
{#if !config.mint_url}
<span class="hint">(requires Cashu mint)</span>
{/if}
</label>
</div>
<button
class="create-btn"
on:click={createConnection}
disabled={isLoading || !newLabel.trim()}
>
+ Create Connection
</button>
</div>
</div>
<!-- Connections list -->
<div class="section">
<h3>Connections ({connections.length})</h3>
{#if connections.length === 0}
<p class="no-connections">No connections yet. Create one to get started.</p>
{:else}
<div class="connections-list">
{#each connections as conn}
<div class="connection-item">
<div class="connection-info">
<div class="connection-label">{conn.label}</div>
<div class="connection-details">
<span class="detail">ID: {conn.id.substring(0, 8)}...</span>
<span class="detail">Created: {formatTimestamp(conn.created_at)}</span>
{#if conn.last_used}
<span class="detail">Last used: {formatTimestamp(conn.last_used)}</span>
{/if}
{#if conn.use_cashu}
<span class="badge cashu">CAT</span>
{/if}
</div>
</div>
<div class="connection-actions">
<button
class="action-btn show-uri-btn"
on:click={() => showConnectionURI(conn.id, conn.label)}
disabled={isLoading}
title="Show connection URI"
>
Show URI
</button>
<button
class="action-btn delete-btn"
on:click={() => deleteConnection(conn.id, conn.label)}
disabled={isLoading}
title="Delete connection"
>
Delete
</button>
</div>
</div>
{/each}
</div>
{/if}
<button
class="refresh-btn"
on:click={loadConnections}
disabled={isLoading}
>
Refresh
</button>
</div>
{#if message}
<div class="message" class:error={messageType === "error"} class:success={messageType === "success"}>
{message}
</div>
{/if}
{/if}
</div>
<!-- URI Modal -->
{#if showURIModal}
<div class="modal-overlay" on:click={closeURIModal}>
<div class="modal" on:click|stopPropagation>
<h3>Connection URI for "{currentLabel}"</h3>
<p class="modal-description">
Copy this URI to your Nostr client to connect to this relay remotely.
Keep it secret - anyone with this URI can access your relay.
</p>
<div class="uri-display">
<textarea readonly>{currentURI}</textarea>
</div>
<div class="modal-actions">
<button class="copy-btn" on:click={copyURIToClipboard}>
Copy to Clipboard
</button>
<button class="close-btn" on:click={closeURIModal}>
Close
</button>
</div>
</div>
</div>
{/if}
<style>
.relay-connect-view {
width: 100%;
max-width: 800px;
margin: 0;
padding: 20px;
background: var(--header-bg);
color: var(--text-color);
border-radius: 8px;
}
.relay-connect-view h2 {
margin: 0 0 0.5rem 0;
color: var(--text-color);
font-size: 1.8rem;
font-weight: 600;
}
.description {
color: var(--muted-foreground);
margin-bottom: 1.5rem;
line-height: 1.5;
}
.section {
background-color: var(--card-bg);
border-radius: 8px;
padding: 1em;
margin-bottom: 1.5rem;
border: 1px solid var(--border-color);
}
.section h3 {
margin: 0 0 1rem 0;
color: var(--text-color);
font-size: 1.1rem;
font-weight: 600;
}
.config-status {
display: flex;
flex-direction: column;
gap: 0.5rem;
margin-bottom: 1.5rem;
padding: 1rem;
background: var(--card-bg);
border-radius: 8px;
border: 1px solid var(--border-color);
}
.status-item {
display: flex;
justify-content: space-between;
align-items: center;
}
.status-label {
font-weight: 600;
color: var(--text-color);
}
.status-value {
color: var(--muted-foreground);
font-family: monospace;
font-size: 0.9em;
}
.status-value.enabled {
color: var(--success);
}
/* Create form */
.create-form {
display: flex;
flex-direction: column;
gap: 1rem;
}
.form-group {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.form-group label {
font-weight: 500;
color: var(--text-color);
}
.form-group input[type="text"] {
padding: 0.75em;
border: 1px solid var(--border-color);
border-radius: 4px;
background: var(--input-bg);
color: var(--input-text-color);
font-size: 1em;
}
.checkbox-group {
flex-direction: row;
align-items: center;
}
.checkbox-group label {
display: flex;
align-items: center;
gap: 0.5rem;
cursor: pointer;
}
.checkbox-group input[type="checkbox"] {
width: 1.2em;
height: 1.2em;
}
.hint {
color: var(--muted-foreground);
font-size: 0.85em;
}
.create-btn {
background: var(--primary);
color: var(--text-color);
border: none;
padding: 0.75em 1.5em;
border-radius: 4px;
cursor: pointer;
font-size: 1em;
font-weight: 500;
align-self: flex-start;
transition: background-color 0.2s;
}
.create-btn:hover:not(:disabled) {
background: var(--accent-hover-color);
}
.create-btn:disabled {
background: var(--secondary);
cursor: not-allowed;
}
/* Connections list */
.connections-list {
display: flex;
flex-direction: column;
gap: 0.75rem;
margin-bottom: 1rem;
}
.connection-item {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1rem;
background: var(--bg-color);
border: 1px solid var(--border-color);
border-radius: 4px;
}
.connection-info {
flex: 1;
}
.connection-label {
font-weight: 600;
color: var(--text-color);
margin-bottom: 0.25rem;
}
.connection-details {
display: flex;
flex-wrap: wrap;
gap: 0.75rem;
font-size: 0.85em;
color: var(--muted-foreground);
}
.badge {
background: var(--primary);
color: var(--text-color);
padding: 0.1em 0.4em;
border-radius: 0.25rem;
font-size: 0.75em;
font-weight: 600;
}
.badge.cashu {
background: var(--warning);
}
.connection-actions {
display: flex;
gap: 0.5rem;
}
.action-btn {
background: var(--primary);
color: var(--text-color);
border: none;
padding: 0.5em 1em;
border-radius: 4px;
cursor: pointer;
font-size: 0.9em;
transition: background-color 0.2s;
}
.action-btn:hover:not(:disabled) {
background: var(--accent-hover-color);
}
.action-btn:disabled {
background: var(--secondary);
cursor: not-allowed;
}
.show-uri-btn {
background: var(--info);
}
.show-uri-btn:hover:not(:disabled) {
filter: brightness(0.9);
}
.delete-btn {
background: var(--danger);
}
.delete-btn:hover:not(:disabled) {
filter: brightness(0.9);
}
.refresh-btn {
background: var(--secondary);
color: var(--text-color);
border: none;
padding: 0.5em 1em;
border-radius: 4px;
cursor: pointer;
font-size: 0.9em;
transition: background-color 0.2s;
}
.refresh-btn:hover:not(:disabled) {
filter: brightness(0.9);
}
.refresh-btn:disabled {
cursor: not-allowed;
opacity: 0.6;
}
.no-connections {
color: var(--muted-foreground);
text-align: center;
padding: 2rem;
}
/* Message */
.message {
padding: 1rem;
border-radius: 4px;
margin-top: 1rem;
background: var(--info-bg, #e7f3ff);
color: var(--info-text, #0066cc);
border: 1px solid var(--info, #0066cc);
}
.message.error {
background: var(--danger-bg);
color: var(--danger-text);
border-color: var(--danger);
}
.message.success {
background: var(--success-bg);
color: var(--success-text);
border-color: var(--success);
}
/* Modal */
.modal-overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: rgba(0, 0, 0, 0.6);
display: flex;
align-items: center;
justify-content: center;
z-index: 1000;
}
.modal {
background: var(--card-bg);
border-radius: 8px;
padding: 1.5rem;
max-width: 600px;
width: 90%;
max-height: 80vh;
overflow: auto;
border: 1px solid var(--border-color);
}
.modal h3 {
margin: 0 0 0.5rem 0;
color: var(--text-color);
}
.modal-description {
color: var(--muted-foreground);
margin-bottom: 1rem;
font-size: 0.9em;
line-height: 1.5;
}
.uri-display textarea {
width: 100%;
height: 120px;
padding: 0.75em;
border: 1px solid var(--border-color);
border-radius: 4px;
background: var(--input-bg);
color: var(--input-text-color);
font-family: monospace;
font-size: 0.85em;
resize: none;
word-break: break-all;
}
.modal-actions {
display: flex;
gap: 0.5rem;
margin-top: 1rem;
justify-content: flex-end;
}
.copy-btn {
background: var(--primary);
color: var(--text-color);
border: none;
padding: 0.75em 1.5em;
border-radius: 4px;
cursor: pointer;
font-weight: 500;
transition: background-color 0.2s;
}
.copy-btn:hover {
background: var(--accent-hover-color);
}
.close-btn {
background: var(--secondary);
color: var(--text-color);
border: none;
padding: 0.75em 1.5em;
border-radius: 4px;
cursor: pointer;
font-weight: 500;
transition: background-color 0.2s;
}
.close-btn:hover {
filter: brightness(0.9);
}
/* States */
.not-enabled,
.permission-denied,
.login-prompt {
text-align: center;
padding: 2em;
background-color: var(--card-bg);
border-radius: 8px;
border: 1px solid var(--border-color);
color: var(--text-color);
}
.not-enabled p,
.permission-denied p,
.login-prompt p {
margin: 0 0 1rem 0;
line-height: 1.4;
}
.not-enabled code {
background: var(--code-bg);
padding: 0.2em 0.4em;
border-radius: 0.25rem;
font-family: monospace;
font-size: 0.9em;
}
.login-btn {
background: var(--primary);
color: var(--text-color);
border: none;
padding: 0.75em 1.5em;
border-radius: 4px;
cursor: pointer;
font-weight: bold;
font-size: 0.9em;
transition: background-color 0.2s;
}
.login-btn:hover {
background: var(--accent-hover-color);
}
</style>

104
app/web/src/api.js

@@ -482,3 +482,107 @@ export async function getWireGuardAudit(signer, pubkey) {
return await response.json();
}
// ==================== NRC (Nostr Relay Connect) API ====================
/**
* Get NRC configuration status (no auth required)
* @returns {Promise<object>} NRC config status
*/
export async function fetchNRCConfig() {
try {
const response = await fetch(`${window.location.origin}/api/nrc/config`);
if (response.ok) {
return await response.json();
}
} catch (error) {
console.error("Error fetching NRC config:", error);
}
return { enabled: false, badger_required: true };
}
/**
* Get all NRC connections
* @param {object} signer - The signer instance
* @param {string} pubkey - User's pubkey
* @returns {Promise<object>} Connections list and config
*/
export async function fetchNRCConnections(signer, pubkey) {
const url = `${window.location.origin}/api/nrc/connections`;
const authHeader = await createNIP98Auth(signer, pubkey, "GET", url);
const response = await fetch(url, {
headers: authHeader ? { Authorization: `Nostr ${authHeader}` } : {},
});
if (!response.ok) {
const error = await response.text();
throw new Error(error || `Failed to get NRC connections: ${response.statusText}`);
}
return await response.json();
}
/**
* Create a new NRC connection
* @param {object} signer - The signer instance
* @param {string} pubkey - User's pubkey
* @param {string} label - Connection label
* @param {boolean} useCashu - Whether to use CAT authentication
* @returns {Promise<object>} Created connection with URI
*/
export async function createNRCConnection(signer, pubkey, label, useCashu = false) {
const url = `${window.location.origin}/api/nrc/connections`;
const authHeader = await createNIP98Auth(signer, pubkey, "POST", url);
const response = await fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
...(authHeader ? { Authorization: `Nostr ${authHeader}` } : {}),
},
body: JSON.stringify({ label, use_cashu: useCashu }),
});
if (!response.ok) {
const error = await response.text();
throw new Error(error || `Failed to create NRC connection: ${response.statusText}`);
}
return await response.json();
}
/**
* Delete an NRC connection
* @param {object} signer - The signer instance
* @param {string} pubkey - User's pubkey
* @param {string} connId - Connection ID to delete
* @returns {Promise<object>} Delete result
*/
export async function deleteNRCConnection(signer, pubkey, connId) {
const url = `${window.location.origin}/api/nrc/connections/${connId}`;
const authHeader = await createNIP98Auth(signer, pubkey, "DELETE", url);
const response = await fetch(url, {
method: "DELETE",
headers: authHeader ? { Authorization: `Nostr ${authHeader}` } : {},
});
if (!response.ok) {
const error = await response.text();
throw new Error(error || `Failed to delete NRC connection: ${response.statusText}`);
}
return await response.json();
}
/**
* Get connection URI for an NRC connection
* @param {object} signer - The signer instance
* @param {string} pubkey - User's pubkey
* @param {string} connId - Connection ID
* @returns {Promise<object>} Connection URI
*/
export async function getNRCConnectionURI(signer, pubkey, connId) {
const url = `${window.location.origin}/api/nrc/connections/${connId}/uri`;
const authHeader = await createNIP98Auth(signer, pubkey, "GET", url);
const response = await fetch(url, {
headers: authHeader ? { Authorization: `Nostr ${authHeader}` } : {},
});
if (!response.ok) {
const error = await response.text();
throw new Error(error || `Failed to get NRC URI: ${response.statusText}`);
}
return await response.json();
}

62
cmd/wasmdb/main.go

@@ -0,0 +1,62 @@
//go:build js && wasm
// Package main provides the WASM entry point for the WasmDB database.
// It initializes the IndexedDB-backed Nostr event store and exposes
// the database API to JavaScript via the global `wasmdb` object.
//
// Build with:
// GOOS=js GOARCH=wasm go build -o wasmdb.wasm ./cmd/wasmdb
//
// Usage in JavaScript:
// // Load wasm_exec.js first (Go WASM runtime)
// const go = new Go();
// const result = await WebAssembly.instantiateStreaming(fetch('wasmdb.wasm'), go.importObject);
// go.run(result.instance);
//
// // Wait for ready
// while (!wasmdb.isReady()) {
// await new Promise(resolve => setTimeout(resolve, 100));
// }
//
// // Use the API
// await wasmdb.saveEvent(JSON.stringify(event));
// const events = await wasmdb.queryEvents(JSON.stringify({kinds: [1], limit: 10}));
package main
import (
"context"
"fmt"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/wasmdb"
)
func main() {
// Create context for the database
ctx, cancel := context.WithCancel(context.Background())
// Initialize the database with default config
cfg := &database.DatabaseConfig{
DataDir: ".",
LogLevel: "info",
}
db, err := wasmdb.NewWithConfig(ctx, cancel, cfg)
if err != nil {
fmt.Printf("Failed to initialize WasmDB: %v\n", err)
return
}
// Register the JavaScript bridge
wasmdb.RegisterJSBridge(db, ctx, cancel)
fmt.Println("WasmDB initialized and JavaScript bridge registered")
// Wait for the database to be ready
<-db.Ready()
fmt.Println("WasmDB ready to serve requests")
// Keep the WASM module running
// This is necessary because Go's main() returning would terminate the WASM instance
select {}
}

411
docs/CURATION_MODE_GUIDE.md

@@ -0,0 +1,411 @@
# Curation Mode Guide
Curation mode is a sophisticated access control system for Nostr relays that provides three-tier publisher classification, rate limiting, IP-based flood protection, and event kind whitelisting.
## Overview
Unlike simple allow/deny lists, curation mode classifies publishers into three tiers:
| Tier | Rate Limited | Daily Limit | Visibility |
|------|--------------|-------------|------------|
| **Trusted** | No | Unlimited | Full |
| **Blacklisted** | N/A (blocked) | 0 | Hidden from regular users |
| **Unclassified** | Yes | 50 events/day (default) | Full |
This allows relay operators to:
- Reward quality contributors with unlimited publishing
- Block bad actors while preserving their events for admin review
- Allow new users to participate with reasonable rate limits
- Prevent spam floods through automatic IP-based protections
## Quick Start
### 1. Start the Relay
```bash
export ORLY_ACL_MODE=curating
export ORLY_OWNERS=npub1your_owner_pubkey
./orly
```
### 2. Publish Configuration
The relay will not accept events until you publish a configuration event. Use the web UI at `http://your-relay/#curation` or publish a kind 30078 event:
```json
{
"kind": 30078,
"tags": [["d", "curating-config"]],
"content": "{\"dailyLimit\":50,\"ipDailyLimit\":500,\"firstBanHours\":1,\"secondBanHours\":168,\"kindCategories\":[\"social\"]}"
}
```
### 3. Manage Publishers
Use the web UI or NIP-86 API to:
- Trust quality publishers
- Blacklist spammers
- Review unclassified users by activity
- Unblock IPs if needed
## Configuration
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `ORLY_ACL_MODE` | `none` | Set to `curating` to enable |
| `ORLY_OWNERS` | | Owner pubkeys (can configure relay) |
| `ORLY_ADMINS` | | Admin pubkeys (can manage publishers) |
### Configuration Event (Kind 30078)
Configuration is stored as a replaceable Nostr event (kind 30078) with d-tag `curating-config`. Only owners and admins can publish configuration.
```typescript
interface CuratingConfig {
// Rate Limiting
dailyLimit: number; // Max events/day for unclassified users (default: 50)
ipDailyLimit: number; // Max events/day from single IP (default: 500)
// IP Ban Durations
firstBanHours: number; // First offense ban duration (default: 1 hour)
secondBanHours: number; // Subsequent offense ban duration (default: 168 hours / 1 week)
// Kind Filtering (choose one or combine)
allowedKinds: number[]; // Explicit kind numbers: [0, 1, 3, 7]
allowedRanges: string[]; // Kind ranges: ["1000-1999", "30000-39999"]
kindCategories: string[]; // Pre-defined categories: ["social", "dm"]
}
```
### Event Kind Categories
Pre-defined categories for convenient kind whitelisting:
| Category | Kinds | Description |
|----------|-------|-------------|
| `social` | 0, 1, 3, 6, 7, 10002 | Profiles, notes, contacts, reposts, reactions |
| `dm` | 4, 14, 1059 | Direct messages (NIP-04, NIP-17, gift wraps) |
| `longform` | 30023, 30024 | Long-form articles and drafts |
| `media` | 1063, 20, 21, 22 | File metadata, picture/video/audio events |
| `marketplace` | 30017-30020, 1021, 1022 | Products, stalls, auctions, bids |
| `groups_nip29` | 9-12, 9000-9002, 39000-39002 | NIP-29 relay-based groups |
| `groups_nip72` | 34550, 1111, 4550 | NIP-72 moderated communities |
| `lists` | 10000, 10001, 10003, 30000, 30001, 30003 | Mute, pin, bookmark lists |
Example configuration allowing social interactions and DMs:
```json
{
"kindCategories": ["social", "dm"],
"dailyLimit": 100,
"ipDailyLimit": 1000
}
```
## Three-Tier Classification
### Trusted Publishers
Trusted publishers have unlimited publishing rights:
- Bypass all rate limiting
- Can publish any allowed kind
- Events always visible to all users
**Use case**: Known quality contributors, verified community members, partner relays.
### Blacklisted Publishers
Blacklisted publishers are blocked from publishing:
- All events rejected with `"pubkey is blacklisted"` error
- Existing events become invisible to regular users
- Admins and owners can still see blacklisted events (for review)
**Use case**: Spammers, abusive users, bad actors.
### Unclassified Publishers
Everyone else falls into the unclassified tier:
- Subject to daily event limit (default: 50 events/day)
- Subject to IP-based flood protection
- Events visible to all users
- Can be promoted to trusted or demoted to blacklisted
**Use case**: New users, general public.
## Rate Limiting & Flood Protection
### Per-Pubkey Limits
Unclassified publishers are limited to a configurable number of events per day (default: 50). The count resets at midnight UTC.
When a user exceeds their limit:
1. Event is rejected with `"daily event limit exceeded"` error
2. Their IP is flagged for potential abuse
### Per-IP Limits
To prevent Sybil attacks (creating many pubkeys from one IP), there's also an IP-based daily limit (default: 500 events).
When an IP exceeds its limit:
1. All events from that IP are rejected
2. The IP is temporarily banned
### Automatic IP Banning
When rate limits are exceeded:
| Offense | Ban Duration | Description |
|---------|--------------|-------------|
| First | 1 hour | Quick timeout for accidental over-posting |
| Second+ | 1 week | Extended ban for repeated abuse |
Ban durations are configurable via `firstBanHours` and `secondBanHours`.
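As a rough sketch of this escalation (the function and parameter names here are illustrative, not the relay's internal API):
```go
import "time"

// banDuration returns the block duration for an IP after it trips a rate
// limit. firstBanHours and secondBanHours come from the curating-config
// event (defaults: 1 and 168).
func banDuration(offenseCount, firstBanHours, secondBanHours int) time.Duration {
	if offenseCount <= 1 {
		return time.Duration(firstBanHours) * time.Hour // first offense
	}
	return time.Duration(secondBanHours) * time.Hour // repeat offenses
}
```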
### Offense Tracking
The system tracks which pubkeys triggered rate limits from each IP:
```
IP 192.168.1.100:
- npub1abc... exceeded limit at 2024-01-15 10:30:00
- npub1xyz... exceeded limit at 2024-01-15 10:45:00
Offense count: 2
Status: Banned until 2024-01-22 10:45:00
```
This helps identify coordinated spam attacks.
## Spam Flagging
Events can be flagged as spam without deletion:
- Flagged events are hidden from regular users
- Admins can review flagged events
- Events can be unflagged if incorrectly marked
- Original event data is preserved
This is useful for:
- Moderation review queues
- Training spam detection systems
- Preserving evidence of abuse
## NIP-86 Management API
All management operations use NIP-98 HTTP authentication.
### Trust Management
```bash
# Trust a pubkey
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"trustpubkey","params":["<pubkey_hex>"]}'
# Untrust a pubkey
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"untrustpubkey","params":["<pubkey_hex>"]}'
# List trusted pubkeys
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"listtrustedpubkeys","params":[]}'
```
### Blacklist Management
```bash
# Blacklist a pubkey
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"blacklistpubkey","params":["<pubkey_hex>"]}'
# Remove from blacklist
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"unblacklistpubkey","params":["<pubkey_hex>"]}'
# List blacklisted pubkeys
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"listblacklistedpubkeys","params":[]}'
```
### Unclassified User Management
```bash
# List unclassified users sorted by event count
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"listunclassifiedusers","params":[]}'
```
Response includes pubkey, event count, and last activity for each user.
### Spam Management
```bash
# Mark event as spam
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"markspam","params":["<event_id_hex>"]}'
# Unmark spam
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"unmarkspam","params":["<event_id_hex>"]}'
# List spam events
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"listspamevents","params":[]}'
```
### IP Block Management
```bash
# List blocked IPs
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"listblockedips","params":[]}'
# Unblock an IP
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"unblockip","params":["<ip_address>"]}'
```
### Configuration Management
```bash
# Get current configuration
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"getcuratingconfig","params":[]}'
# Set allowed kind categories
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"setallowedkindcategories","params":[["social","dm","longform"]]}'
# Get allowed kind categories
curl -X POST https://relay.example.com \
-H "Authorization: Nostr <nip98_token>" \
-d '{"method":"getallowedkindcategories","params":[]}'
```
## Web UI
The curation web UI is available at `/#curation` and provides:
- **Configuration Panel**: Set rate limits, ban durations, and allowed kinds
- **Publisher Management**: Trust/blacklist pubkeys with one click
- **Unclassified Users**: View users sorted by activity, promote or blacklist
- **IP Blocks**: View and unblock banned IP addresses
- **Spam Queue**: Review flagged events, confirm or unflag
## Database Storage
Curation data is stored in the relay database with the following key prefixes:
| Prefix | Purpose |
|--------|---------|
| `CURATING_ACL_CONFIG` | Current configuration |
| `CURATING_ACL_TRUSTED_PUBKEY_{pubkey}` | Trusted publisher list |
| `CURATING_ACL_BLACKLISTED_PUBKEY_{pubkey}` | Blacklisted publisher list |
| `CURATING_ACL_EVENT_COUNT_{pubkey}_{date}` | Daily event counts per pubkey |
| `CURATING_ACL_IP_EVENT_COUNT_{ip}_{date}` | Daily event counts per IP |
| `CURATING_ACL_IP_OFFENSE_{ip}` | Offense tracking per IP |
| `CURATING_ACL_BLOCKED_IP_{ip}` | Active IP blocks |
| `CURATING_ACL_SPAM_EVENT_{eventID}` | Spam-flagged events |
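For illustration, a per-pubkey daily counter key from the table above could be formed like this (the separator and date layout shown are assumptions, not the relay's actual encoding):
```go
import (
	"fmt"
	"time"
)

// eventCountKey sketches the CURATING_ACL_EVENT_COUNT_{pubkey}_{date} key;
// the date format used here is an assumption for illustration only.
func eventCountKey(pubkeyHex string, day time.Time) string {
	return fmt.Sprintf("CURATING_ACL_EVENT_COUNT_%s_%s", pubkeyHex, day.UTC().Format("2006-01-02"))
}
```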
## Caching
For performance, the following data is cached in memory:
- Trusted pubkey set
- Blacklisted pubkey set
- Allowed kinds set
- Current configuration
Caches are refreshed every hour by the background cleanup goroutine.
## Background Maintenance
A background goroutine runs hourly to:
1. Remove expired IP blocks
2. Clean up old event count entries (older than 2 days)
3. Refresh in-memory caches
4. Log maintenance statistics
## Best Practices
### Starting a New Curated Relay
1. Start with permissive settings:
```json
{"dailyLimit": 100, "ipDailyLimit": 1000, "kindCategories": ["social"]}
```
2. Monitor unclassified users for a few days
3. Trust active, quality contributors
4. Blacklist obvious spammers
5. Adjust rate limits based on observed patterns
### Handling Spam Waves
During spam attacks:
1. The IP-based flood protection will auto-ban attack sources
2. Review blocked IPs via web UI or API
3. Blacklist any pubkeys that got through
4. Consider temporarily lowering `ipDailyLimit`
### Recovering from Mistakes
- **Accidentally blacklisted someone**: Use `unblacklistpubkey` - their events become visible again
- **Wrongly flagged spam**: Use `unmarkspam` - event becomes visible again
- **Blocked legitimate IP**: Use `unblockip` - IP can publish again immediately
## Comparison with Other ACL Modes
| Feature | None | Follows | Managed | Curating |
|---------|------|---------|---------|----------|
| Default Access | Write | Write if followed | Explicit allow | Rate-limited |
| Rate Limiting | No | No | No | Yes |
| Kind Filtering | No | No | Optional | Yes |
| IP Protection | No | No | No | Yes |
| Spam Flagging | No | No | No | Yes |
| Configuration | Env vars | Follow lists | NIP-86 | Kind 30078 events |
| Web UI | Basic | Basic | Basic | Full curation panel |
## Troubleshooting
### "Relay not accepting events"
The relay requires a configuration event before accepting any events. Publish a kind 30078 event with d-tag `curating-config`.
### "daily event limit exceeded"
The user has exceeded their daily limit. Options:
1. Wait until midnight UTC for reset
2. Trust the pubkey if they're a quality contributor
3. Increase `dailyLimit` in configuration
### "pubkey is blacklisted"
The pubkey is on the blacklist. Use `unblacklistpubkey` if this was a mistake.
### "IP is blocked"
The IP has been auto-banned due to rate limit violations. Use `unblockip` if legitimate, or wait for the ban to expire.
### Events disappearing for users
Check if the event author has been blacklisted. Blacklisted authors' events are hidden from regular users but visible to admins.

229
docs/NIP-NRC.md

@ -0,0 +1,229 @@
# NIP-XX: Nostr Relay Connect (NRC)
`draft` `optional`
## Abstract
This NIP defines a protocol for exposing a private Nostr relay through a public relay, enabling access to relays behind NAT, firewalls, or on devices without public IP addresses. It uses end-to-end encrypted events to tunnel standard Nostr protocol messages through a rendezvous relay.
## Motivation
Users want to run personal relays for:
- Private data synchronization across devices
- Full control over event storage
- Offline-first applications with sync capability
However, personal relays often run:
- Behind NAT without public IP addresses
- On mobile devices
- On home servers without port forwarding capability
NRC solves this by tunneling Nostr protocol messages through encrypted events on a public relay, similar to how [NIP-47](https://github.com/nostr-protocol/nips/blob/master/47.md) tunnels wallet operations.
## Specification
### Event Kinds
| Kind | Name | Description |
|-------|--------------|------------------------------------------|
| 24891 | NRC Request | Ephemeral, client→relay wrapped message |
| 24892 | NRC Response | Ephemeral, relay→client wrapped message |
### Connection URI
The connection URI format is:
```
nostr+relayconnect://<relay-pubkey>?relay=<rendezvous-relay>&secret=<client-secret>[&name=<device-name>]
```
Parameters:
- `relay-pubkey`: The public key of the private relay (64-char hex)
- `relay`: The WebSocket URL of the rendezvous relay (URL-encoded)
- `secret`: A 32-byte hex-encoded secret used to derive the conversation key
- `name` (optional): Human-readable device identifier for management
Example:
```
nostr+relayconnect://a1b2c3d4e5f6...?relay=wss%3A%2F%2Frelay.example.com&secret=0123456789abcdef...&name=phone
```
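For illustration, the URI maps cleanly onto a standard URL parse; this sketch is not the parser in `pkg/protocol/nrc/uri.go`, only a demonstration of how the fields are carried:
```go
import (
	"fmt"
	"net/url"
)

// parseNRCURI extracts the relay pubkey (host part) and the query parameters.
func parseNRCURI(raw string) (relayPubkey, rendezvous, secret, name string, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", "", "", "", err
	}
	if u.Scheme != "nostr+relayconnect" {
		return "", "", "", "", fmt.Errorf("unexpected scheme %q", u.Scheme)
	}
	q := u.Query() // Get() returns values already URL-decoded
	return u.Host, q.Get("relay"), q.Get("secret"), q.Get("name"), nil
}
```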
### Alternative: CAT Token Authentication
For privacy-preserving access, NRC supports Cashu Access Tokens (CAT) instead of static secrets:
```
nostr+relayconnect://<relay-pubkey>?relay=<rendezvous-relay>&auth=cat&mint=<mint-url>
```
When using CAT authentication:
1. Client obtains a CAT token from the mint with scope `nrc`
2. Client includes the token in request events using a `cashu` tag
3. Bridge verifies the token and re-authorizes via ACL on each request
### Message Flow
```
┌─────────┐ ┌─────────────┐ ┌─────────┐ ┌─────────────┐
│ Client │────▶│ Public Relay│────▶│ Bridge │────▶│Private Relay│
│ │◀────│ (rendezvous)│◀────│ │◀────│ │
└─────────┘ └─────────────┘ └─────────┘ └─────────────┘
│ │
└────────── NIP-44 encrypted ────────┘
```
1. **Client** wraps Nostr messages in kind 24891 events and encrypts the content with NIP-44 (sketched below)
2. **Public relay** forwards events based on `p` tags (cannot decrypt content)
3. **Bridge** (running alongside private relay) decrypts and forwards to local relay
4. **Private relay** processes the message normally
5. **Bridge** wraps response in kind 24892, encrypts, and publishes
6. **Client** receives kind 24892 events and decrypts the response
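A minimal Go sketch of step 1, wrapping a `REQ` for the tunnel; `nip44Encrypt` stands in for a NIP-44 v2 implementation, and signing the resulting event is left to the caller:
```go
import (
	"encoding/json"
	"time"
)

// nrcRequest mirrors the encrypted content structure defined below.
type nrcRequest struct {
	Type    string          `json:"type"`
	Payload json.RawMessage `json:"payload"`
}

// wrapREQ builds an unsigned kind 24891 event carrying an encrypted REQ.
func wrapREQ(subID string, filter json.RawMessage, relayPubkeyHex, sessionID string,
	nip44Encrypt func(plaintext []byte) (string, error)) (map[string]any, error) {
	payload, err := json.Marshal([]any{"REQ", subID, filter})
	if err != nil {
		return nil, err
	}
	inner, err := json.Marshal(nrcRequest{Type: "REQ", Payload: payload})
	if err != nil {
		return nil, err
	}
	ciphertext, err := nip44Encrypt(inner)
	if err != nil {
		return nil, err
	}
	return map[string]any{
		"kind":       24891,
		"created_at": time.Now().Unix(),
		"content":    ciphertext,
		"tags": [][]string{
			{"p", relayPubkeyHex},
			{"encryption", "nip44_v2"},
			{"session", sessionID},
		},
	}, nil
}
```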
### Request Event (Kind 24891)
```json
{
"kind": 24891,
"content": "<nip44_encrypted_json>",
"tags": [
["p", "<relay_pubkey>"],
["encryption", "nip44_v2"],
["session", "<session_id>"]
],
"pubkey": "<client_pubkey>",
"created_at": <unix_timestamp>,
"sig": "<signature>"
}
```
With CAT authentication, add:
```json
["cashu", "cashuA..."]
```
The encrypted content structure:
```json
{
"type": "EVENT" | "REQ" | "CLOSE" | "AUTH" | "COUNT",
"payload": <standard_nostr_message_array>
}
```
Where `payload` is the standard Nostr message array, e.g.:
- `["EVENT", <event_object>]`
- `["REQ", "<sub_id>", <filter1>, <filter2>, ...]`
- `["CLOSE", "<sub_id>"]`
- `["AUTH", <auth_event>]`
- `["COUNT", "<sub_id>", <filter1>, ...]`
### Response Event (Kind 24892)
```json
{
"kind": 24892,
"content": "<nip44_encrypted_json>",
"tags": [
["p", "<client_pubkey>"],
["encryption", "nip44_v2"],
["session", "<session_id>"],
["e", "<request_event_id>"]
],
"pubkey": "<relay_pubkey>",
"created_at": <unix_timestamp>,
"sig": "<signature>"
}
```
The encrypted content structure:
```json
{
"type": "EVENT" | "OK" | "EOSE" | "NOTICE" | "CLOSED" | "COUNT" | "AUTH",
"payload": <standard_nostr_response_array>
}
```
Where `payload` is the standard Nostr response array, e.g.:
- `["EVENT", "<sub_id>", <event_object>]`
- `["OK", "<event_id>", <success_bool>, "<message>"]`
- `["EOSE", "<sub_id>"]`
- `["NOTICE", "<message>"]`
- `["CLOSED", "<sub_id>", "<message>"]`
- `["COUNT", "<sub_id>", {"count": <n>}]`
- `["AUTH", "<challenge>"]`
### Session Management
The `session` tag groups related request/response events, enabling:
- Multiple concurrent subscriptions through a single tunnel
- Correlation of responses to requests
- Session state tracking on the bridge
Session IDs SHOULD be randomly generated UUIDs or 32-byte hex strings.
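Either form can be generated like this (illustrative sketch using the `github.com/google/uuid` dependency added in this changeset):
```go
import (
	"crypto/rand"
	"encoding/hex"

	"github.com/google/uuid"
)

// newSessionID returns either a UUID or a 32-byte hex string, both acceptable
// per the SHOULD above.
func newSessionID(useUUID bool) (string, error) {
	if useUUID {
		return uuid.NewString(), nil
	}
	buf := make([]byte, 32)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
```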
### Encryption
All content is encrypted using [NIP-44](https://github.com/nostr-protocol/nips/blob/master/44.md) v2.
The conversation key is derived from:
- **Secret-based auth**: ECDH between client's secret key (derived from URI secret) and relay's public key
- **CAT auth**: ECDH between client's Nostr key and relay's public key
### Authentication
#### Secret-Based Authentication
1. Client derives a keypair from the `secret` parameter in the URI
2. Client signs all request events with this derived key
3. Bridge verifies the client's pubkey is in its authorized list
4. Conversation key provides implicit authentication (only authorized clients can decrypt responses)
#### CAT Token Authentication
1. Client obtains a CAT token from the relay's mint with scope `nrc`
2. Token is bound to client's Nostr pubkey
3. Client includes token in the `cashu` tag of request events
4. Bridge verifies token signature and scope
5. Bridge re-authorizes via ACL on each request (enables immediate revocation)
### Access Revocation
**Secret-based**: Remove the client's derived pubkey from the authorized list.
**CAT-based**: Remove the client's Nostr pubkey from the ACL. Takes effect immediately due to re-authorization on each request.
## Security Considerations
1. **End-to-end encryption**: The rendezvous relay cannot read tunneled messages
2. **Perfect forward secrecy**: Not provided; if the secret is compromised, past messages can be decrypted
3. **Rate limiting**: Bridges SHOULD enforce rate limits to prevent abuse
4. **Session expiry**: Sessions SHOULD timeout after a period of inactivity
5. **TLS**: The rendezvous relay connection SHOULD use TLS (wss://)
6. **Secret storage**: Clients SHOULD store connection URIs securely (they contain secrets)
## Client Implementation Notes
1. Generate a random session ID on connection
2. Subscribe to kind 24892 events with a `#p` filter for the client's pubkey (see the sketch after this list)
3. For each outgoing message, wrap in kind 24891 and publish
4. Match responses using the `e` tag (references request event ID)
5. Handle EOSE by waiting for kind 24892 with type "EOSE" in content
6. For subscriptions, maintain mapping of internal sub IDs to tunnel session
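A minimal sketch of the response subscription from note 2 (names are illustrative):
```go
import "encoding/json"

// buildResponseSubscription returns the ["REQ", ...] message that listens for
// kind 24892 responses addressed to this client's pubkey.
func buildResponseSubscription(subID, clientPubkeyHex string) ([]byte, error) {
	filter := map[string]any{
		"kinds": []int{24892},
		"#p":    []string{clientPubkeyHex},
	}
	return json.Marshal([]any{"REQ", subID, filter})
}
```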
## Bridge Implementation Notes
1. Subscribe to kind 24891 events with `#p` filter for relay's pubkey
2. Verify client authorization (secret-based or CAT)
3. Decrypt content and forward to local relay via internal WebSocket
4. Capture all relay responses and wrap in kind 24892
5. Sign with relay's key and publish to rendezvous relay
6. Maintain session state for subscription mapping
## Reference Implementations
- ORLY Relay: [https://git.mleku.dev/mleku/next.orly.dev](https://git.mleku.dev/mleku/next.orly.dev)
## See Also
- [NIP-44: Encrypted Payloads](https://github.com/nostr-protocol/nips/blob/master/44.md)
- [NIP-47: Nostr Wallet Connect](https://github.com/nostr-protocol/nips/blob/master/47.md)
- [NIP-46: Nostr Remote Signing](https://github.com/nostr-protocol/nips/blob/master/46.md)

1
go.mod

@ -56,6 +56,7 @@ require (
github.com/google/btree v1.1.2 // indirect github.com/google/btree v1.1.2 // indirect
github.com/google/flatbuffers v25.9.23+incompatible // indirect github.com/google/flatbuffers v25.9.23+incompatible // indirect
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect

2
go.sum

@ -81,6 +81,8 @@ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8I
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0= github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8= github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=

182
main.go

@ -185,6 +185,12 @@ func main() {
os.Exit(0) os.Exit(0)
} }
// Handle 'nrc' subcommand: NRC (Nostr Relay Connect) utilities
if requested, subcommand, args := config.NRCRequested(); requested {
handleNRCCommand(cfg, subcommand, args)
os.Exit(0)
}
// Handle 'serve' subcommand: start ephemeral relay with RAM-based storage // Handle 'serve' subcommand: start ephemeral relay with RAM-based storage
if config.ServeRequested() { if config.ServeRequested() {
const serveDataDir = "/dev/shm/orlyserve" const serveDataDir = "/dev/shm/orlyserve"
@ -780,3 +786,179 @@ func openBrowser(url string) {
log.W.F("could not open browser: %v", err) log.W.F("could not open browser: %v", err)
} }
} }
// handleNRCCommand handles the 'nrc' CLI subcommand for NRC (Nostr Relay Connect) utilities.
func handleNRCCommand(cfg *config.C, subcommand string, args []string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
switch subcommand {
case "generate":
handleNRCGenerate(ctx, cfg, args)
case "list":
handleNRCList(cfg)
case "revoke":
handleNRCRevoke(args)
default:
printNRCUsage()
}
}
// printNRCUsage prints the usage information for the nrc subcommand.
func printNRCUsage() {
fmt.Println("Usage: orly nrc <subcommand> [options]")
fmt.Println("")
fmt.Println("Nostr Relay Connect (NRC) utilities for private relay access.")
fmt.Println("")
fmt.Println("Subcommands:")
fmt.Println(" generate [--name <device>] Generate a new connection URI")
fmt.Println(" list List currently configured authorized secrets")
fmt.Println(" revoke <name> Revoke access for a device (show instructions)")
fmt.Println("")
fmt.Println("Examples:")
fmt.Println(" orly nrc generate")
fmt.Println(" orly nrc generate --name phone")
fmt.Println(" orly nrc list")
fmt.Println(" orly nrc revoke phone")
fmt.Println("")
fmt.Println("To enable NRC, set these environment variables:")
fmt.Println(" ORLY_NRC_ENABLED=true")
fmt.Println(" ORLY_NRC_RENDEZVOUS_URL=wss://public-relay.example.com")
fmt.Println(" ORLY_NRC_AUTHORIZED_KEYS=<secret1>:<name1>,<secret2>:<name2>")
fmt.Println("")
fmt.Println("For CAT-based authentication, also set:")
fmt.Println(" ORLY_NRC_USE_CASHU=true")
}
// handleNRCGenerate generates a new NRC connection URI.
func handleNRCGenerate(ctx context.Context, cfg *config.C, args []string) {
// Parse device name from args
var deviceName string
for i := 0; i < len(args); i++ {
if args[i] == "--name" && i+1 < len(args) {
deviceName = args[i+1]
i++
}
}
// Get relay identity
var db database.Database
var err error
if db, err = database.NewDatabaseWithConfig(
ctx, nil, cfg.DBType, makeDatabaseConfig(cfg),
); chk.E(err) {
fmt.Printf("Error: failed to open database: %v\n", err)
return
}
defer db.Close()
<-db.Ready()
relaySecretKey, err := db.GetOrCreateRelayIdentitySecret()
if err != nil {
fmt.Printf("Error: failed to get relay identity: %v\n", err)
return
}
relayPubkey, err := keys.SecretBytesToPubKeyBytes(relaySecretKey)
if err != nil {
fmt.Printf("Error: failed to derive relay pubkey: %v\n", err)
return
}
// Get rendezvous URL from config
nrcEnabled, nrcRendezvousURL, _, _, _ := cfg.GetNRCConfigValues()
if !nrcEnabled || nrcRendezvousURL == "" {
fmt.Println("Error: NRC is not configured. Set ORLY_NRC_ENABLED=true and ORLY_NRC_RENDEZVOUS_URL")
return
}
// Generate a new random secret using crypto/rand
secret := make([]byte, 32)
if _, err := rand.Read(secret); err != nil {
fmt.Printf("Error: failed to generate random secret: %v\n", err)
return
}
secretHex := hex.Enc(secret)
// Build the URI
uri := fmt.Sprintf("nostr+relayconnect://%s?relay=%s&secret=%s",
hex.Enc(relayPubkey), nrcRendezvousURL, secretHex)
if deviceName != "" {
uri += fmt.Sprintf("&name=%s", deviceName)
}
fmt.Println("Generated NRC Connection URI:")
fmt.Println("")
fmt.Println(uri)
fmt.Println("")
fmt.Println("Add this secret to ORLY_NRC_AUTHORIZED_KEYS:")
if deviceName != "" {
fmt.Printf(" %s:%s\n", secretHex, deviceName)
} else {
fmt.Printf(" %s\n", secretHex)
}
fmt.Println("")
fmt.Println("IMPORTANT: Store this URI securely - anyone with this URI can access your relay.")
}
// handleNRCList lists configured authorized secrets from environment.
func handleNRCList(cfg *config.C) {
_, _, authorizedKeys, useCashu, _ := cfg.GetNRCConfigValues()
fmt.Println("NRC Configuration:")
fmt.Println("")
if len(authorizedKeys) == 0 {
fmt.Println(" No authorized secrets configured.")
fmt.Println("")
fmt.Println(" To add secrets, set ORLY_NRC_AUTHORIZED_KEYS=<secret>:<name>,...")
} else {
fmt.Printf(" Authorized secrets: %d\n", len(authorizedKeys))
fmt.Println("")
for _, entry := range authorizedKeys {
parts := strings.SplitN(entry, ":", 2)
secretHex := parts[0]
name := "(unnamed)"
if len(parts) == 2 && parts[1] != "" {
name = parts[1]
}
// Show truncated secret for identification
truncated := secretHex
if len(secretHex) > 16 {
truncated = secretHex[:8] + "..." + secretHex[len(secretHex)-8:]
}
fmt.Printf(" - %s: %s\n", name, truncated)
}
}
fmt.Println("")
fmt.Printf(" CAT authentication: %v\n", useCashu)
}
// handleNRCRevoke provides instructions for revoking access.
func handleNRCRevoke(args []string) {
if len(args) == 0 {
fmt.Println("Usage: orly nrc revoke <device-name>")
fmt.Println("")
fmt.Println("To revoke access for a device:")
fmt.Println("1. Remove the corresponding secret from ORLY_NRC_AUTHORIZED_KEYS")
fmt.Println("2. Restart the relay")
fmt.Println("")
fmt.Println("Example: If ORLY_NRC_AUTHORIZED_KEYS=\"abc123:phone,def456:laptop\"")
fmt.Println("To revoke 'phone', change to: ORLY_NRC_AUTHORIZED_KEYS=\"def456:laptop\"")
return
}
deviceName := args[0]
fmt.Printf("To revoke access for '%s':\n", deviceName)
fmt.Println("")
fmt.Println("1. Edit ORLY_NRC_AUTHORIZED_KEYS and remove the entry for this device")
fmt.Println("2. Restart the relay")
fmt.Println("")
fmt.Println("The device will no longer be able to connect after the restart.")
}

80
pkg/acl/follows.go

@ -45,6 +45,8 @@ type Follows struct {
lastFollowListFetch time.Time lastFollowListFetch time.Time
// Callback for external notification of follow list changes // Callback for external notification of follow list changes
onFollowListUpdate func() onFollowListUpdate func()
// Progressive throttle for non-followed users (nil if disabled)
throttle *ProgressiveThrottle
} }
func (f *Follows) Configure(cfg ...any) (err error) { func (f *Follows) Configure(cfg ...any) (err error) {
@ -131,6 +133,22 @@ func (f *Follows) Configure(cfg ...any) (err error) {
} }
} }
} }
// Initialize progressive throttle if enabled
if f.cfg.FollowsThrottleEnabled {
perEvent := f.cfg.FollowsThrottlePerEvent
if perEvent == 0 {
perEvent = 200 * time.Millisecond
}
maxDelay := f.cfg.FollowsThrottleMaxDelay
if maxDelay == 0 {
maxDelay = 60 * time.Second
}
f.throttle = NewProgressiveThrottle(perEvent, maxDelay)
log.I.F("follows ACL: progressive throttle enabled (increment: %v, max: %v)",
perEvent, maxDelay)
}
return return
} }
@ -155,6 +173,10 @@ func (f *Follows) GetAccessLevel(pub []byte, address string) (level string) {
if f.cfg == nil { if f.cfg == nil {
return "write" return "write"
} }
// If throttle enabled, non-followed users get write access (with delay applied in handle-event)
if f.throttle != nil {
return "write"
}
return "read" return "read"
} }
@ -165,6 +187,41 @@ func (f *Follows) GetACLInfo() (name, description, documentation string) {
func (f *Follows) Type() string { return "follows" } func (f *Follows) Type() string { return "follows" }
// GetThrottleDelay returns the progressive throttle delay for this event.
// Returns 0 if throttle is disabled or if the user is exempt (owner/admin/followed).
func (f *Follows) GetThrottleDelay(pubkey []byte, ip string) time.Duration {
if f.throttle == nil {
return 0
}
// Check if user is exempt from throttling
f.followsMx.RLock()
defer f.followsMx.RUnlock()
// Owners bypass throttle
for _, v := range f.owners {
if utils.FastEqual(v, pubkey) {
return 0
}
}
// Admins bypass throttle
for _, v := range f.admins {
if utils.FastEqual(v, pubkey) {
return 0
}
}
// Followed users bypass throttle
for _, v := range f.follows {
if utils.FastEqual(v, pubkey) {
return 0
}
}
// Non-followed users get throttled
pubkeyHex := hex.EncodeToString(pubkey)
return f.throttle.GetDelay(ip, pubkeyHex)
}
func (f *Follows) adminRelays() (urls []string) { func (f *Follows) adminRelays() (urls []string) {
f.followsMx.RLock() f.followsMx.RLock()
admins := make([][]byte, len(f.admins)) admins := make([][]byte, len(f.admins))
@ -353,6 +410,29 @@ func (f *Follows) Syncer() {
// Start periodic follow list and metadata fetching // Start periodic follow list and metadata fetching
go f.startPeriodicFollowListFetching() go f.startPeriodicFollowListFetching()
// Start throttle cleanup goroutine if throttle is enabled
if f.throttle != nil {
go f.throttleCleanup()
}
}
// throttleCleanup periodically removes fully-decayed throttle entries
func (f *Follows) throttleCleanup() {
ticker := time.NewTicker(10 * time.Minute)
defer ticker.Stop()
for {
select {
case <-f.Ctx.Done():
return
case <-ticker.C:
f.throttle.Cleanup()
ipCount, pubkeyCount := f.throttle.Stats()
log.T.F("follows throttle: cleanup complete, tracking %d IPs and %d pubkeys",
ipCount, pubkeyCount)
}
}
} }
// startPeriodicFollowListFetching starts periodic fetching of admin follow lists // startPeriodicFollowListFetching starts periodic fetching of admin follow lists

126
pkg/acl/follows_throttle.go

@ -0,0 +1,126 @@
package acl
import (
"sync"
"time"
)
// ThrottleState tracks accumulated delay for an identity (IP or pubkey)
type ThrottleState struct {
AccumulatedDelay time.Duration
LastEventTime time.Time
}
// ProgressiveThrottle implements linear delay with time decay.
// Each event adds perEvent delay, and delay decays at 1:1 ratio with elapsed time.
// This creates a natural rate limit that averages to 1 event per perEvent interval.
type ProgressiveThrottle struct {
mu sync.Mutex
ipStates map[string]*ThrottleState
pubkeyStates map[string]*ThrottleState
perEvent time.Duration // delay increment per event (default 200ms)
maxDelay time.Duration // cap (default 60s)
}
// NewProgressiveThrottle creates a new throttle with the given parameters.
// perEvent is the delay added per event (e.g., 200ms).
// maxDelay is the maximum accumulated delay cap (e.g., 60s).
func NewProgressiveThrottle(perEvent, maxDelay time.Duration) *ProgressiveThrottle {
return &ProgressiveThrottle{
ipStates: make(map[string]*ThrottleState),
pubkeyStates: make(map[string]*ThrottleState),
perEvent: perEvent,
maxDelay: maxDelay,
}
}
// GetDelay returns accumulated delay for this identity and updates state.
// It tracks both IP and pubkey independently and returns the maximum of both.
// This prevents evasion via different pubkeys from same IP or vice versa.
func (pt *ProgressiveThrottle) GetDelay(ip, pubkeyHex string) time.Duration {
pt.mu.Lock()
defer pt.mu.Unlock()
now := time.Now()
var ipDelay, pubkeyDelay time.Duration
if ip != "" {
ipDelay = pt.updateState(pt.ipStates, ip, now)
}
if pubkeyHex != "" {
pubkeyDelay = pt.updateState(pt.pubkeyStates, pubkeyHex, now)
}
// Return max of both to prevent evasion
if ipDelay > pubkeyDelay {
return ipDelay
}
return pubkeyDelay
}
// updateState calculates and updates the delay for a single identity.
// The algorithm:
// 1. Decay: subtract elapsed time from accumulated delay (1:1 ratio)
// 2. Add: add perEvent for this new event
// 3. Cap: limit to maxDelay
func (pt *ProgressiveThrottle) updateState(states map[string]*ThrottleState, key string, now time.Time) time.Duration {
state, exists := states[key]
if !exists {
// First event from this identity
states[key] = &ThrottleState{
AccumulatedDelay: pt.perEvent,
LastEventTime: now,
}
return pt.perEvent
}
// Decay: subtract elapsed time (1:1 ratio)
elapsed := now.Sub(state.LastEventTime)
state.AccumulatedDelay -= elapsed
if state.AccumulatedDelay < 0 {
state.AccumulatedDelay = 0
}
// Add new event's delay
state.AccumulatedDelay += pt.perEvent
state.LastEventTime = now
// Cap at max
if state.AccumulatedDelay > pt.maxDelay {
state.AccumulatedDelay = pt.maxDelay
}
return state.AccumulatedDelay
}
// Cleanup removes entries that have fully decayed (no remaining delay).
// This should be called periodically to prevent unbounded memory growth.
func (pt *ProgressiveThrottle) Cleanup() {
pt.mu.Lock()
defer pt.mu.Unlock()
now := time.Now()
// Remove IP entries that have fully decayed
for k, v := range pt.ipStates {
elapsed := now.Sub(v.LastEventTime)
if elapsed >= v.AccumulatedDelay {
delete(pt.ipStates, k)
}
}
// Remove pubkey entries that have fully decayed
for k, v := range pt.pubkeyStates {
elapsed := now.Sub(v.LastEventTime)
if elapsed >= v.AccumulatedDelay {
delete(pt.pubkeyStates, k)
}
}
}
// Stats returns the current number of tracked IPs and pubkeys (for monitoring)
func (pt *ProgressiveThrottle) Stats() (ipCount, pubkeyCount int) {
pt.mu.Lock()
defer pt.mu.Unlock()
return len(pt.ipStates), len(pt.pubkeyStates)
}

140
pkg/bbolt/import-export.go

@ -31,9 +31,10 @@ func (b *B) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
if chk.E(err) { if chk.E(err) {
return err return err
} }
defer os.Remove(tmp.Name()) // Clean up temp file when done tmpName := tmp.Name()
defer os.Remove(tmpName) // Clean up temp file when done
log.I.F("bbolt import: buffering upload to %s", tmp.Name()) log.I.F("bbolt import: buffering upload to %s", tmpName)
bufferStart := time.Now() bufferStart := time.Now()
bytesBuffered, err := io.Copy(tmp, rr) bytesBuffered, err := io.Copy(tmp, rr)
if chk.E(err) { if chk.E(err) {
@ -48,12 +49,30 @@ func (b *B) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
return err return err
} }
processErr := b.processJSONLEvents(ctx, tmp) count, processErr := b.processJSONLEventsReturningCount(ctx, tmp)
// Close temp file to release resources before index building
tmp.Close()
if processErr != nil {
return processErr
}
// Build indexes after events are stored (minimal import mode)
if count > 0 {
// Force garbage collection to reclaim memory before index building
debug.FreeOSMemory()
log.I.F("bbolt import: building indexes for %d events...", count)
if err := b.BuildIndexes(ctx); err != nil {
log.E.F("bbolt import: failed to build indexes: %v", err)
return err
}
}
totalElapsed := time.Since(startTime) totalElapsed := time.Since(startTime)
log.I.F("bbolt import: total operation time: %v", totalElapsed.Round(time.Millisecond)) log.I.F("bbolt import: total operation time: %v", totalElapsed.Round(time.Millisecond))
return processErr return nil
} }
// ImportEventsFromStrings imports events from a slice of JSON strings with policy filtering // ImportEventsFromStrings imports events from a slice of JSON strings with policy filtering
@ -67,7 +86,95 @@ func (b *B) ImportEventsFromStrings(ctx context.Context, eventJSONs []string, po
// processJSONLEvents processes JSONL events from a reader // processJSONLEvents processes JSONL events from a reader
func (b *B) processJSONLEvents(ctx context.Context, rr io.Reader) error { func (b *B) processJSONLEvents(ctx context.Context, rr io.Reader) error {
return b.processJSONLEventsWithPolicy(ctx, rr, nil) _, err := b.processJSONLEventsReturningCount(ctx, rr)
return err
}
// processJSONLEventsReturningCount processes JSONL events and returns the count saved
// This is used by ImportEventsFromReader for migration mode (minimal import without inline indexes)
func (b *B) processJSONLEventsReturningCount(ctx context.Context, rr io.Reader) (int, error) {
// Create a scanner to read the buffer line by line
scan := bufio.NewScanner(rr)
scanBuf := make([]byte, maxLen)
scan.Buffer(scanBuf, maxLen)
// Performance tracking
startTime := time.Now()
lastLogTime := startTime
const logInterval = 5 * time.Second
var count, total, skipped, unmarshalErrors, saveErrors int
for scan.Scan() {
select {
case <-ctx.Done():
log.I.F("bbolt import: context closed after %d events", count)
return count, ctx.Err()
default:
}
line := scan.Bytes()
total += len(line) + 1
if len(line) < 1 {
skipped++
continue
}
ev := event.New()
if _, err := ev.Unmarshal(line); err != nil {
ev.Free()
unmarshalErrors++
log.W.F("bbolt import: failed to unmarshal event: %v", err)
continue
}
// Minimal path for migration: store events only, indexes built later
if err := b.SaveEventMinimal(ev); err != nil {
ev.Free()
saveErrors++
log.W.F("bbolt import: failed to save event: %v", err)
continue
}
ev.Free()
line = nil
count++
// Progress logging every logInterval
if time.Since(lastLogTime) >= logInterval {
elapsed := time.Since(startTime)
eventsPerSec := float64(count) / elapsed.Seconds()
mbPerSec := float64(total) / elapsed.Seconds() / 1024 / 1024
log.I.F("bbolt import: progress %d events saved, %.2f MB read, %.0f events/sec, %.2f MB/sec",
count, float64(total)/1024/1024, eventsPerSec, mbPerSec)
lastLogTime = time.Now()
debug.FreeOSMemory()
}
}
// Flush any remaining batched events
if b.batcher != nil {
b.batcher.Flush()
}
// Final summary
elapsed := time.Since(startTime)
eventsPerSec := float64(count) / elapsed.Seconds()
mbPerSec := float64(total) / elapsed.Seconds() / 1024 / 1024
log.I.F("bbolt import: completed - %d events saved, %.2f MB in %v (%.0f events/sec, %.2f MB/sec)",
count, float64(total)/1024/1024, elapsed.Round(time.Millisecond), eventsPerSec, mbPerSec)
if unmarshalErrors > 0 || saveErrors > 0 || skipped > 0 {
log.I.F("bbolt import: stats - %d unmarshal errors, %d save errors, %d skipped empty lines",
unmarshalErrors, saveErrors, skipped)
}
if err := scan.Err(); err != nil {
return count, err
}
// Clear scanner buffer to help GC
scanBuf = nil
return count, nil
} }
// processJSONLEventsWithPolicy processes JSONL events from a reader with optional policy filtering // processJSONLEventsWithPolicy processes JSONL events from a reader with optional policy filtering
@ -127,14 +234,21 @@ func (b *B) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
continue continue
} }
log.D.F("bbolt import: policy allowed event %x during sync import", ev.ID) log.D.F("bbolt import: policy allowed event %x during sync import", ev.ID)
} // With policy checking, use regular SaveEvent path
if _, err := b.SaveEvent(ctx, ev); err != nil {
if _, err := b.SaveEvent(ctx, ev); err != nil { ev.Free()
// return the pooled buffer on error paths too saveErrors++
ev.Free() log.W.F("bbolt import: failed to save event: %v", err)
saveErrors++ continue
log.W.F("bbolt import: failed to save event: %v", err) }
continue } else {
// Minimal path for migration: store events only, build indexes later
if err := b.SaveEventMinimal(ev); err != nil {
ev.Free()
saveErrors++
log.W.F("bbolt import: failed to save event: %v", err)
continue
}
} }
// return the pooled buffer after successful save // return the pooled buffer after successful save

232
pkg/bbolt/import-minimal.go

@ -0,0 +1,232 @@
//go:build !(js && wasm)
package bbolt
import (
"bytes"
"context"
"errors"
"runtime/debug"
"sort"
"time"
bolt "go.etcd.io/bbolt"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/bufpool"
"git.mleku.dev/mleku/nostr/encoders/event"
)
// SaveEventMinimal stores only the essential event data for fast bulk import.
// It skips all indexes - call BuildIndexes after import completes.
func (b *B) SaveEventMinimal(ev *event.E) error {
if ev == nil {
return errors.New("nil event")
}
// Reject ephemeral events
if ev.Kind >= 20000 && ev.Kind <= 29999 {
return nil
}
// Get the next serial number
serial := b.getNextEventSerial()
// Serialize event in raw binary format (not compact - preserves full pubkey)
// This allows index building to work without pubkey serial resolution
legacyBuf := bufpool.GetMedium()
defer bufpool.PutMedium(legacyBuf)
ev.MarshalBinary(legacyBuf)
eventData := bufpool.CopyBytes(legacyBuf)
// Create minimal batch - only event data and ID mappings
batch := &EventBatch{
Serial: serial,
EventData: eventData,
Indexes: []BatchedWrite{
// Event ID -> Serial (for lookups)
{BucketName: bucketEid, Key: ev.ID[:], Value: makeSerialKey(serial)},
// Serial -> Event ID (for reverse lookups)
{BucketName: bucketSei, Key: makeSerialKey(serial), Value: ev.ID[:]},
},
}
return b.batcher.Add(batch)
}
// BuildIndexes builds all query indexes from stored events.
// Call this after importing events with SaveEventMinimal.
// Processes events in chunks to avoid OOM on large databases.
func (b *B) BuildIndexes(ctx context.Context) error {
log.I.F("bbolt: starting index build...")
startTime := time.Now()
// Force GC before starting to reclaim batch buffer memory
debug.FreeOSMemory()
// Process in small chunks to avoid OOM on memory-constrained systems
// With ~15 indexes per event and ~50 bytes per key, 50k events = ~37.5MB per chunk
const chunkSize = 50000
var totalEvents int
var lastSerial uint64 = 0
var lastLogTime = time.Now()
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// Collect indexes for this chunk
indexesByBucket := make(map[string][][]byte)
var chunkEvents int
var chunkSerial uint64
// Read a chunk of events
err := b.db.View(func(tx *bolt.Tx) error {
cmpBucket := tx.Bucket(bucketCmp)
if cmpBucket == nil {
return errors.New("cmp bucket not found")
}
cursor := cmpBucket.Cursor()
// Seek to start position
var k, v []byte
if lastSerial == 0 {
k, v = cursor.First()
} else {
// Seek past the last processed serial
seekKey := makeSerialKey(lastSerial + 1)
k, v = cursor.Seek(seekKey)
}
for ; k != nil && chunkEvents < chunkSize; k, v = cursor.Next() {
serial := decodeSerialKey(k)
chunkSerial = serial
// Decode event from raw binary format
ev := event.New()
if err := ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
log.W.F("bbolt: failed to unmarshal event at serial %d: %v", serial, err)
continue
}
// Generate indexes for this event
rawIdxs, err := database.GetIndexesForEvent(ev, serial)
if chk.E(err) {
ev.Free()
continue
}
// Group by bucket (first 3 bytes)
for _, idx := range rawIdxs {
if len(idx) < 3 {
continue
}
bucketName := string(idx[:3])
key := idx[3:]
// Skip eid and sei - already stored during import
if bucketName == "eid" || bucketName == "sei" {
continue
}
// Make a copy of the key
keyCopy := make([]byte, len(key))
copy(keyCopy, key)
indexesByBucket[bucketName] = append(indexesByBucket[bucketName], keyCopy)
}
ev.Free()
chunkEvents++
}
return nil
})
if err != nil {
return err
}
// No more events to process
if chunkEvents == 0 {
break
}
totalEvents += chunkEvents
lastSerial = chunkSerial
// Progress logging
if time.Since(lastLogTime) >= 5*time.Second {
log.I.F("bbolt: index build progress: %d events processed", totalEvents)
lastLogTime = time.Now()
}
// Count total keys in this chunk
var totalKeys int
for _, keys := range indexesByBucket {
totalKeys += len(keys)
}
log.I.F("bbolt: writing %d index keys for chunk (%d events)", totalKeys, chunkEvents)
// Write this chunk's indexes
for bucketName, keys := range indexesByBucket {
if len(keys) == 0 {
continue
}
bucketBytes := []byte(bucketName)
// Sort keys for this bucket before writing
sort.Slice(keys, func(i, j int) bool {
return bytes.Compare(keys[i], keys[j]) < 0
})
// Write in batches
const batchSize = 50000
for i := 0; i < len(keys); i += batchSize {
end := i + batchSize
if end > len(keys) {
end = len(keys)
}
batch := keys[i:end]
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(bucketBytes)
if bucket == nil {
return nil
}
for _, key := range batch {
if err := bucket.Put(key, nil); err != nil {
return err
}
}
return nil
})
if err != nil {
log.E.F("bbolt: failed to write batch for bucket %s: %v", bucketName, err)
return err
}
}
}
// Clear for next chunk and release memory
indexesByBucket = nil
debug.FreeOSMemory()
}
elapsed := time.Since(startTime)
log.I.F("bbolt: index build complete in %v (%d events)", elapsed.Round(time.Second), totalEvents)
return nil
}
// decodeSerialKey decodes a 5-byte serial key to uint64
func decodeSerialKey(b []byte) uint64 {
if len(b) < 5 {
return 0
}
return uint64(b[0])<<32 | uint64(b[1])<<24 | uint64(b[2])<<16 | uint64(b[3])<<8 | uint64(b[4])
}

96
pkg/bbolt/save-event-bulk.go

@ -0,0 +1,96 @@
//go:build !(js && wasm)
package bbolt
import (
"errors"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/bufpool"
"git.mleku.dev/mleku/nostr/encoders/event"
)
// SaveEventForImport saves an event optimized for bulk import.
// It skips duplicate checking, deletion checking, and graph vertex creation
// to maximize import throughput. Use only for trusted data migration.
func (b *B) SaveEventForImport(ev *event.E) error {
if ev == nil {
return errors.New("nil event")
}
// Reject ephemeral events (kinds 20000-29999)
if ev.Kind >= 20000 && ev.Kind <= 29999 {
return nil // silently skip
}
// Get the next serial number
serial := b.getNextEventSerial()
// Generate all indexes using the shared function
rawIdxs, err := database.GetIndexesForEvent(ev, serial)
if chk.E(err) {
return err
}
// Convert raw indexes to BatchedWrites, stripping the 3-byte prefix
batch := &EventBatch{
Serial: serial,
Indexes: make([]BatchedWrite, 0, len(rawIdxs)+1),
}
for _, idx := range rawIdxs {
if len(idx) < 3 {
continue
}
bucketName := idx[:3]
key := idx[3:]
batch.Indexes = append(batch.Indexes, BatchedWrite{
BucketName: bucketName,
Key: key,
Value: nil,
})
}
// Serialize event in compact format (without graph references for import)
resolver := &nullSerialResolver{}
compactData, compactErr := database.MarshalCompactEvent(ev, resolver)
if compactErr != nil {
// Fall back to legacy format
legacyBuf := bufpool.GetMedium()
defer bufpool.PutMedium(legacyBuf)
ev.MarshalBinary(legacyBuf)
compactData = bufpool.CopyBytes(legacyBuf)
}
batch.EventData = compactData
// Store serial -> event ID mapping
batch.Indexes = append(batch.Indexes, BatchedWrite{
BucketName: bucketSei,
Key: makeSerialKey(serial),
Value: ev.ID[:],
})
// Add to batcher (no graph vertex, no pubkey lookups)
return b.batcher.Add(batch)
}
// nullSerialResolver returns 0 for all lookups, used for fast import
// where we don't need pubkey/event serial references in compact format
type nullSerialResolver struct{}
func (r *nullSerialResolver) GetOrCreatePubkeySerial(pubkey []byte) (uint64, error) {
return 0, nil
}
func (r *nullSerialResolver) GetPubkeyBySerial(serial uint64) ([]byte, error) {
return nil, nil
}
func (r *nullSerialResolver) GetEventSerialById(eventID []byte) (uint64, bool, error) {
return 0, false, nil
}
func (r *nullSerialResolver) GetEventIdBySerial(serial uint64) ([]byte, error) {
return nil, nil
}

6
pkg/bbolt/save-event.go

@ -350,12 +350,15 @@ func (r *bboltSerialResolver) GetPubkeyBySerial(serial uint64) (pubkey []byte, e
r.b.db.View(func(tx *bolt.Tx) error { r.b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(bucketSpk) bucket := tx.Bucket(bucketSpk)
if bucket == nil { if bucket == nil {
err = errors.New("bbolt: spk bucket not found")
return nil return nil
} }
val := bucket.Get(makeSerialKey(serial)) val := bucket.Get(makeSerialKey(serial))
if val != nil { if val != nil {
pubkey = make([]byte, 32) pubkey = make([]byte, 32)
copy(pubkey, val) copy(pubkey, val)
} else {
err = errors.New("bbolt: pubkey serial not found")
} }
return nil return nil
}) })
@ -374,12 +377,15 @@ func (r *bboltSerialResolver) GetEventIdBySerial(serial uint64) (eventID []byte,
r.b.db.View(func(tx *bolt.Tx) error { r.b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(bucketSei) bucket := tx.Bucket(bucketSei)
if bucket == nil { if bucket == nil {
err = errors.New("bbolt: sei bucket not found")
return nil return nil
} }
val := bucket.Get(makeSerialKey(serial)) val := bucket.Get(makeSerialKey(serial))
if val != nil { if val != nil {
eventID = make([]byte, 32) eventID = make([]byte, 32)
copy(eventID, val) copy(eventID, val)
} else {
err = errors.New("bbolt: event serial not found")
} }
return nil return nil
}) })

6
pkg/bbolt/stubs.go

@ -84,8 +84,12 @@ func (b *B) DeleteExpired() {
} }
// ProcessDelete processes a deletion event. // ProcessDelete processes a deletion event.
// For migration from other backends, deletions have already been processed,
// so this is a no-op. Full implementation needed for production use.
func (b *B) ProcessDelete(ev *event.E, admins [][]byte) error { func (b *B) ProcessDelete(ev *event.E, admins [][]byte) error {
return errNotImplemented // TODO: Implement full deletion processing for production use
// For now, just return nil to allow migrations to proceed
return nil
} }
// CheckForDeleted checks if an event has been deleted. // CheckForDeleted checks if an event has been deleted.

1
pkg/bunker/acl_adapter.go

@ -26,6 +26,7 @@ func NewACLAuthzChecker() *ACLAuthzChecker {
token.ScopeNIP46: acliface.Write, // Bunker access requires write token.ScopeNIP46: acliface.Write, // Bunker access requires write
token.ScopeBlossom: acliface.Write, // Blossom access requires write token.ScopeBlossom: acliface.Write, // Blossom access requires write
token.ScopeAPI: acliface.Admin, // API access requires admin token.ScopeAPI: acliface.Admin, // API access requires admin
token.ScopeNRC: acliface.Write, // NRC tunnel access requires write
}, },
} }
} }

1
pkg/cashu/token/token.go

@ -21,6 +21,7 @@ const (
ScopeNIP46 = "nip46" // NIP-46 remote signing / bunker ScopeNIP46 = "nip46" // NIP-46 remote signing / bunker
ScopeBlossom = "blossom" // Blossom media server ScopeBlossom = "blossom" // Blossom media server
ScopeAPI = "api" // HTTP API access ScopeAPI = "api" // HTTP API access
ScopeNRC = "nrc" // Nostr Relay Connect tunneling
) )
// WildcardKind indicates all kinds are permitted. // WildcardKind indicates all kinds are permitted.

49
pkg/database/bufpool/pool_wasm.go

@ -0,0 +1,49 @@
//go:build js && wasm
// Package bufpool provides buffer pools for reducing GC pressure in hot paths.
// This is the WASM version which uses simple allocations since sync.Pool
// behavior differs in WASM environments.
package bufpool
import (
"bytes"
)
const (
// SmallBufferSize for index keys (8-64 bytes typical)
SmallBufferSize = 64
// MediumBufferSize for event encoding (300-1000 bytes typical)
MediumBufferSize = 1024
)
// GetSmall returns a small buffer (64 bytes).
// In WASM, we simply allocate new buffers as sync.Pool is less effective.
func GetSmall() *bytes.Buffer {
return bytes.NewBuffer(make([]byte, 0, SmallBufferSize))
}
// PutSmall is a no-op in WASM; the buffer will be garbage collected.
func PutSmall(buf *bytes.Buffer) {
// No-op in WASM
}
// GetMedium returns a medium buffer (1KB).
func GetMedium() *bytes.Buffer {
return bytes.NewBuffer(make([]byte, 0, MediumBufferSize))
}
// PutMedium is a no-op in WASM; the buffer will be garbage collected.
func PutMedium(buf *bytes.Buffer) {
// No-op in WASM
}
// CopyBytes copies the buffer contents to a new slice.
func CopyBytes(buf *bytes.Buffer) []byte {
if buf == nil || buf.Len() == 0 {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
}

40
pkg/database/compact_event.go

@ -52,6 +52,20 @@ const (
TagElementPubkeySerial = 0x01 // Pubkey serial reference (5 bytes) TagElementPubkeySerial = 0x01 // Pubkey serial reference (5 bytes)
TagElementEventSerial = 0x02 // Event ID serial reference (5 bytes) TagElementEventSerial = 0x02 // Event ID serial reference (5 bytes)
TagElementEventIdFull = 0x03 // Full event ID (32 bytes) - for unknown refs TagElementEventIdFull = 0x03 // Full event ID (32 bytes) - for unknown refs
// Sanity limits to prevent OOM from corrupt data
MaxTagsPerEvent = 10000 // Maximum number of tags in an event
MaxTagElements = 100 // Maximum elements in a single tag
MaxContentLength = 10 << 20 // 10MB max content
MaxTagElementLength = 1 << 20 // 1MB max for a single tag element
)
var (
ErrTooManyTags = errors.New("corrupt data: too many tags")
ErrTooManyTagElems = errors.New("corrupt data: too many tag elements")
ErrContentTooLarge = errors.New("corrupt data: content too large")
ErrTagElementTooLong = errors.New("corrupt data: tag element too long")
ErrUnknownTagElemType = errors.New("corrupt data: unknown tag element type")
) )
// SerialResolver is an interface for resolving serials during compact encoding/decoding. // SerialResolver is an interface for resolving serials during compact encoding/decoding.
@ -287,12 +301,15 @@ func UnmarshalCompactEvent(data []byte, eventId []byte, resolver SerialResolver)
if nTags, err = varint.Decode(r); chk.E(err) { if nTags, err = varint.Decode(r); chk.E(err) {
return nil, err return nil, err
} }
if nTags > MaxTagsPerEvent {
return nil, ErrTooManyTags // Don't log - caller handles gracefully
}
if nTags > 0 { if nTags > 0 {
ev.Tags = tag.NewSWithCap(int(nTags)) ev.Tags = tag.NewSWithCap(int(nTags))
for i := uint64(0); i < nTags; i++ { for i := uint64(0); i < nTags; i++ {
var t *tag.T var t *tag.T
if t, err = decodeCompactTag(r, resolver); chk.E(err) { if t, err = decodeCompactTag(r, resolver); err != nil {
return nil, err return nil, err // Don't log corruption errors
} }
*ev.Tags = append(*ev.Tags, t) *ev.Tags = append(*ev.Tags, t)
} }
@ -303,6 +320,9 @@ func UnmarshalCompactEvent(data []byte, eventId []byte, resolver SerialResolver)
if contentLen, err = varint.Decode(r); chk.E(err) { if contentLen, err = varint.Decode(r); chk.E(err) {
return nil, err return nil, err
} }
if contentLen > MaxContentLength {
return nil, ErrContentTooLarge
}
ev.Content = make([]byte, contentLen) ev.Content = make([]byte, contentLen)
if _, err = io.ReadFull(r, ev.Content); chk.E(err) { if _, err = io.ReadFull(r, ev.Content); chk.E(err) {
return nil, err return nil, err
@ -320,16 +340,19 @@ func UnmarshalCompactEvent(data []byte, eventId []byte, resolver SerialResolver)
// decodeCompactTag decodes a single tag from compact format. // decodeCompactTag decodes a single tag from compact format.
func decodeCompactTag(r io.Reader, resolver SerialResolver) (t *tag.T, err error) { func decodeCompactTag(r io.Reader, resolver SerialResolver) (t *tag.T, err error) {
var nElems uint64 var nElems uint64
if nElems, err = varint.Decode(r); chk.E(err) { if nElems, err = varint.Decode(r); err != nil {
return nil, err return nil, err
} }
if nElems > MaxTagElements {
return nil, ErrTooManyTagElems
}
t = tag.NewWithCap(int(nElems)) t = tag.NewWithCap(int(nElems))
for i := uint64(0); i < nElems; i++ { for i := uint64(0); i < nElems; i++ {
var elem []byte var elem []byte
if elem, err = decodeTagElement(r, resolver); chk.E(err) { if elem, err = decodeTagElement(r, resolver); err != nil {
return nil, err return nil, err // Don't log corruption errors
} }
t.T = append(t.T, elem) t.T = append(t.T, elem)
} }
@ -350,9 +373,12 @@ func decodeTagElement(r io.Reader, resolver SerialResolver) (elem []byte, err er
case TagElementRaw: case TagElementRaw:
// Raw bytes: varint length + data // Raw bytes: varint length + data
var length uint64 var length uint64
if length, err = varint.Decode(r); chk.E(err) { if length, err = varint.Decode(r); err != nil {
return nil, err return nil, err
} }
if length > MaxTagElementLength {
return nil, ErrTagElementTooLong
}
elem = make([]byte, length) elem = make([]byte, length)
if _, err = io.ReadFull(r, elem); err != nil { if _, err = io.ReadFull(r, elem); err != nil {
return nil, err return nil, err
@ -402,7 +428,7 @@ func decodeTagElement(r io.Reader, resolver SerialResolver) (elem []byte, err er
return elem, nil return elem, nil
default: default:
return nil, errors.New("unknown tag element type flag") return nil, ErrUnknownTagElemType
} }
} }

206
pkg/database/nrc.go

@ -0,0 +1,206 @@
//go:build !(js && wasm)
package database
import (
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"git.mleku.dev/mleku/nostr/crypto/keys"
"git.mleku.dev/mleku/nostr/encoders/hex"
)
// Key prefixes for NRC data
const (
nrcConnectionPrefix = "nrc:conn:" // NRC connections by ID
)
// NRCConnection stores an NRC connection configuration in the database.
type NRCConnection struct {
ID string `json:"id"` // Unique identifier (hex of first 8 bytes of secret)
Label string `json:"label"` // Human-readable label (e.g., "Phone", "Laptop")
Secret []byte `json:"secret"` // 32-byte secret for client authentication
CreatedAt int64 `json:"created_at"` // Unix timestamp
LastUsed int64 `json:"last_used"` // Unix timestamp of last connection (0 if never)
UseCashu bool `json:"use_cashu"` // Whether to include CAT token in URI
}
// GetNRCConnection retrieves an NRC connection by ID.
func (d *D) GetNRCConnection(id string) (conn *NRCConnection, err error) {
key := []byte(nrcConnectionPrefix + id)
err = d.DB.View(func(txn *badger.Txn) error {
item, err := txn.Get(key)
if errors.Is(err, badger.ErrKeyNotFound) {
return err
}
if err != nil {
return err
}
return item.Value(func(val []byte) error {
conn = &NRCConnection{}
return json.Unmarshal(val, conn)
})
})
return
}
// SaveNRCConnection stores an NRC connection in the database.
func (d *D) SaveNRCConnection(conn *NRCConnection) error {
data, err := json.Marshal(conn)
if err != nil {
return fmt.Errorf("failed to marshal connection: %w", err)
}
key := []byte(nrcConnectionPrefix + conn.ID)
return d.DB.Update(func(txn *badger.Txn) error {
return txn.Set(key, data)
})
}
// DeleteNRCConnection removes an NRC connection from the database.
func (d *D) DeleteNRCConnection(id string) error {
key := []byte(nrcConnectionPrefix + id)
return d.DB.Update(func(txn *badger.Txn) error {
if err := txn.Delete(key); err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
return err
}
return nil
})
}
// GetAllNRCConnections returns all NRC connections.
func (d *D) GetAllNRCConnections() (conns []*NRCConnection, err error) {
prefix := []byte(nrcConnectionPrefix)
err = d.DB.View(func(txn *badger.Txn) error {
opts := badger.DefaultIteratorOptions
opts.Prefix = prefix
it := txn.NewIterator(opts)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
err := item.Value(func(val []byte) error {
conn := &NRCConnection{}
if err := json.Unmarshal(val, conn); err != nil {
return err
}
conns = append(conns, conn)
return nil
})
if err != nil {
return err
}
}
return nil
})
return
}
// CreateNRCConnection generates a new NRC connection with a random secret.
func (d *D) CreateNRCConnection(label string, useCashu bool) (*NRCConnection, error) {
// Generate random 32-byte secret
secret := make([]byte, 32)
if _, err := rand.Read(secret); err != nil {
return nil, fmt.Errorf("failed to generate random secret: %w", err)
}
// Use first 8 bytes of secret as ID (hex encoded = 16 chars)
id := string(hex.Enc(secret[:8]))
conn := &NRCConnection{
ID: id,
Label: label,
Secret: secret,
CreatedAt: time.Now().Unix(),
LastUsed: 0,
UseCashu: useCashu,
}
if err := d.SaveNRCConnection(conn); chk.E(err) {
return nil, err
}
log.I.F("created NRC connection: id=%s label=%s cashu=%v", id, label, useCashu)
return conn, nil
}
// GetNRCConnectionURI generates the full connection URI for a connection.
// relayPubkey is the relay's public key (32 bytes).
// rendezvousURL is the public relay URL.
// mintURL is the CAT mint URL (required if useCashu is true).
func (d *D) GetNRCConnectionURI(conn *NRCConnection, relayPubkey []byte, rendezvousURL, mintURL string) (string, error) {
if len(relayPubkey) != 32 {
return "", fmt.Errorf("invalid relay pubkey length: %d", len(relayPubkey))
}
if rendezvousURL == "" {
return "", fmt.Errorf("rendezvous URL is required")
}
relayPubkeyHex := hex.Enc(relayPubkey)
secretHex := hex.Enc(conn.Secret)
var uri string
if conn.UseCashu {
if mintURL == "" {
return "", fmt.Errorf("mint URL is required for CAT authentication")
}
// CAT-based URI includes both secret (for non-CAT relays) and CAT auth
uri = fmt.Sprintf("nostr+relayconnect://%s?relay=%s&secret=%s&auth=cat&mint=%s",
relayPubkeyHex, rendezvousURL, secretHex, mintURL)
} else {
// Secret-only URI
uri = fmt.Sprintf("nostr+relayconnect://%s?relay=%s&secret=%s",
relayPubkeyHex, rendezvousURL, secretHex)
}
if conn.Label != "" {
uri += fmt.Sprintf("&name=%s", conn.Label)
}
return uri, nil
}
// GetNRCAuthorizedSecrets returns a map of derived pubkeys to labels for all connections.
// This is used by the NRC bridge to authorize incoming connections.
func (d *D) GetNRCAuthorizedSecrets() (map[string]string, error) {
conns, err := d.GetAllNRCConnections()
if err != nil {
return nil, err
}
result := make(map[string]string)
for _, conn := range conns {
// Derive pubkey from secret
pubkey, err := keys.SecretBytesToPubKeyBytes(conn.Secret)
if err != nil {
log.W.F("failed to derive pubkey for NRC connection %s: %v", conn.ID, err)
continue
}
pubkeyHex := string(hex.Enc(pubkey))
result[pubkeyHex] = conn.Label
}
return result, nil
}
// UpdateNRCConnectionLastUsed updates the last used timestamp for a connection.
func (d *D) UpdateNRCConnectionLastUsed(id string) error {
conn, err := d.GetNRCConnection(id)
if err != nil {
return err
}
conn.LastUsed = time.Now().Unix()
return d.SaveNRCConnection(conn)
}
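A minimal usage sketch for the functions above (provisionDevice is a hypothetical helper; it assumes the caller already holds the open *D handle and the relay's 32-byte pubkey):

func provisionDevice(db *D, relayPubkey []byte, rendezvousURL string) (string, error) {
	// Create a secret-based connection; pass true and a mint URL below for CAT auth.
	conn, err := db.CreateNRCConnection("Phone", false)
	if err != nil {
		return "", err
	}
	// The returned nostr+relayconnect:// URI is what the client pastes or scans to pair.
	return db.GetNRCConnectionURI(conn, relayPubkey, rendezvousURL, "")
}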

202
pkg/database/tokenize_wasm.go

@ -0,0 +1,202 @@
//go:build js && wasm
package database
import (
"crypto/sha256"
"strings"
"unicode"
)
// TokenHashes extracts unique word hashes (8-byte truncated sha256) from content.
// Rules:
// - Unicode-aware: words are sequences of letters or numbers.
// - Lowercased using unicode case mapping.
// - Ignore URLs (starting with http://, https://, www., or containing "://").
// - Ignore nostr: URIs and #[n] mentions.
// - Ignore words shorter than 2 runes.
// - Exclude 64-character hexadecimal strings (likely IDs/pubkeys).
func TokenHashes(content []byte) [][]byte {
s := string(content)
var out [][]byte
seen := make(map[string]struct{})
i := 0
for i < len(s) {
r, size := rune(s[i]), 1
if r >= 0x80 {
r, size = utf8DecodeRuneInString(s[i:])
}
// Skip whitespace
if unicode.IsSpace(r) {
i += size
continue
}
// Skip URLs and schemes
if hasPrefixFold(s[i:], "http://") || hasPrefixFold(s[i:], "https://") || hasPrefixFold(s[i:], "nostr:") || hasPrefixFold(s[i:], "www.") {
i = skipUntilSpace(s, i)
continue
}
// If token contains "://" ahead, treat as URL and skip to space
if j := strings.Index(s[i:], "://"); j == 0 || (j > 0 && isWordStart(r)) {
// Only if it's at start of token
before := s[i : i+j]
if len(before) == 0 || allAlphaNum(before) {
i = skipUntilSpace(s, i)
continue
}
}
// Skip #[n] mentions
if r == '#' && i+size < len(s) && s[i+size] == '[' {
end := strings.IndexByte(s[i:], ']')
if end >= 0 {
i += end + 1
continue
}
}
// Collect a word
start := i
var runes []rune
for i < len(s) {
r2, size2 := rune(s[i]), 1
if r2 >= 0x80 {
r2, size2 = utf8DecodeRuneInString(s[i:])
}
if unicode.IsLetter(r2) || unicode.IsNumber(r2) {
// Normalize decorative unicode (small caps, fraktur) to ASCII
// before lowercasing for consistent indexing
runes = append(runes, unicode.ToLower(normalizeRune(r2)))
i += size2
continue
}
break
}
// If we didn't consume any rune for a word, advance by one rune to avoid stalling
if i == start {
_, size2 := utf8DecodeRuneInString(s[i:])
i += size2
continue
}
if len(runes) >= 2 {
w := string(runes)
// Exclude 64-char hex strings
if isHex64(w) {
continue
}
if _, ok := seen[w]; !ok {
seen[w] = struct{}{}
h := sha256.Sum256([]byte(w))
out = append(out, h[:8])
}
}
}
return out
}
func hasPrefixFold(s, prefix string) bool {
if len(s) < len(prefix) {
return false
}
for i := 0; i < len(prefix); i++ {
c := s[i]
p := prefix[i]
if c == p {
continue
}
// ASCII case-insensitive
if 'A' <= c && c <= 'Z' {
c = c - 'A' + 'a'
}
if 'A' <= p && p <= 'Z' {
p = p - 'A' + 'a'
}
if c != p {
return false
}
}
return true
}
func skipUntilSpace(s string, i int) int {
for i < len(s) {
r, size := rune(s[i]), 1
if r >= 0x80 {
r, size = utf8DecodeRuneInString(s[i:])
}
if unicode.IsSpace(r) {
return i
}
i += size
}
return i
}
func allAlphaNum(s string) bool {
for _, r := range s {
if !(unicode.IsLetter(r) || unicode.IsNumber(r)) {
return false
}
}
return true
}
func isWordStart(r rune) bool { return unicode.IsLetter(r) || unicode.IsNumber(r) }
// utf8DecodeRuneInString decodes the first UTF-8 rune from s.
// Returns the rune and the number of bytes consumed.
func utf8DecodeRuneInString(s string) (r rune, size int) {
if len(s) == 0 {
return 0, 0
}
// ASCII fast path
b := s[0]
if b < 0x80 {
return rune(b), 1
}
// Multi-byte: determine expected length from first byte
var expectedLen int
switch {
case b&0xE0 == 0xC0: // 110xxxxx - 2 bytes
expectedLen = 2
case b&0xF0 == 0xE0: // 1110xxxx - 3 bytes
expectedLen = 3
case b&0xF8 == 0xF0: // 11110xxx - 4 bytes
expectedLen = 4
default:
// Invalid UTF-8 start byte
return 0xFFFD, 1
}
if len(s) < expectedLen {
return 0xFFFD, 1
}
// Decode using Go's built-in rune conversion (simple and correct)
runes := []rune(s[:expectedLen])
if len(runes) == 0 {
return 0xFFFD, 1
}
return runes[0], expectedLen
}
// isHex64 returns true if s is exactly 64 hex characters (0-9, a-f)
func isHex64(s string) bool {
if len(s) != 64 {
return false
}
for i := 0; i < 64; i++ {
c := s[i]
if c >= '0' && c <= '9' {
continue
}
if c >= 'a' && c <= 'f' {
continue
}
if c >= 'A' && c <= 'F' {
continue
}
return false
}
return true
}
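A small illustration of the tokenizer rules above (countUniqueWords is a hypothetical helper; the content string is an arbitrary example):

func countUniqueWords() int {
	content := []byte("Death ᴅᴇᴀᴛʜ https://example.com/x #[0] ok")
	// "Death" and "ᴅᴇᴀᴛʜ" fold to the same word, the URL and the #[0] mention
	// are skipped, and "ok" is kept, so two unique 8-byte hashes come back.
	return len(TokenHashes(content))
}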

135
pkg/database/unicode_normalize_wasm.go

@ -0,0 +1,135 @@
//go:build js && wasm
package database
// normalizeRune maps decorative unicode characters (small caps, fraktur) back to
// their ASCII equivalents for consistent word indexing. This ensures that text
// written with decorative alphabets (e.g., "ᴅᴇᴀᴛʜ" or "𝔇𝔢𝔞𝔱𝔥") indexes the same
// as regular ASCII ("death").
//
// Character sets normalized:
// - Small Caps (used for DEATH-style text in Terry Pratchett tradition)
// - Mathematical Fraktur lowercase (𝔞-𝔷)
// - Mathematical Fraktur uppercase (𝔄-ℨ, including Letterlike Symbols block exceptions)
func normalizeRune(r rune) rune {
// Check small caps first (scattered codepoints)
if mapped, ok := smallCapsToASCII[r]; ok {
return mapped
}
// Check fraktur lowercase: U+1D51E to U+1D537 (contiguous range)
if r >= 0x1D51E && r <= 0x1D537 {
return 'a' + (r - 0x1D51E)
}
// Check fraktur uppercase main range: U+1D504 to U+1D51C (with gaps)
if r >= 0x1D504 && r <= 0x1D51C {
if mapped, ok := frakturUpperToASCII[r]; ok {
return mapped
}
}
// Check fraktur uppercase exceptions from Letterlike Symbols block
if mapped, ok := frakturLetterlikeToASCII[r]; ok {
return mapped
}
return r
}
// smallCapsToASCII maps small capital letters to lowercase ASCII.
// These are scattered across multiple Unicode blocks (IPA Extensions,
// Phonetic Extensions, Latin Extended-D).
var smallCapsToASCII = map[rune]rune{
'ᴀ': 'a', // U+1D00 LATIN LETTER SMALL CAPITAL A
'ʙ': 'b', // U+0299 LATIN LETTER SMALL CAPITAL B
'ᴄ': 'c', // U+1D04 LATIN LETTER SMALL CAPITAL C
'ᴅ': 'd', // U+1D05 LATIN LETTER SMALL CAPITAL D
'ᴇ': 'e', // U+1D07 LATIN LETTER SMALL CAPITAL E
'ꜰ': 'f', // U+A730 LATIN LETTER SMALL CAPITAL F
'ɢ': 'g', // U+0262 LATIN LETTER SMALL CAPITAL G
'ʜ': 'h', // U+029C LATIN LETTER SMALL CAPITAL H
'ɪ': 'i', // U+026A LATIN LETTER SMALL CAPITAL I
'ᴊ': 'j', // U+1D0A LATIN LETTER SMALL CAPITAL J
'ᴋ': 'k', // U+1D0B LATIN LETTER SMALL CAPITAL K
'ʟ': 'l', // U+029F LATIN LETTER SMALL CAPITAL L
'ᴍ': 'm', // U+1D0D LATIN LETTER SMALL CAPITAL M
'ɴ': 'n', // U+0274 LATIN LETTER SMALL CAPITAL N
'ᴏ': 'o', // U+1D0F LATIN LETTER SMALL CAPITAL O
'ᴘ': 'p', // U+1D18 LATIN LETTER SMALL CAPITAL P
'ǫ': 'q', // U+01EB LATIN SMALL LETTER O WITH OGONEK (no true small cap Q)
'ʀ': 'r', // U+0280 LATIN LETTER SMALL CAPITAL R
'ꜱ': 's', // U+A731 LATIN LETTER SMALL CAPITAL S
'ᴛ': 't', // U+1D1B LATIN LETTER SMALL CAPITAL T
'ᴜ': 'u', // U+1D1C LATIN LETTER SMALL CAPITAL U
'ᴠ': 'v', // U+1D20 LATIN LETTER SMALL CAPITAL V
'ᴡ': 'w', // U+1D21 LATIN LETTER SMALL CAPITAL W
// Note: no small cap X exists in standard use
'ʏ': 'y', // U+028F LATIN LETTER SMALL CAPITAL Y
'ᴢ': 'z', // U+1D22 LATIN LETTER SMALL CAPITAL Z
}
// frakturUpperToASCII maps Mathematical Fraktur uppercase letters to lowercase ASCII.
// The main range U+1D504-U+1D51C has gaps where C, H, I, R, Z use Letterlike Symbols.
var frakturUpperToASCII = map[rune]rune{
'𝔄': 'a', // U+1D504 MATHEMATICAL FRAKTUR CAPITAL A
'𝔅': 'b', // U+1D505 MATHEMATICAL FRAKTUR CAPITAL B
// C is at U+212D (Letterlike Symbols)
'𝔇': 'd', // U+1D507 MATHEMATICAL FRAKTUR CAPITAL D
'𝔈': 'e', // U+1D508 MATHEMATICAL FRAKTUR CAPITAL E
'𝔉': 'f', // U+1D509 MATHEMATICAL FRAKTUR CAPITAL F
'𝔊': 'g', // U+1D50A MATHEMATICAL FRAKTUR CAPITAL G
// H is at U+210C (Letterlike Symbols)
// I is at U+2111 (Letterlike Symbols)
'𝔍': 'j', // U+1D50D MATHEMATICAL FRAKTUR CAPITAL J
'𝔎': 'k', // U+1D50E MATHEMATICAL FRAKTUR CAPITAL K
'𝔏': 'l', // U+1D50F MATHEMATICAL FRAKTUR CAPITAL L
'𝔐': 'm', // U+1D510 MATHEMATICAL FRAKTUR CAPITAL M
'𝔑': 'n', // U+1D511 MATHEMATICAL FRAKTUR CAPITAL N
'𝔒': 'o', // U+1D512 MATHEMATICAL FRAKTUR CAPITAL O
'𝔓': 'p', // U+1D513 MATHEMATICAL FRAKTUR CAPITAL P
'𝔔': 'q', // U+1D514 MATHEMATICAL FRAKTUR CAPITAL Q
// R is at U+211C (Letterlike Symbols)
'𝔖': 's', // U+1D516 MATHEMATICAL FRAKTUR CAPITAL S
'𝔗': 't', // U+1D517 MATHEMATICAL FRAKTUR CAPITAL T
'𝔘': 'u', // U+1D518 MATHEMATICAL FRAKTUR CAPITAL U
'𝔙': 'v', // U+1D519 MATHEMATICAL FRAKTUR CAPITAL V
'𝔚': 'w', // U+1D51A MATHEMATICAL FRAKTUR CAPITAL W
'𝔛': 'x', // U+1D51B MATHEMATICAL FRAKTUR CAPITAL X
'𝔜': 'y', // U+1D51C MATHEMATICAL FRAKTUR CAPITAL Y
// Z is at U+2128 (Letterlike Symbols)
}
// frakturLetterlikeToASCII maps the Fraktur characters that live in the
// Letterlike Symbols block (U+2100-U+214F) rather than Mathematical Alphanumeric Symbols.
var frakturLetterlikeToASCII = map[rune]rune{
'ℭ': 'c', // U+212D BLACK-LETTER CAPITAL C
'ℌ': 'h', // U+210C BLACK-LETTER CAPITAL H
'ℑ': 'i', // U+2111 BLACK-LETTER CAPITAL I
'ℜ': 'r', // U+211C BLACK-LETTER CAPITAL R
'ℨ': 'z', // U+2128 BLACK-LETTER CAPITAL Z
}
// hasDecorativeUnicode checks if text contains any small caps or fraktur characters
// that would need normalization. Used by migration to identify events needing re-indexing.
func hasDecorativeUnicode(s string) bool {
for _, r := range s {
// Check small caps
if _, ok := smallCapsToASCII[r]; ok {
return true
}
// Check fraktur lowercase range
if r >= 0x1D51E && r <= 0x1D537 {
return true
}
// Check fraktur uppercase range
if r >= 0x1D504 && r <= 0x1D51C {
return true
}
// Check letterlike symbols fraktur
if _, ok := frakturLetterlikeToASCII[r]; ok {
return true
}
}
return false
}
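A tiny illustration of the mapping (foldDecorative is a hypothetical helper):

func foldDecorative(s string) string {
	out := make([]rune, 0, len(s))
	for _, r := range s {
		out = append(out, normalizeRune(r))
	}
	// "ᴅᴇᴀᴛʜ" and "𝔇𝔢𝔞𝔱𝔥" both come back as "death"; unmapped runes pass through unchanged.
	return string(out)
}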

623
pkg/protocol/nrc/bridge.go

@ -0,0 +1,623 @@
package nrc
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"git.mleku.dev/mleku/nostr/crypto/encryption"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer"
"git.mleku.dev/mleku/nostr/ws"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/cashu/token"
"next.orly.dev/pkg/cashu/verifier"
)
const (
// KindNRCRequest is the event kind for NRC requests.
KindNRCRequest = 24891
// KindNRCResponse is the event kind for NRC responses.
KindNRCResponse = 24892
)
// BridgeConfig holds configuration for the NRC bridge.
type BridgeConfig struct {
// RendezvousURL is the WebSocket URL of the public relay.
RendezvousURL string
// LocalRelayURL is the WebSocket URL of the local private relay.
LocalRelayURL string
// Signer is the relay's signer for signing response events.
Signer signer.I
// AuthorizedSecrets maps derived pubkeys to device names (secret-based auth).
AuthorizedSecrets map[string]string
// CashuVerifier is used for CAT token verification (optional).
CashuVerifier *verifier.Verifier
// SessionTimeout is the inactivity timeout for sessions.
SessionTimeout time.Duration
}
// Bridge connects a private relay to a public rendezvous relay.
type Bridge struct {
config *BridgeConfig
sessions *SessionManager
// rendezvousConn is the connection to the rendezvous relay.
rendezvousConn *ws.Client
// mu protects connection state.
mu sync.RWMutex
// ctx is the bridge context.
ctx context.Context
// cancel cancels the bridge context.
cancel context.CancelFunc
}
// NewBridge creates a new NRC bridge.
func NewBridge(config *BridgeConfig) *Bridge {
ctx, cancel := context.WithCancel(context.Background())
timeout := config.SessionTimeout
if timeout == 0 {
timeout = DefaultSessionTimeout
}
return &Bridge{
config: config,
sessions: NewSessionManager(timeout),
ctx: ctx,
cancel: cancel,
}
}
// Start starts the bridge and begins listening for NRC requests.
func (b *Bridge) Start() error {
log.I.F("starting NRC bridge, rendezvous: %s, local: %s",
b.config.RendezvousURL, b.config.LocalRelayURL)
// Start session cleanup goroutine
go b.cleanupLoop()
// Start the main bridge loop with auto-reconnection
go b.runLoop()
return nil
}
// Stop stops the bridge.
func (b *Bridge) Stop() {
log.I.F("stopping NRC bridge")
b.cancel()
b.sessions.Close()
b.mu.Lock()
defer b.mu.Unlock()
if b.rendezvousConn != nil {
b.rendezvousConn.Close()
}
}
// UpdateAuthorizedSecrets updates the map of authorized secrets.
// This allows dynamic management of authorized connections through the UI.
func (b *Bridge) UpdateAuthorizedSecrets(secrets map[string]string) {
b.mu.Lock()
defer b.mu.Unlock()
b.config.AuthorizedSecrets = secrets
}
// cleanupLoop periodically cleans up expired sessions.
func (b *Bridge) cleanupLoop() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for {
select {
case <-b.ctx.Done():
return
case <-ticker.C:
removed := b.sessions.CleanupExpired()
if removed > 0 {
log.D.F("cleaned up %d expired NRC sessions", removed)
}
}
}
}
// runLoop runs the main bridge loop with auto-reconnection.
func (b *Bridge) runLoop() {
delay := time.Second
for {
select {
case <-b.ctx.Done():
return
default:
}
err := b.runOnce()
if err != nil {
if b.ctx.Err() != nil {
return // Context cancelled, exit cleanly
}
log.W.F("NRC bridge error: %v, reconnecting in %v", err, delay)
select {
case <-time.After(delay):
if delay < 30*time.Second {
delay *= 2
}
case <-b.ctx.Done():
return
}
continue
}
delay = time.Second
}
}
// runOnce runs a single iteration of the bridge.
func (b *Bridge) runOnce() error {
// Connect to rendezvous relay
rendezvousConn, err := ws.RelayConnect(b.ctx, b.config.RendezvousURL)
if chk.E(err) {
return fmt.Errorf("%w: %v", ErrRendezvousConnectionFailed, err)
}
defer rendezvousConn.Close()
b.mu.Lock()
b.rendezvousConn = rendezvousConn
b.mu.Unlock()
// Subscribe to NRC request events
relayPubkeyHex := hex.Enc(b.config.Signer.Pub())
sub, err := rendezvousConn.Subscribe(
b.ctx,
filter.NewS(&filter.F{
Kinds: kind.NewS(kind.New(KindNRCRequest)),
Tags: tag.NewS(
tag.NewFromAny("p", relayPubkeyHex),
),
Since: &timestamp.T{V: time.Now().Unix()},
}),
)
if chk.E(err) {
return fmt.Errorf("subscription failed: %w", err)
}
defer sub.Unsub()
log.I.F("NRC bridge listening for requests on %s", b.config.RendezvousURL)
// Process incoming request events
for {
select {
case <-b.ctx.Done():
return nil
case ev := <-sub.Events:
if ev == nil {
return fmt.Errorf("subscription closed")
}
go b.handleRequest(ev)
}
}
}
// handleRequest handles a single NRC request event.
func (b *Bridge) handleRequest(ev *event.E) {
ctx, cancel := context.WithTimeout(b.ctx, 30*time.Second)
defer cancel()
// Extract session ID from tags
sessionID := ""
sessionTag := ev.Tags.GetFirst([]byte("session"))
if sessionTag != nil && sessionTag.Len() >= 2 {
sessionID = string(sessionTag.Value())
}
if sessionID == "" {
log.W.F("NRC request missing session tag from %s", hex.Enc(ev.Pubkey[:]))
return
}
// Verify authorization
conversationKey, authMode, deviceName, err := b.authorize(ctx, ev)
if err != nil {
log.W.F("NRC authorization failed for %s: %v", hex.Enc(ev.Pubkey[:]), err)
b.sendError(ctx, ev, sessionID, "unauthorized: "+err.Error())
return
}
// Get or create session
session := b.sessions.GetOrCreate(sessionID, ev.Pubkey[:], conversationKey, authMode, deviceName)
session.Touch()
// Decrypt request content
decrypted, err := encryption.Decrypt(conversationKey, string(ev.Content))
if err != nil {
log.W.F("NRC decryption failed: %v", err)
b.sendError(ctx, ev, sessionID, "decryption failed")
return
}
// Parse request message
reqMsg, err := ParseRequestContent([]byte(decrypted))
if err != nil {
log.W.F("NRC invalid request format: %v", err)
b.sendError(ctx, ev, sessionID, "invalid request format")
return
}
log.D.F("NRC request: type=%s session=%s from=%s",
reqMsg.Type, sessionID, hex.Enc(ev.Pubkey[:]))
// Forward to local relay and handle response
if err := b.forwardToLocalRelay(ctx, session, ev, reqMsg); err != nil {
log.W.F("NRC forward failed: %v", err)
b.sendError(ctx, ev, sessionID, "relay error: "+err.Error())
}
}
// authorize checks if the request is authorized and returns the conversation key.
func (b *Bridge) authorize(ctx context.Context, ev *event.E) (conversationKey []byte, authMode AuthMode, deviceName string, err error) {
clientPubkey := ev.Pubkey[:]
clientPubkeyHex := string(hex.Enc(clientPubkey))
// Check for CAT token in tags
cashuTag := ev.Tags.GetFirst([]byte("cashu"))
if cashuTag != nil && cashuTag.Len() >= 2 {
// CAT authentication
if b.config.CashuVerifier == nil {
err = fmt.Errorf("CAT auth not configured")
return
}
tokenStr := string(cashuTag.Value())
var tok *token.Token
tok, err = token.Parse(tokenStr)
if chk.E(err) {
err = fmt.Errorf("invalid CAT token: %w", err)
return
}
if err = b.config.CashuVerifier.VerifyForScope(ctx, tok, token.ScopeNRC, ""); chk.E(err) {
return
}
// CAT auth uses ECDH between relay key and client's Nostr key
conversationKey, err = encryption.GenerateConversationKey(
b.config.Signer.Sec(),
clientPubkey,
)
if chk.E(err) {
return
}
authMode = AuthModeCAT
return
}
// Secret-based authentication: check if client pubkey is in authorized list
if name, ok := b.config.AuthorizedSecrets[clientPubkeyHex]; ok {
// Secret auth uses ECDH between relay key and client's derived key
conversationKey, err = encryption.GenerateConversationKey(
b.config.Signer.Sec(),
clientPubkey,
)
if chk.E(err) {
return
}
authMode = AuthModeSecret
deviceName = name
return
}
err = ErrUnauthorized
return
}
// forwardToLocalRelay forwards a request to the local relay and handles responses.
func (b *Bridge) forwardToLocalRelay(ctx context.Context, session *Session, reqEvent *event.E, reqMsg *RequestMessage) error {
// Connect to local relay
localConn, err := ws.RelayConnect(ctx, b.config.LocalRelayURL)
if chk.E(err) {
return fmt.Errorf("%w: %v", ErrRelayConnectionFailed, err)
}
defer localConn.Close()
// Handle different message types
switch reqMsg.Type {
case "REQ":
return b.handleREQ(ctx, session, reqEvent, reqMsg, localConn)
case "EVENT":
return b.handleEVENT(ctx, session, reqEvent, reqMsg, localConn)
case "CLOSE":
return b.handleCLOSE(ctx, session, reqEvent, reqMsg)
case "COUNT":
return b.handleCOUNT(ctx, session, reqEvent, reqMsg, localConn)
default:
return fmt.Errorf("unsupported message type: %s", reqMsg.Type)
}
}
// handleREQ handles a REQ message and forwards responses.
func (b *Bridge) handleREQ(ctx context.Context, session *Session, reqEvent *event.E, reqMsg *RequestMessage, conn *ws.Client) error {
// Extract subscription ID and filters from payload
// Payload: ["REQ", "<sub_id>", filter1, filter2, ...]
if len(reqMsg.Payload) < 3 {
return fmt.Errorf("invalid REQ payload")
}
subID, ok := reqMsg.Payload[1].(string)
if !ok {
return fmt.Errorf("invalid subscription ID")
}
// Parse filters from payload
var filters []*filter.F
for i := 2; i < len(reqMsg.Payload); i++ {
filterMap, ok := reqMsg.Payload[i].(map[string]any)
if !ok {
continue
}
filterBytes, err := json.Marshal(filterMap)
if err != nil {
continue
}
var f filter.F
if err := json.Unmarshal(filterBytes, &f); err != nil {
continue
}
filters = append(filters, &f)
}
if len(filters) == 0 {
return fmt.Errorf("no valid filters in REQ")
}
// Add subscription to session
if err := session.AddSubscription(subID); err != nil {
return err
}
// Create filter set
filterSet := filter.NewS(filters...)
// Subscribe to local relay
sub, err := conn.Subscribe(ctx, filterSet)
if chk.E(err) {
session.RemoveSubscription(subID)
return fmt.Errorf("local subscribe failed: %w", err)
}
defer sub.Unsub()
// Forward events until EOSE or timeout
for {
select {
case <-ctx.Done():
return ctx.Err()
case ev := <-sub.Events:
if ev == nil {
// Subscription closed, send EOSE
resp := &ResponseMessage{
Type: "EOSE",
Payload: []any{"EOSE", subID},
}
return b.sendResponse(ctx, reqEvent, session, resp)
}
// Convert event to JSON-compatible map
eventBytes, err := json.Marshal(ev)
if err != nil {
continue
}
var eventMap map[string]any
if err := json.Unmarshal(eventBytes, &eventMap); err != nil {
continue
}
// Send EVENT response
resp := &ResponseMessage{
Type: "EVENT",
Payload: []any{"EVENT", subID, eventMap},
}
if err := b.sendResponse(ctx, reqEvent, session, resp); err != nil {
log.W.F("failed to send event response: %v", err)
}
session.IncrementEventCount(subID)
case <-sub.EndOfStoredEvents:
// Send EOSE
session.MarkEOSE(subID)
resp := &ResponseMessage{
Type: "EOSE",
Payload: []any{"EOSE", subID},
}
return b.sendResponse(ctx, reqEvent, session, resp)
}
}
}
// handleEVENT handles an EVENT message and forwards the OK response.
func (b *Bridge) handleEVENT(ctx context.Context, session *Session, reqEvent *event.E, reqMsg *RequestMessage, conn *ws.Client) error {
// Extract event from payload: ["EVENT", {...event...}]
if len(reqMsg.Payload) < 2 {
return fmt.Errorf("invalid EVENT payload")
}
eventMap, ok := reqMsg.Payload[1].(map[string]any)
if !ok {
return fmt.Errorf("invalid event data")
}
// Parse event
eventBytes, err := json.Marshal(eventMap)
if err != nil {
return fmt.Errorf("failed to marshal event: %w", err)
}
var ev event.E
if err := json.Unmarshal(eventBytes, &ev); err != nil {
return fmt.Errorf("failed to unmarshal event: %w", err)
}
// Publish to local relay
err = conn.Publish(ctx, &ev)
success := err == nil
message := ""
if err != nil {
message = err.Error()
}
// Send OK response
resp := &ResponseMessage{
Type: "OK",
Payload: []any{"OK", string(hex.Enc(ev.ID[:])), success, message},
}
return b.sendResponse(ctx, reqEvent, session, resp)
}
// handleCLOSE handles a CLOSE message.
func (b *Bridge) handleCLOSE(ctx context.Context, session *Session, reqEvent *event.E, reqMsg *RequestMessage) error {
// Extract subscription ID: ["CLOSE", "<sub_id>"]
if len(reqMsg.Payload) >= 2 {
if subID, ok := reqMsg.Payload[1].(string); ok {
session.RemoveSubscription(subID)
}
}
// CLOSE doesn't have a response
return nil
}
// handleCOUNT handles a COUNT message.
func (b *Bridge) handleCOUNT(ctx context.Context, session *Session, reqEvent *event.E, reqMsg *RequestMessage, conn *ws.Client) error {
// COUNT is not supported via ws.Client directly, so reply with a NOTICE instead.	// COUNT is not supported via ws.Client directly, so reply with a NOTICE instead.
resp := &ResponseMessage{
Type: "NOTICE",
Payload: []any{"NOTICE", "COUNT not supported through NRC tunnel"},
}
return b.sendResponse(ctx, reqEvent, session, resp)
}
// sendResponse encrypts and sends a response to the client.
func (b *Bridge) sendResponse(ctx context.Context, reqEvent *event.E, session *Session, resp *ResponseMessage) error {
// Marshal response content
content, err := MarshalResponseContent(resp)
if err != nil {
return fmt.Errorf("marshal failed: %w", err)
}
// Encrypt content
encrypted, err := encryption.Encrypt(session.ConversationKey, content, nil)
if err != nil {
return fmt.Errorf("%w: %v", ErrEncryptionFailed, err)
}
// Build response event
respEvent := &event.E{
Content: []byte(encrypted),
CreatedAt: time.Now().Unix(),
Kind: KindNRCResponse,
Tags: tag.NewS(
tag.NewFromAny("p", hex.Enc(reqEvent.Pubkey[:])),
tag.NewFromAny("encryption", "nip44_v2"),
tag.NewFromAny("session", session.ID),
tag.NewFromAny("e", hex.Enc(reqEvent.ID[:])),
),
}
// Sign with relay key
if err := respEvent.Sign(b.config.Signer); chk.E(err) {
return fmt.Errorf("signing failed: %w", err)
}
// Publish to rendezvous relay
b.mu.RLock()
conn := b.rendezvousConn
b.mu.RUnlock()
if conn == nil {
return fmt.Errorf("not connected to rendezvous relay")
}
if err := conn.Publish(ctx, respEvent); chk.E(err) {
return fmt.Errorf("publish failed: %w", err)
}
return nil
}
// sendError sends an error response to the client.
func (b *Bridge) sendError(ctx context.Context, reqEvent *event.E, sessionID string, errMsg string) {
// For errors, we need to get or create a conversation key
// This is best-effort since we may not be able to authenticate
conversationKey, err := encryption.GenerateConversationKey(
b.config.Signer.Sec(),
reqEvent.Pubkey[:],
)
if err != nil {
log.W.F("failed to generate conversation key for error response: %v", err)
return
}
resp := &ResponseMessage{
Type: "NOTICE",
Payload: []any{"NOTICE", "nrc: " + errMsg},
}
content, err := MarshalResponseContent(resp)
if err != nil {
return
}
encrypted, err := encryption.Encrypt(conversationKey, content, nil)
if err != nil {
return
}
respEvent := &event.E{
Content: []byte(encrypted),
CreatedAt: time.Now().Unix(),
Kind: KindNRCResponse,
Tags: tag.NewS(
tag.NewFromAny("p", hex.Enc(reqEvent.Pubkey[:])),
tag.NewFromAny("encryption", "nip44_v2"),
tag.NewFromAny("session", sessionID),
tag.NewFromAny("e", hex.Enc(reqEvent.ID[:])),
),
}
if err := respEvent.Sign(b.config.Signer); err != nil {
return
}
b.mu.RLock()
conn := b.rendezvousConn
b.mu.RUnlock()
if conn != nil {
conn.Publish(ctx, respEvent)
}
}
// AddAuthorizedSecret adds an authorized secret (derived pubkey).
func (b *Bridge) AddAuthorizedSecret(pubkeyHex, deviceName string) {
b.config.AuthorizedSecrets[pubkeyHex] = deviceName
}
// RemoveAuthorizedSecret removes an authorized secret.
func (b *Bridge) RemoveAuthorizedSecret(pubkeyHex string) {
delete(b.config.AuthorizedSecrets, pubkeyHex)
}
// ListAuthorizedSecrets returns a copy of the authorized secrets map.
func (b *Bridge) ListAuthorizedSecrets() map[string]string {
result := make(map[string]string)
for k, v := range b.config.AuthorizedSecrets {
result[k] = v
}
return result
}
// SessionCount returns the number of active sessions.
func (b *Bridge) SessionCount() int {
return b.sessions.Count()
}
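A wiring sketch for the bridge (startBridge is a hypothetical helper; the URLs are placeholders, and the secrets map would normally come from the database's GetNRCAuthorizedSecrets):

func startBridge(relaySigner signer.I, secrets map[string]string) (*Bridge, error) {
	b := NewBridge(&BridgeConfig{
		RendezvousURL:     "wss://rendezvous.example.com",
		LocalRelayURL:     "ws://localhost:3334",
		Signer:            relaySigner,
		AuthorizedSecrets: secrets,
		SessionTimeout:    DefaultSessionTimeout,
	})
	// Start spawns the reconnect loop and the session cleanup loop; Stop tears both down.
	if err := b.Start(); err != nil {
		return nil, err
	}
	return b, nil
}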

513
pkg/protocol/nrc/client.go

@ -0,0 +1,513 @@
package nrc
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"git.mleku.dev/mleku/nostr/crypto/encryption"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer"
"git.mleku.dev/mleku/nostr/ws"
"github.com/google/uuid"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
)
// Client connects to a private relay through the NRC tunnel.
type Client struct {
uri *ConnectionURI
sessionID string
rendezvousConn *ws.Client
responseSub *ws.Subscription
conversationKey []byte
clientSigner signer.I
// pending maps request event IDs to response channels.
pending map[string]chan *ResponseMessage
pendingMu sync.Mutex
// subscriptions maps subscription IDs to event channels.
subscriptions map[string]chan *event.E
subscriptionsMu sync.Mutex
ctx context.Context
cancel context.CancelFunc
}
// NewClient creates a new NRC client from a connection URI.
func NewClient(connectionURI string) (*Client, error) {
uri, err := ParseConnectionURI(connectionURI)
if err != nil {
return nil, fmt.Errorf("invalid URI: %w", err)
}
if uri.AuthMode != AuthModeSecret {
return nil, fmt.Errorf("CAT authentication not yet supported in client")
}
ctx, cancel := context.WithCancel(context.Background())
return &Client{
uri: uri,
sessionID: uuid.New().String(),
conversationKey: uri.GetConversationKey(),
clientSigner: uri.GetClientSigner(),
pending: make(map[string]chan *ResponseMessage),
subscriptions: make(map[string]chan *event.E),
ctx: ctx,
cancel: cancel,
}, nil
}
// Connect establishes the connection to the rendezvous relay.
func (c *Client) Connect(ctx context.Context) error {
// Connect to rendezvous relay
conn, err := ws.RelayConnect(ctx, c.uri.RendezvousRelay)
if chk.E(err) {
return fmt.Errorf("%w: %v", ErrRendezvousConnectionFailed, err)
}
c.rendezvousConn = conn
// Subscribe to response events
clientPubkeyHex := hex.Enc(c.clientSigner.Pub())
sub, err := conn.Subscribe(
ctx,
filter.NewS(&filter.F{
Kinds: kind.NewS(kind.New(KindNRCResponse)),
Tags: tag.NewS(
tag.NewFromAny("p", clientPubkeyHex),
),
Since: &timestamp.T{V: time.Now().Unix()},
}),
)
if chk.E(err) {
conn.Close()
return fmt.Errorf("subscription failed: %w", err)
}
c.responseSub = sub
// Start response handler
go c.handleResponses()
log.I.F("NRC client connected to %s via %s",
hex.Enc(c.uri.RelayPubkey), c.uri.RendezvousRelay)
return nil
}
// Close closes the client connection.
func (c *Client) Close() {
c.cancel()
if c.responseSub != nil {
c.responseSub.Unsub()
}
if c.rendezvousConn != nil {
c.rendezvousConn.Close()
}
// Close all pending channels
c.pendingMu.Lock()
for _, ch := range c.pending {
close(ch)
}
c.pending = make(map[string]chan *ResponseMessage)
c.pendingMu.Unlock()
// Close all subscription channels
c.subscriptionsMu.Lock()
for _, ch := range c.subscriptions {
close(ch)
}
c.subscriptions = make(map[string]chan *event.E)
c.subscriptionsMu.Unlock()
}
// handleResponses processes incoming NRC response events.
func (c *Client) handleResponses() {
for {
select {
case <-c.ctx.Done():
return
case ev := <-c.responseSub.Events:
if ev == nil {
return
}
c.processResponse(ev)
}
}
}
// processResponse decrypts and routes a response event.
func (c *Client) processResponse(ev *event.E) {
// Decrypt content
decrypted, err := encryption.Decrypt(c.conversationKey, string(ev.Content))
if err != nil {
log.W.F("NRC response decryption failed: %v", err)
return
}
// Parse response
var resp struct {
Type string `json:"type"`
Payload []any `json:"payload"`
}
if err := json.Unmarshal([]byte(decrypted), &resp); err != nil {
log.W.F("NRC response parse failed: %v", err)
return
}
// Extract request event ID for routing
var requestEventID string
eTag := ev.Tags.GetFirst([]byte("e"))
if eTag != nil && eTag.Len() >= 2 {
requestEventID = string(eTag.ValueHex())
}
// Route based on response type
switch resp.Type {
case "EVENT":
c.handleEventResponse(resp.Payload)
case "EOSE":
c.handleEOSEResponse(resp.Payload, requestEventID)
case "OK":
c.handleOKResponse(resp.Payload, requestEventID)
case "NOTICE":
c.handleNoticeResponse(resp.Payload)
case "CLOSED":
c.handleClosedResponse(resp.Payload)
case "COUNT":
c.handleCountResponse(resp.Payload, requestEventID)
case "AUTH":
c.handleAuthResponse(resp.Payload, requestEventID)
}
}
// handleEventResponse routes an EVENT to the appropriate subscription.
func (c *Client) handleEventResponse(payload []any) {
if len(payload) < 3 {
return
}
// Payload: ["EVENT", "<sub_id>", {...event...}]
subID, ok := payload[1].(string)
if !ok {
return
}
c.subscriptionsMu.Lock()
ch, exists := c.subscriptions[subID]
c.subscriptionsMu.Unlock()
if !exists {
return
}
// Parse event from payload
eventData, ok := payload[2].(map[string]any)
if !ok {
return
}
eventBytes, err := json.Marshal(eventData)
if err != nil {
return
}
var ev event.E
if err := json.Unmarshal(eventBytes, &ev); err != nil {
return
}
select {
case ch <- &ev:
default:
// Channel full, drop event
}
}
// handleEOSEResponse handles an EOSE response.
func (c *Client) handleEOSEResponse(payload []any, requestEventID string) {
// Route to pending request
c.pendingMu.Lock()
ch, exists := c.pending[requestEventID]
c.pendingMu.Unlock()
if exists {
resp := &ResponseMessage{Type: "EOSE", Payload: payload}
select {
case ch <- resp:
default:
}
}
}
// handleOKResponse handles an OK response.
func (c *Client) handleOKResponse(payload []any, requestEventID string) {
c.pendingMu.Lock()
ch, exists := c.pending[requestEventID]
c.pendingMu.Unlock()
if exists {
resp := &ResponseMessage{Type: "OK", Payload: payload}
select {
case ch <- resp:
default:
}
}
}
// handleNoticeResponse logs a NOTICE.
func (c *Client) handleNoticeResponse(payload []any) {
if len(payload) >= 2 {
if msg, ok := payload[1].(string); ok {
log.W.F("NRC NOTICE: %s", msg)
}
}
}
// handleClosedResponse handles a subscription close.
func (c *Client) handleClosedResponse(payload []any) {
if len(payload) >= 2 {
if subID, ok := payload[1].(string); ok {
c.subscriptionsMu.Lock()
if ch, exists := c.subscriptions[subID]; exists {
close(ch)
delete(c.subscriptions, subID)
}
c.subscriptionsMu.Unlock()
}
}
}
// handleCountResponse handles a COUNT response.
func (c *Client) handleCountResponse(payload []any, requestEventID string) {
c.pendingMu.Lock()
ch, exists := c.pending[requestEventID]
c.pendingMu.Unlock()
if exists {
resp := &ResponseMessage{Type: "COUNT", Payload: payload}
select {
case ch <- resp:
default:
}
}
}
// handleAuthResponse handles an AUTH challenge.
func (c *Client) handleAuthResponse(payload []any, requestEventID string) {
c.pendingMu.Lock()
ch, exists := c.pending[requestEventID]
c.pendingMu.Unlock()
if exists {
resp := &ResponseMessage{Type: "AUTH", Payload: payload}
select {
case ch <- resp:
default:
}
}
}
// sendRequest sends an NRC request and waits for response.
func (c *Client) sendRequest(ctx context.Context, msgType string, payload []any) (*ResponseMessage, error) {
// Build request content
reqContent := struct {
Type string `json:"type"`
Payload []any `json:"payload"`
}{
Type: msgType,
Payload: payload,
}
contentBytes, err := json.Marshal(reqContent)
if err != nil {
return nil, fmt.Errorf("marshal failed: %w", err)
}
// Encrypt content
encrypted, err := encryption.Encrypt(c.conversationKey, contentBytes, nil)
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrEncryptionFailed, err)
}
// Build request event
reqEvent := &event.E{
Content: []byte(encrypted),
CreatedAt: time.Now().Unix(),
Kind: KindNRCRequest,
Tags: tag.NewS(
tag.NewFromAny("p", hex.Enc(c.uri.RelayPubkey)),
tag.NewFromAny("encryption", "nip44_v2"),
tag.NewFromAny("session", c.sessionID),
),
}
// Sign with client key
if err := reqEvent.Sign(c.clientSigner); chk.E(err) {
return nil, fmt.Errorf("signing failed: %w", err)
}
// Set up response channel
responseCh := make(chan *ResponseMessage, 1)
requestEventID := string(hex.Enc(reqEvent.ID[:]))
c.pendingMu.Lock()
c.pending[requestEventID] = responseCh
c.pendingMu.Unlock()
defer func() {
c.pendingMu.Lock()
delete(c.pending, requestEventID)
c.pendingMu.Unlock()
}()
// Publish request
if err := c.rendezvousConn.Publish(ctx, reqEvent); chk.E(err) {
return nil, fmt.Errorf("publish failed: %w", err)
}
// Wait for response
select {
case <-ctx.Done():
return nil, ctx.Err()
case resp := <-responseCh:
if resp == nil {
return nil, fmt.Errorf("response channel closed")
}
return resp, nil
}
}
// Publish publishes an event to the private relay.
func (c *Client) Publish(ctx context.Context, ev *event.E) (bool, string, error) {
// Convert event to JSON for payload
eventBytes, err := json.Marshal(ev)
if err != nil {
return false, "", fmt.Errorf("marshal event failed: %w", err)
}
var eventMap map[string]any
if err := json.Unmarshal(eventBytes, &eventMap); err != nil {
return false, "", fmt.Errorf("unmarshal event failed: %w", err)
}
payload := []any{"EVENT", eventMap}
resp, err := c.sendRequest(ctx, "EVENT", payload)
if err != nil {
return false, "", err
}
// Parse OK response: ["OK", "<event_id>", <success>, "<message>"]
if resp.Type != "OK" || len(resp.Payload) < 4 {
return false, "", fmt.Errorf("unexpected response type: %s", resp.Type)
}
success, _ := resp.Payload[2].(bool)
message, _ := resp.Payload[3].(string)
return success, message, nil
}
// Subscribe creates a subscription to the private relay.
func (c *Client) Subscribe(ctx context.Context, subID string, filters ...*filter.F) (<-chan *event.E, error) {
// Build payload: ["REQ", "<sub_id>", filter1, filter2, ...]
payload := []any{"REQ", subID}
for _, f := range filters {
filterBytes, err := json.Marshal(f)
if err != nil {
return nil, fmt.Errorf("marshal filter failed: %w", err)
}
var filterMap map[string]any
if err := json.Unmarshal(filterBytes, &filterMap); err != nil {
return nil, fmt.Errorf("unmarshal filter failed: %w", err)
}
payload = append(payload, filterMap)
}
// Create event channel for this subscription
eventCh := make(chan *event.E, 100)
c.subscriptionsMu.Lock()
c.subscriptions[subID] = eventCh
c.subscriptionsMu.Unlock()
// Send request (don't wait for EOSE, events will come asynchronously)
go func() {
reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
_, err := c.sendRequest(reqCtx, "REQ", payload)
if err != nil {
log.W.F("NRC subscribe failed: %v", err)
}
}()
return eventCh, nil
}
// Unsubscribe closes a subscription.
func (c *Client) Unsubscribe(ctx context.Context, subID string) error {
// Remove from local tracking
c.subscriptionsMu.Lock()
if ch, exists := c.subscriptions[subID]; exists {
close(ch)
delete(c.subscriptions, subID)
}
c.subscriptionsMu.Unlock()
// Send CLOSE to relay
payload := []any{"CLOSE", subID}
_, err := c.sendRequest(ctx, "CLOSE", payload)
return err
}
// Count sends a COUNT request to the private relay.
func (c *Client) Count(ctx context.Context, subID string, filters ...*filter.F) (int64, error) {
// Build payload: ["COUNT", "<sub_id>", filter1, filter2, ...]
payload := []any{"COUNT", subID}
for _, f := range filters {
filterBytes, err := json.Marshal(f)
if err != nil {
return 0, fmt.Errorf("marshal filter failed: %w", err)
}
var filterMap map[string]any
if err := json.Unmarshal(filterBytes, &filterMap); err != nil {
return 0, fmt.Errorf("unmarshal filter failed: %w", err)
}
payload = append(payload, filterMap)
}
resp, err := c.sendRequest(ctx, "COUNT", payload)
if err != nil {
return 0, err
}
// Parse COUNT response: ["COUNT", "<sub_id>", {"count": N}]
if resp.Type != "COUNT" || len(resp.Payload) < 3 {
return 0, fmt.Errorf("unexpected response type: %s", resp.Type)
}
countData, ok := resp.Payload[2].(map[string]any)
if !ok {
return 0, fmt.Errorf("invalid count response")
}
count, ok := countData["count"].(float64)
if !ok {
return 0, fmt.Errorf("missing count field")
}
return int64(count), nil
}
// RelayURL returns a pseudo-URL for this NRC connection.
func (c *Client) RelayURL() string {
return "nrc://" + string(hex.Enc(c.uri.RelayPubkey))
}
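A usage sketch for the client side (dialAndRead is a hypothetical helper; the subscription ID and the kind-1 filter are arbitrary examples):

func dialAndRead(ctx context.Context, connectionURI string) error {
	c, err := NewClient(connectionURI)
	if err != nil {
		return err
	}
	defer c.Close()
	if err = c.Connect(ctx); err != nil {
		return err
	}
	events, err := c.Subscribe(ctx, "notes", &filter.F{Kinds: kind.NewS(kind.New(1))})
	if err != nil {
		return err
	}
	// The channel closes when the relay sends CLOSED or the client is closed.
	for ev := range events {
		log.I.F("nrc event %s", hex.Enc(ev.ID[:]))
	}
	return nil
}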

24
pkg/protocol/nrc/errors.go

@ -0,0 +1,24 @@
package nrc
import "errors"
var (
// ErrUnauthorized is returned when a client is not authorized.
ErrUnauthorized = errors.New("unauthorized")
// ErrInvalidSession is returned when a session ID is invalid or not found.
ErrInvalidSession = errors.New("invalid session")
// ErrTooManySubscriptions is returned when a session has too many subscriptions.
ErrTooManySubscriptions = errors.New("too many subscriptions")
// ErrInvalidMessageType is returned when the message type is invalid.
ErrInvalidMessageType = errors.New("invalid message type")
// ErrSessionExpired is returned when a session has expired.
ErrSessionExpired = errors.New("session expired")
// ErrDecryptionFailed is returned when message decryption fails.
ErrDecryptionFailed = errors.New("decryption failed")
// ErrEncryptionFailed is returned when message encryption fails.
ErrEncryptionFailed = errors.New("encryption failed")
// ErrRelayConnectionFailed is returned when connection to the local relay fails.
ErrRelayConnectionFailed = errors.New("relay connection failed")
// ErrRendezvousConnectionFailed is returned when connection to the rendezvous relay fails.
ErrRendezvousConnectionFailed = errors.New("rendezvous relay connection failed")
)
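Because the bridge and client wrap these sentinels with %w, callers can branch on them; a small sketch (isTransportError is a hypothetical helper):

func isTransportError(err error) bool {
	// Both dial paths wrap their underlying cause, so errors.Is still matches.
	return errors.Is(err, ErrRelayConnectionFailed) ||
		errors.Is(err, ErrRendezvousConnectionFailed)
}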

371
pkg/protocol/nrc/nrc_test.go

@ -0,0 +1,371 @@
package nrc
import (
"testing"
"time"
"git.mleku.dev/mleku/nostr/crypto/keys"
"git.mleku.dev/mleku/nostr/encoders/hex"
)
// Test keys - generated from known secrets for reproducibility
var (
// From secret: 0000000000000000000000000000000000000000000000000000000000000001
testRelaySecret = "0000000000000000000000000000000000000000000000000000000000000001"
// From secret: 0000000000000000000000000000000000000000000000000000000000000002
testClientSecret = "0000000000000000000000000000000000000000000000000000000000000002"
)
// getTestRelayPubkey returns the pubkey derived from testRelaySecret
func getTestRelayPubkey(t *testing.T) []byte {
secretBytes, err := hex.Dec(testRelaySecret)
if err != nil {
t.Fatalf("failed to decode test secret: %v", err)
}
pubkey, err := keys.SecretBytesToPubKeyBytes(secretBytes)
if err != nil {
t.Fatalf("failed to derive pubkey: %v", err)
}
return pubkey
}
// getTestRelayPubkeyHex returns the hex-encoded pubkey
func getTestRelayPubkeyHex(t *testing.T) string {
return string(hex.Enc(getTestRelayPubkey(t)))
}
func TestParseConnectionURI(t *testing.T) {
// Get valid pubkey for tests
relayPubkeyHex := getTestRelayPubkeyHex(t)
tests := []struct {
name string
uri string
wantErr bool
check func(*testing.T, *ConnectionURI)
}{
{
name: "invalid scheme",
uri: "nostr+wallet://abc123",
wantErr: true,
},
{
name: "missing relay parameter",
uri: "nostr+relayconnect://" + relayPubkeyHex,
wantErr: true,
},
{
name: "missing secret parameter",
uri: "nostr+relayconnect://" + relayPubkeyHex + "?relay=wss://relay.example.com",
wantErr: true,
},
{
name: "invalid relay pubkey",
uri: "nostr+relayconnect://invalid?relay=wss://relay.example.com&secret=" + testClientSecret,
wantErr: true,
},
{
name: "valid secret-based URI",
uri: "nostr+relayconnect://" + relayPubkeyHex + "?relay=wss://relay.example.com&secret=" + testClientSecret,
check: func(t *testing.T, conn *ConnectionURI) {
if conn.AuthMode != AuthModeSecret {
t.Errorf("expected AuthModeSecret, got %d", conn.AuthMode)
}
if conn.RendezvousRelay != "wss://relay.example.com" {
t.Errorf("expected wss://relay.example.com, got %s", conn.RendezvousRelay)
}
if conn.GetClientSigner() == nil {
t.Error("expected client signer to be set")
}
if len(conn.GetConversationKey()) != 32 {
t.Errorf("expected 32-byte conversation key, got %d", len(conn.GetConversationKey()))
}
},
},
{
name: "valid URI with device name",
uri: "nostr+relayconnect://" + relayPubkeyHex + "?relay=wss://relay.example.com&secret=" + testClientSecret + "&name=phone",
check: func(t *testing.T, conn *ConnectionURI) {
if conn.DeviceName != "phone" {
t.Errorf("expected device name 'phone', got '%s'", conn.DeviceName)
}
},
},
{
name: "valid CAT-based URI",
uri: "nostr+relayconnect://" + relayPubkeyHex + "?relay=wss://relay.example.com&auth=cat&mint=https://mint.example.com",
check: func(t *testing.T, conn *ConnectionURI) {
if conn.AuthMode != AuthModeCAT {
t.Errorf("expected AuthModeCAT, got %d", conn.AuthMode)
}
if conn.MintURL != "https://mint.example.com" {
t.Errorf("expected mint URL, got %s", conn.MintURL)
}
},
},
{
name: "CAT URI missing mint",
uri: "nostr+relayconnect://" + relayPubkeyHex + "?relay=wss://relay.example.com&auth=cat",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
conn, err := ParseConnectionURI(tt.uri)
if (err != nil) != tt.wantErr {
t.Errorf("ParseConnectionURI() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.check != nil && err == nil {
tt.check(t, conn)
}
})
}
}
func TestGenerateConnectionURI(t *testing.T) {
relayPubkey := getTestRelayPubkey(t)
rendezvousRelay := "wss://relay.example.com"
uri, secret, err := GenerateConnectionURI(relayPubkey, rendezvousRelay, "test-device")
if err != nil {
t.Fatalf("GenerateConnectionURI() error = %v", err)
}
if len(secret) != 32 {
t.Errorf("expected 32-byte secret, got %d", len(secret))
}
// Parse the generated URI to verify it's valid
conn, err := ParseConnectionURI(uri)
if err != nil {
t.Fatalf("failed to parse generated URI: %v", err)
}
if conn.DeviceName != "test-device" {
t.Errorf("expected device name 'test-device', got '%s'", conn.DeviceName)
}
if conn.RendezvousRelay != rendezvousRelay {
t.Errorf("expected rendezvous relay %s, got %s", rendezvousRelay, conn.RendezvousRelay)
}
}
func TestSession(t *testing.T) {
clientPubkey := make([]byte, 32)
conversationKey := make([]byte, 32)
session := NewSession("test-session", clientPubkey, conversationKey, AuthModeSecret, "test-device")
if session == nil {
t.Fatal("NewSession returned nil")
}
// Test basic properties
if session.ID != "test-session" {
t.Errorf("expected ID 'test-session', got '%s'", session.ID)
}
if session.DeviceName != "test-device" {
t.Errorf("expected device name 'test-device', got '%s'", session.DeviceName)
}
if session.AuthMode != AuthModeSecret {
t.Errorf("expected AuthModeSecret, got %d", session.AuthMode)
}
// Test subscription management
if err := session.AddSubscription("sub1"); err != nil {
t.Errorf("AddSubscription() error = %v", err)
}
if !session.HasSubscription("sub1") {
t.Error("expected subscription 'sub1' to exist")
}
if session.SubscriptionCount() != 1 {
t.Errorf("expected 1 subscription, got %d", session.SubscriptionCount())
}
session.RemoveSubscription("sub1")
if session.HasSubscription("sub1") {
t.Error("expected subscription 'sub1' to be removed")
}
// Test expiry
if session.IsExpired(time.Hour) {
t.Error("session should not be expired")
}
// Test close
session.Close()
select {
case <-session.Context().Done():
// Expected
default:
t.Error("expected session context to be cancelled after Close()")
}
}
func TestSessionManager(t *testing.T) {
manager := NewSessionManager(time.Minute)
if manager == nil {
t.Fatal("NewSessionManager returned nil")
}
clientPubkey := make([]byte, 32)
conversationKey := make([]byte, 32)
// Test GetOrCreate
session := manager.GetOrCreate("session1", clientPubkey, conversationKey, AuthModeSecret, "device1")
if session == nil {
t.Fatal("GetOrCreate returned nil")
}
// Get same session again
session2 := manager.GetOrCreate("session1", clientPubkey, conversationKey, AuthModeSecret, "device1")
if session2 != session {
t.Error("expected GetOrCreate to return same session")
}
// Test Get
retrieved := manager.Get("session1")
if retrieved != session {
t.Error("expected Get to return the session")
}
// Test Count
if manager.Count() != 1 {
t.Errorf("expected count 1, got %d", manager.Count())
}
// Test Remove
manager.Remove("session1")
if manager.Get("session1") != nil {
t.Error("expected session to be removed")
}
if manager.Count() != 0 {
t.Errorf("expected count 0 after removal, got %d", manager.Count())
}
// Test Close
manager.GetOrCreate("session2", clientPubkey, conversationKey, AuthModeSecret, "device2")
manager.Close()
if manager.Count() != 0 {
t.Errorf("expected count 0 after Close, got %d", manager.Count())
}
}
func TestParseRequestContent(t *testing.T) {
tests := []struct {
name string
content string
wantErr bool
check func(*testing.T, *RequestMessage)
}{
{
name: "empty content",
content: "",
wantErr: true,
},
{
name: "invalid JSON",
content: "not json",
wantErr: true,
},
{
name: "missing type",
content: `{"payload": []}`,
wantErr: true,
},
{
name: "valid EVENT request",
content: `{"type": "EVENT", "payload": ["EVENT", {}]}`,
check: func(t *testing.T, msg *RequestMessage) {
if msg.Type != "EVENT" {
t.Errorf("expected type EVENT, got %s", msg.Type)
}
},
},
{
name: "valid REQ request",
content: `{"type": "REQ", "payload": ["REQ", "sub1", {}]}`,
check: func(t *testing.T, msg *RequestMessage) {
if msg.Type != "REQ" {
t.Errorf("expected type REQ, got %s", msg.Type)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
msg, err := ParseRequestContent([]byte(tt.content))
if (err != nil) != tt.wantErr {
t.Errorf("ParseRequestContent() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.check != nil && err == nil {
tt.check(t, msg)
}
})
}
}
func TestMarshalResponseContent(t *testing.T) {
resp := &ResponseMessage{
Type: "OK",
Payload: []any{"OK", "eventid123", true, ""},
}
content, err := MarshalResponseContent(resp)
if err != nil {
t.Fatalf("MarshalResponseContent() error = %v", err)
}
// Verify it's valid JSON that can be parsed back
parsed, err := ParseRequestContent(content)
if err != nil {
t.Fatalf("failed to parse marshaled response: %v", err)
}
if parsed.Type != "OK" {
t.Errorf("expected type OK, got %s", parsed.Type)
}
}
func TestBridgeConfig(t *testing.T) {
config := &BridgeConfig{
RendezvousURL: "wss://relay.example.com",
LocalRelayURL: "ws://localhost:3334",
AuthorizedSecrets: map[string]string{"pubkey1": "device1"},
SessionTimeout: time.Minute,
}
bridge := NewBridge(config)
if bridge == nil {
t.Fatal("NewBridge returned nil")
}
// Bridge shouldn't start without a valid rendezvous relay
// but we can verify it was created
bridge.Stop()
}
func TestSubscriptionTooMany(t *testing.T) {
clientPubkey := make([]byte, 32)
conversationKey := make([]byte, 32)
session := NewSession("test-session", clientPubkey, conversationKey, AuthModeSecret, "test-device")
// Add max subscriptions
for i := 0; i < DefaultMaxSubscriptions; i++ {
if err := session.AddSubscription(string(rune(i))); err != nil {
t.Fatalf("AddSubscription() error = %v at iteration %d", err, i)
}
}
// Next one should fail
err := session.AddSubscription("overflow")
if err != ErrTooManySubscriptions {
t.Errorf("expected ErrTooManySubscriptions, got %v", err)
}
session.Close()
}

322
pkg/protocol/nrc/session.go

@ -0,0 +1,322 @@
package nrc
import (
"context"
"encoding/json"
"sync"
"time"
)
const (
// DefaultSessionTimeout is the default inactivity timeout for sessions.
DefaultSessionTimeout = 30 * time.Minute
// DefaultMaxSubscriptions is the default maximum subscriptions per session.
DefaultMaxSubscriptions = 100
)
// Session represents an NRC client session through the tunnel.
type Session struct {
// ID is the unique session identifier.
ID string
// ClientPubkey is the public key of the connected client.
ClientPubkey []byte
// ConversationKey is the NIP-44 conversation key for this session.
ConversationKey []byte
// DeviceName is the optional device identifier.
DeviceName string
// AuthMode is the authentication mode used.
AuthMode AuthMode
// CreatedAt is when the session was created.
CreatedAt time.Time
// LastActivity is the timestamp of the last activity.
LastActivity time.Time
// subscriptions maps client subscription IDs to internal subscription state.
subscriptions map[string]*Subscription
// subMu protects the subscriptions map.
subMu sync.RWMutex
// ctx is the session context.
ctx context.Context
// cancel cancels the session context.
cancel context.CancelFunc
// eventCh receives events from the local relay for this session.
eventCh chan *SessionEvent
}
// Subscription represents a tunneled subscription.
type Subscription struct {
// ID is the client's subscription ID.
ID string
// CreatedAt is when the subscription was created.
CreatedAt time.Time
// EventCount tracks how many events have been sent.
EventCount int64
// EOSESent indicates whether EOSE has been sent.
EOSESent bool
}
// SessionEvent wraps a relay response for delivery to the client.
type SessionEvent struct {
// Type is the response type (EVENT, OK, EOSE, NOTICE, CLOSED, COUNT, AUTH).
Type string
// Payload is the response payload array.
Payload []any
// RequestEventID is the ID of the request event this responds to (if applicable).
RequestEventID string
}
// NewSession creates a new session.
func NewSession(id string, clientPubkey, conversationKey []byte, authMode AuthMode, deviceName string) *Session {
ctx, cancel := context.WithCancel(context.Background())
now := time.Now()
return &Session{
ID: id,
ClientPubkey: clientPubkey,
ConversationKey: conversationKey,
DeviceName: deviceName,
AuthMode: authMode,
CreatedAt: now,
LastActivity: now,
subscriptions: make(map[string]*Subscription),
ctx: ctx,
cancel: cancel,
eventCh: make(chan *SessionEvent, 100),
}
}
// Context returns the session's context.
func (s *Session) Context() context.Context {
return s.ctx
}
// Close closes the session and cleans up resources.
func (s *Session) Close() {
s.cancel()
close(s.eventCh)
}
// Events returns the channel for receiving events destined for this session.
func (s *Session) Events() <-chan *SessionEvent {
return s.eventCh
}
// SendEvent sends an event to the session's event channel.
func (s *Session) SendEvent(ev *SessionEvent) bool {
select {
case s.eventCh <- ev:
return true
case <-s.ctx.Done():
return false
default:
// Channel full, drop event
return false
}
}
// Touch updates the last activity timestamp.
func (s *Session) Touch() {
s.LastActivity = time.Now()
}
// IsExpired checks if the session has been inactive too long.
func (s *Session) IsExpired(timeout time.Duration) bool {
return time.Since(s.LastActivity) > timeout
}
// AddSubscription adds a new subscription to the session.
func (s *Session) AddSubscription(subID string) error {
s.subMu.Lock()
defer s.subMu.Unlock()
if len(s.subscriptions) >= DefaultMaxSubscriptions {
return ErrTooManySubscriptions
}
s.subscriptions[subID] = &Subscription{
ID: subID,
CreatedAt: time.Now(),
}
return nil
}
// RemoveSubscription removes a subscription from the session.
func (s *Session) RemoveSubscription(subID string) {
s.subMu.Lock()
defer s.subMu.Unlock()
delete(s.subscriptions, subID)
}
// GetSubscription returns a subscription by ID.
func (s *Session) GetSubscription(subID string) *Subscription {
s.subMu.RLock()
defer s.subMu.RUnlock()
return s.subscriptions[subID]
}
// HasSubscription checks if a subscription exists.
func (s *Session) HasSubscription(subID string) bool {
s.subMu.RLock()
defer s.subMu.RUnlock()
_, ok := s.subscriptions[subID]
return ok
}
// SubscriptionCount returns the number of active subscriptions.
func (s *Session) SubscriptionCount() int {
s.subMu.RLock()
defer s.subMu.RUnlock()
return len(s.subscriptions)
}
// MarkEOSE marks a subscription as having sent EOSE.
func (s *Session) MarkEOSE(subID string) {
s.subMu.Lock()
defer s.subMu.Unlock()
if sub, ok := s.subscriptions[subID]; ok {
sub.EOSESent = true
}
}
// IncrementEventCount increments the event count for a subscription.
func (s *Session) IncrementEventCount(subID string) {
s.subMu.Lock()
defer s.subMu.Unlock()
if sub, ok := s.subscriptions[subID]; ok {
sub.EventCount++
}
}
// SessionManager manages multiple NRC sessions.
type SessionManager struct {
sessions map[string]*Session
mu sync.RWMutex
timeout time.Duration
}
// NewSessionManager creates a new session manager.
func NewSessionManager(timeout time.Duration) *SessionManager {
if timeout == 0 {
timeout = DefaultSessionTimeout
}
return &SessionManager{
sessions: make(map[string]*Session),
timeout: timeout,
}
}
// Get returns a session by ID.
func (m *SessionManager) Get(sessionID string) *Session {
m.mu.RLock()
defer m.mu.RUnlock()
return m.sessions[sessionID]
}
// GetOrCreate gets an existing session or creates a new one.
func (m *SessionManager) GetOrCreate(sessionID string, clientPubkey, conversationKey []byte, authMode AuthMode, deviceName string) *Session {
m.mu.Lock()
defer m.mu.Unlock()
if session, ok := m.sessions[sessionID]; ok {
session.Touch()
return session
}
session := NewSession(sessionID, clientPubkey, conversationKey, authMode, deviceName)
m.sessions[sessionID] = session
return session
}
// Remove removes a session.
func (m *SessionManager) Remove(sessionID string) {
m.mu.Lock()
defer m.mu.Unlock()
if session, ok := m.sessions[sessionID]; ok {
session.Close()
delete(m.sessions, sessionID)
}
}
// CleanupExpired removes expired sessions.
func (m *SessionManager) CleanupExpired() int {
m.mu.Lock()
defer m.mu.Unlock()
var removed int
for id, session := range m.sessions {
if session.IsExpired(m.timeout) {
session.Close()
delete(m.sessions, id)
removed++
}
}
return removed
}
// Count returns the number of active sessions.
func (m *SessionManager) Count() int {
m.mu.RLock()
defer m.mu.RUnlock()
return len(m.sessions)
}
// Close closes all sessions.
func (m *SessionManager) Close() {
m.mu.Lock()
defer m.mu.Unlock()
for _, session := range m.sessions {
session.Close()
}
m.sessions = make(map[string]*Session)
}
// RequestMessage represents a parsed NRC request message.
type RequestMessage struct {
Type string // EVENT, REQ, CLOSE, AUTH, COUNT
Payload []any
}
// ResponseMessage represents an NRC response message to be sent.
type ResponseMessage struct {
Type string // EVENT, OK, EOSE, NOTICE, CLOSED, COUNT, AUTH
Payload []any
}
// ParseRequestContent parses the decrypted content of an NRC request.
func ParseRequestContent(content []byte) (*RequestMessage, error) {
// Content format: {"type": "EVENT|REQ|...", "payload": [...]}
// Parse as generic JSON
var msg struct {
Type string `json:"type"`
Payload []any `json:"payload"`
}
if err := json.Unmarshal(content, &msg); err != nil {
return nil, err
}
if msg.Type == "" {
return nil, ErrInvalidMessageType
}
return &RequestMessage{
Type: msg.Type,
Payload: msg.Payload,
}, nil
}
// MarshalResponseContent marshals an NRC response for encryption.
func MarshalResponseContent(resp *ResponseMessage) ([]byte, error) {
msg := struct {
Type string `json:"type"`
Payload []any `json:"payload"`
}{
Type: resp.Type,
Payload: resp.Payload,
}
return json.Marshal(msg)
}
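
For orientation, a minimal sketch of how the session manager above could be driven; it is not part of this change. The import path (`example.invalid/relay/pkg/protocol/nrc`), the session and subscription IDs, and the zeroed keys are placeholder assumptions.

package main

import (
	"fmt"

	"example.invalid/relay/pkg/protocol/nrc" // assumed module path
)

func main() {
	mgr := nrc.NewSessionManager(0) // 0 falls back to DefaultSessionTimeout
	defer mgr.Close()

	// Placeholder identifiers; the real bridge derives these from the
	// decrypted NRC request event.
	sess := mgr.GetOrCreate("session-1", make([]byte, 32), make([]byte, 32),
		nrc.AuthModeSecret, "laptop")

	if err := sess.AddSubscription("sub-1"); err != nil {
		fmt.Println("subscription rejected:", err)
		return
	}

	// Queue a response for whatever goroutine is draining sess.Events().
	sess.SendEvent(&nrc.SessionEvent{Type: "EOSE", Payload: []any{"sub-1"}})
	sess.MarkEOSE("sub-1")

	// Reap idle sessions; in a real bridge this would run on a ticker.
	removed := mgr.CleanupExpired()
	fmt.Println("expired sessions removed:", removed)
	fmt.Println("active sessions:", mgr.Count())
}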

206
pkg/protocol/nrc/uri.go

@ -0,0 +1,206 @@
package nrc
import (
"errors"
"net/url"
"git.mleku.dev/mleku/nostr/crypto/encryption"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/interfaces/signer"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
"lol.mleku.dev/chk"
)
// AuthMode defines the authentication mode for NRC connections.
type AuthMode int
const (
// AuthModeSecret uses a shared secret for authentication.
AuthModeSecret AuthMode = iota
// AuthModeCAT uses Cashu Access Tokens for authentication.
AuthModeCAT
)
// ConnectionURI represents a parsed nostr+relayconnect:// URI.
type ConnectionURI struct {
// RelayPubkey is the public key of the private relay (32 bytes).
RelayPubkey []byte
// RendezvousRelay is the WebSocket URL of the public relay.
RendezvousRelay string
// AuthMode indicates whether to use secret or CAT authentication.
AuthMode AuthMode
// DeviceName is an optional human-readable device identifier.
DeviceName string
// Secret-based authentication fields
clientSecretKey signer.I
conversationKey []byte
// CAT-based authentication fields
MintURL string
}
// GetClientSigner returns the signer derived from the secret (secret-based auth only).
func (c *ConnectionURI) GetClientSigner() signer.I {
return c.clientSecretKey
}
// GetConversationKey returns the NIP-44 conversation key (secret-based auth only).
func (c *ConnectionURI) GetConversationKey() []byte {
return c.conversationKey
}
// ParseConnectionURI parses a nostr+relayconnect:// URI.
//
// Secret-based URI format:
//
// nostr+relayconnect://<relay-pubkey>?relay=<rendezvous-relay>&secret=<client-secret>[&name=<device-name>]
//
// CAT-based URI format:
//
// nostr+relayconnect://<relay-pubkey>?relay=<rendezvous-relay>&auth=cat&mint=<mint-url>
func ParseConnectionURI(nrcURI string) (conn *ConnectionURI, err error) {
var p *url.URL
if p, err = url.Parse(nrcURI); chk.E(err) {
return
}
if p == nil {
err = errors.New("invalid uri")
return
}
conn = &ConnectionURI{}
// Validate scheme
if p.Scheme != "nostr+relayconnect" {
err = errors.New("incorrect scheme: expected nostr+relayconnect")
return
}
// Parse relay pubkey from host
if conn.RelayPubkey, err = hex.Dec(p.Host); chk.E(err) {
err = errors.New("invalid relay public key")
return
}
if len(conn.RelayPubkey) != 32 {
err = errors.New("relay public key must be 32 bytes")
return
}
query := p.Query()
// Parse rendezvous relay URL (required)
relayParam := query.Get("relay")
if relayParam == "" {
err = errors.New("missing relay parameter")
return
}
conn.RendezvousRelay = relayParam
// Parse optional device name
conn.DeviceName = query.Get("name")
// Determine auth mode
authParam := query.Get("auth")
if authParam == "cat" {
conn.AuthMode = AuthModeCAT
// Parse mint URL for CAT auth
conn.MintURL = query.Get("mint")
if conn.MintURL == "" {
err = errors.New("missing mint parameter for CAT auth")
return
}
} else {
conn.AuthMode = AuthModeSecret
// Parse secret for secret-based auth
secret := query.Get("secret")
if secret == "" {
err = errors.New("missing secret parameter")
return
}
var secretBytes []byte
if secretBytes, err = hex.Dec(secret); chk.E(err) {
err = errors.New("invalid secret: must be hex-encoded")
return
}
if len(secretBytes) != 32 {
err = errors.New("secret must be 32 bytes")
return
}
// Create signer from secret
var clientKey *p8k.Signer
if clientKey, err = p8k.New(); chk.E(err) {
return
}
if err = clientKey.InitSec(secretBytes); chk.E(err) {
return
}
conn.clientSecretKey = clientKey
// Generate conversation key using NIP-44 key derivation
if conn.conversationKey, err = encryption.GenerateConversationKey(
clientKey.Sec(),
conn.RelayPubkey,
); chk.E(err) {
return
}
}
return
}
// GenerateConnectionURI creates a new NRC connection URI with a random secret.
func GenerateConnectionURI(relayPubkey []byte, rendezvousRelay string, deviceName string) (uri string, secret []byte, err error) {
if len(relayPubkey) != 32 {
err = errors.New("relay public key must be 32 bytes")
return
}
// Generate random 32-byte secret
var clientKey *p8k.Signer
if clientKey, err = p8k.New(); chk.E(err) {
return
}
if err = clientKey.Generate(); chk.E(err) {
return
}
secret = clientKey.Sec()
// Build URI
u := &url.URL{
Scheme: "nostr+relayconnect",
Host: string(hex.Enc(relayPubkey)),
}
q := u.Query()
q.Set("relay", rendezvousRelay)
q.Set("secret", string(hex.Enc(secret)))
if deviceName != "" {
q.Set("name", deviceName)
}
u.RawQuery = q.Encode()
uri = u.String()
return
}
// GenerateCATConnectionURI creates a new NRC connection URI for CAT authentication.
func GenerateCATConnectionURI(relayPubkey []byte, rendezvousRelay string, mintURL string) (uri string, err error) {
if len(relayPubkey) != 32 {
err = errors.New("relay public key must be 32 bytes")
return
}
// Build URI
u := &url.URL{
Scheme: "nostr+relayconnect",
Host: string(hex.Enc(relayPubkey)),
}
q := u.Query()
q.Set("relay", rendezvousRelay)
q.Set("auth", "cat")
q.Set("mint", mintURL)
u.RawQuery = q.Encode()
uri = u.String()
return
}
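
A minimal round-trip sketch of the URI helpers above, using the CAT variant so that no key derivation runs against the placeholder relay key; the import path and every URL shown are assumptions, not values from this change.

package main

import (
	"bytes"
	"fmt"
	"log"

	"example.invalid/relay/pkg/protocol/nrc" // assumed module path
)

func main() {
	// Placeholder 32-byte relay key; a real deployment uses the relay's actual pubkey.
	relayPubkey := bytes.Repeat([]byte{0x01}, 32)

	uri, err := nrc.GenerateCATConnectionURI(relayPubkey, "wss://rendezvous.example", "https://mint.example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("URI:", uri)

	// Parsing the CAT URI validates the pubkey length and reads the relay and
	// mint parameters; no conversation key is derived in this mode.
	conn, err := nrc.ParseConnectionURI(uri)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("auth mode is CAT:", conn.AuthMode == nrc.AuthModeCAT)
	fmt.Println("rendezvous relay:", conn.RendezvousRelay)
	fmt.Println("mint URL:", conn.MintURL)
}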

2
pkg/version/version

@ -1 +1 @@
v0.48.1
v0.48.10

712
pkg/wasmdb/jsbridge.go

@ -0,0 +1,712 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"syscall/js"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
)
// jsBridge is the package-level bridge instance exposed to JavaScript
var jsBridge *JSBridge
// JSBridge wraps the WasmDB instance for JavaScript interop
// Exposes a relay protocol interface (NIP-01) rather than direct database access
type JSBridge struct {
db *W
ctx context.Context
cancel context.CancelFunc
}
// RegisterJSBridge exposes the relay protocol API to JavaScript
func RegisterJSBridge(db *W, ctx context.Context, cancel context.CancelFunc) {
jsBridge = &JSBridge{
db: db,
ctx: ctx,
cancel: cancel,
}
// Create the wasmdb global object with relay protocol interface
wasmdbObj := map[string]interface{}{
// Lifecycle
"isReady": js.FuncOf(jsBridge.jsIsReady),
"close": js.FuncOf(jsBridge.jsClose),
"wipe": js.FuncOf(jsBridge.jsWipe),
// Relay Protocol (NIP-01)
// This is the main entry point - handles EVENT, REQ, CLOSE messages
"handleMessage": js.FuncOf(jsBridge.jsHandleMessage),
// Graph Query Extensions
"queryGraph": js.FuncOf(jsBridge.jsQueryGraph),
// Marker Extensions (key-value storage via relay protocol)
// ["MARKER", "set", key, value] -> ["OK", key, true]
// ["MARKER", "get", key] -> ["MARKER", key, value]
// ["MARKER", "delete", key] -> ["OK", key, true]
// These are also handled via handleMessage
}
js.Global().Set("wasmdb", wasmdbObj)
}
// jsIsReady returns true if the database is ready
func (b *JSBridge) jsIsReady(this js.Value, args []js.Value) interface{} {
select {
case <-b.db.Ready():
return true
default:
return false
}
}
// jsClose closes the database
func (b *JSBridge) jsClose(this js.Value, args []js.Value) interface{} {
return promiseWrapper(func() (interface{}, error) {
err := b.db.Close()
return nil, err
})
}
// jsWipe wipes all data from the database
func (b *JSBridge) jsWipe(this js.Value, args []js.Value) interface{} {
return promiseWrapper(func() (interface{}, error) {
err := b.db.Wipe()
return nil, err
})
}
// jsHandleMessage handles NIP-01 relay protocol messages
// Input: JSON string representing a relay message array
//
// ["EVENT", <event>] - Submit an event
// ["REQ", <sub_id>, <filter>...] - Request events
// ["CLOSE", <sub_id>] - Close a subscription
// ["MARKER", "set"|"get"|"delete", key, value?] - Marker operations
//
// Output: Promise<string[]> - Array of JSON response messages
func (b *JSBridge) jsHandleMessage(this js.Value, args []js.Value) interface{} {
if len(args) < 1 {
return rejectPromise("handleMessage requires message JSON argument")
}
messageJSON := args[0].String()
return promiseWrapper(func() (interface{}, error) {
// Parse the message array
var message []json.RawMessage
if err := json.Unmarshal([]byte(messageJSON), &message); err != nil {
return nil, fmt.Errorf("invalid message format: %w", err)
}
if len(message) < 1 {
return nil, fmt.Errorf("empty message")
}
// Get message type
var msgType string
if err := json.Unmarshal(message[0], &msgType); err != nil {
return nil, fmt.Errorf("invalid message type: %w", err)
}
switch msgType {
case "EVENT":
return b.handleEvent(message)
case "REQ":
return b.handleReq(message)
case "CLOSE":
return b.handleClose(message)
case "MARKER":
return b.handleMarker(message)
default:
return nil, fmt.Errorf("unknown message type: %s", msgType)
}
})
}
// handleEvent processes an EVENT message
// ["EVENT", <event>] -> ["OK", <id>, true/false, "message"]
func (b *JSBridge) handleEvent(message []json.RawMessage) (interface{}, error) {
if len(message) < 2 {
return []interface{}{makeOK("", false, "missing event")}, nil
}
// Parse the event
ev, err := parseEventFromRawJSON(message[1])
if err != nil {
return []interface{}{makeOK("", false, fmt.Sprintf("invalid event: %s", err))}, nil
}
eventIDHex := hex.Enc(ev.ID)
// Save to database
replaced, err := b.db.SaveEvent(b.ctx, ev)
if err != nil {
return []interface{}{makeOK(eventIDHex, false, err.Error())}, nil
}
var msg string
if replaced {
msg = "replaced"
} else {
msg = "saved"
}
return []interface{}{makeOK(eventIDHex, true, msg)}, nil
}
// handleReq processes a REQ message
// ["REQ", <sub_id>, <filter>...] -> ["EVENT", <sub_id>, <event>]..., ["EOSE", <sub_id>]
func (b *JSBridge) handleReq(message []json.RawMessage) (interface{}, error) {
if len(message) < 2 {
return nil, fmt.Errorf("REQ requires subscription ID")
}
// Get subscription ID
var subID string
if err := json.Unmarshal(message[1], &subID); err != nil {
return nil, fmt.Errorf("invalid subscription ID: %w", err)
}
// Parse filters (can have multiple)
var allEvents []*event.E
for i := 2; i < len(message); i++ {
f, err := parseFilterFromRawJSON(message[i])
if err != nil {
continue
}
events, err := b.db.QueryEvents(b.ctx, f)
if err != nil {
continue
}
allEvents = append(allEvents, events...)
}
// Build response messages
responses := make([]interface{}, 0, len(allEvents)+1)
// Add EVENT messages
for _, ev := range allEvents {
eventJSON, err := eventToJSON(ev)
if err != nil {
continue
}
responses = append(responses, makeEvent(subID, string(eventJSON)))
}
// Add EOSE
responses = append(responses, makeEOSE(subID))
return responses, nil
}
// handleClose processes a CLOSE message
// ["CLOSE", <sub_id>] -> (no response for local relay)
func (b *JSBridge) handleClose(message []json.RawMessage) (interface{}, error) {
// For the local relay, subscriptions are stateless (single query/response)
// CLOSE is a no-op but we acknowledge it
return []interface{}{}, nil
}
// handleMarker processes MARKER extension messages
// ["MARKER", "set", key, value] -> ["OK", key, true]
// ["MARKER", "get", key] -> ["MARKER", key, value] or ["MARKER", key, null]
// ["MARKER", "has", key] -> ["MARKER", key, true/false]
// ["MARKER", "delete", key] -> ["OK", key, true]
func (b *JSBridge) handleMarker(message []json.RawMessage) (interface{}, error) {
if len(message) < 3 {
return nil, fmt.Errorf("MARKER requires operation and key")
}
var operation string
if err := json.Unmarshal(message[1], &operation); err != nil {
return nil, fmt.Errorf("invalid marker operation: %w", err)
}
var key string
if err := json.Unmarshal(message[2], &key); err != nil {
return nil, fmt.Errorf("invalid marker key: %w", err)
}
switch operation {
case "set":
if len(message) < 4 {
return nil, fmt.Errorf("MARKER set requires value")
}
var value string
if err := json.Unmarshal(message[3], &value); err != nil {
return nil, fmt.Errorf("invalid marker value: %w", err)
}
if err := b.db.SetMarker(key, []byte(value)); err != nil {
return []interface{}{makeMarkerOK(key, false, err.Error())}, nil
}
return []interface{}{makeMarkerOK(key, true, "")}, nil
case "get":
value, err := b.db.GetMarker(key)
if err != nil || value == nil {
return []interface{}{makeMarkerResult(key, nil)}, nil
}
valueStr := string(value)
return []interface{}{makeMarkerResult(key, &valueStr)}, nil
case "delete":
if err := b.db.DeleteMarker(key); err != nil {
return []interface{}{makeMarkerOK(key, false, err.Error())}, nil
}
return []interface{}{makeMarkerOK(key, true, "")}, nil
case "has":
has := b.db.HasMarker(key)
return []interface{}{makeMarkerHas(key, has)}, nil
default:
return nil, fmt.Errorf("unknown marker operation: %s", operation)
}
}
// jsQueryGraph handles graph query extensions
// Args: [queryJSON: string] - JSON-encoded graph query
// Returns: Promise<string> - JSON-encoded graph result
func (b *JSBridge) jsQueryGraph(this js.Value, args []js.Value) interface{} {
if len(args) < 1 {
return rejectPromise("queryGraph requires query JSON argument")
}
queryJSON := args[0].String()
return promiseWrapper(func() (interface{}, error) {
var query struct {
Type string `json:"type"`
Pubkey string `json:"pubkey"`
Depth int `json:"depth,omitempty"`
Limit int `json:"limit,omitempty"`
}
if err := json.Unmarshal([]byte(queryJSON), &query); err != nil {
return nil, fmt.Errorf("invalid graph query: %w", err)
}
// Set defaults
if query.Depth == 0 {
query.Depth = 1
}
if query.Limit == 0 {
query.Limit = 1000
}
switch query.Type {
case "follows":
return b.queryFollows(query.Pubkey, query.Depth, query.Limit)
case "followers":
return b.queryFollowers(query.Pubkey, query.Limit)
case "mutes":
return b.queryMutes(query.Pubkey)
default:
return nil, fmt.Errorf("unknown graph query type: %s", query.Type)
}
})
}
// queryFollows returns who a pubkey follows
func (b *JSBridge) queryFollows(pubkeyHex string, depth, limit int) (interface{}, error) {
// Query kind 3 (contact list) for the pubkey
f := &filter.F{
Kinds: kind.NewWithCap(1),
}
f.Kinds.K = append(f.Kinds.K, kind.New(3))
f.Authors = tag.NewWithCap(1)
f.Authors.T = append(f.Authors.T, []byte(pubkeyHex))
one := uint(1)
f.Limit = &one
events, err := b.db.QueryEvents(b.ctx, f)
if err != nil {
return nil, err
}
var follows []string
if len(events) > 0 && events[0].Tags != nil {
for _, t := range *events[0].Tags {
if t != nil && len(t.T) >= 2 && string(t.T[0]) == "p" {
follows = append(follows, string(t.T[1]))
}
}
}
result := map[string]interface{}{
"nodes": follows,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
return nil, err
}
return string(jsonBytes), nil
}
// queryFollowers returns who follows a pubkey
func (b *JSBridge) queryFollowers(pubkeyHex string, limit int) (interface{}, error) {
// Query kind 3 events that tag this pubkey
f := &filter.F{
Kinds: kind.NewWithCap(1),
Tags: tag.NewSWithCap(1),
}
f.Kinds.K = append(f.Kinds.K, kind.New(3))
// Add #p tag filter
pTag := tag.NewWithCap(2)
pTag.T = append(pTag.T, []byte("p"))
pTag.T = append(pTag.T, []byte(pubkeyHex))
f.Tags.Append(pTag)
lim := uint(limit)
f.Limit = &lim
events, err := b.db.QueryEvents(b.ctx, f)
if err != nil {
return nil, err
}
var followers []string
for _, ev := range events {
followers = append(followers, hex.Enc(ev.Pubkey))
}
result := map[string]interface{}{
"nodes": followers,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
return nil, err
}
return string(jsonBytes), nil
}
// queryMutes returns who a pubkey has muted
func (b *JSBridge) queryMutes(pubkeyHex string) (interface{}, error) {
// Query kind 10000 (mute list) for the pubkey
f := &filter.F{
Kinds: kind.NewWithCap(1),
}
f.Kinds.K = append(f.Kinds.K, kind.New(10000))
f.Authors = tag.NewWithCap(1)
f.Authors.T = append(f.Authors.T, []byte(pubkeyHex))
one := uint(1)
f.Limit = &one
events, err := b.db.QueryEvents(b.ctx, f)
if err != nil {
return nil, err
}
var mutes []string
if len(events) > 0 && events[0].Tags != nil {
for _, t := range *events[0].Tags {
if t != nil && len(t.T) >= 2 && string(t.T[0]) == "p" {
mutes = append(mutes, string(t.T[1]))
}
}
}
result := map[string]interface{}{
"nodes": mutes,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
return nil, err
}
return string(jsonBytes), nil
}
// Response message builders
func makeOK(eventID string, accepted bool, message string) string {
msg := []interface{}{"OK", eventID, accepted, message}
jsonBytes, _ := json.Marshal(msg)
return string(jsonBytes)
}
func makeEvent(subID, eventJSON string) string {
// We return the raw event JSON embedded in the array
return fmt.Sprintf(`["EVENT","%s",%s]`, subID, eventJSON)
}
func makeEOSE(subID string) string {
msg := []interface{}{"EOSE", subID}
jsonBytes, _ := json.Marshal(msg)
return string(jsonBytes)
}
func makeMarkerOK(key string, success bool, message string) string {
msg := []interface{}{"OK", key, success}
if message != "" {
msg = append(msg, message)
}
jsonBytes, _ := json.Marshal(msg)
return string(jsonBytes)
}
func makeMarkerResult(key string, value *string) string {
var msg []interface{}
if value == nil {
msg = []interface{}{"MARKER", key, nil}
} else {
msg = []interface{}{"MARKER", key, *value}
}
jsonBytes, _ := json.Marshal(msg)
return string(jsonBytes)
}
func makeMarkerHas(key string, has bool) string {
msg := []interface{}{"MARKER", key, has}
jsonBytes, _ := json.Marshal(msg)
return string(jsonBytes)
}
// Helper functions
// promiseWrapper wraps a function in a JavaScript Promise
func promiseWrapper(fn func() (interface{}, error)) interface{} {
handler := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
resolve := args[0]
reject := args[1]
go func() {
result, err := fn()
if err != nil {
reject.Invoke(err.Error())
} else {
resolve.Invoke(result)
}
}()
return nil
})
promiseConstructor := js.Global().Get("Promise")
return promiseConstructor.New(handler)
}
// rejectPromise creates a rejected promise with an error message
func rejectPromise(msg string) interface{} {
handler := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
reject := args[1]
reject.Invoke(msg)
return nil
})
promiseConstructor := js.Global().Get("Promise")
return promiseConstructor.New(handler)
}
// parseEventFromRawJSON parses a Nostr event from raw JSON
func parseEventFromRawJSON(raw json.RawMessage) (*event.E, error) {
return parseEventFromJSON(string(raw))
}
// parseEventFromJSON parses a Nostr event from JSON
func parseEventFromJSON(jsonStr string) (*event.E, error) {
// Parse into intermediate struct for JSON compatibility
var raw struct {
ID string `json:"id"`
Pubkey string `json:"pubkey"`
CreatedAt int64 `json:"created_at"`
Kind int `json:"kind"`
Tags [][]string `json:"tags"`
Content string `json:"content"`
Sig string `json:"sig"`
}
if err := json.Unmarshal([]byte(jsonStr), &raw); err != nil {
return nil, err
}
ev := &event.E{
Kind: uint16(raw.Kind),
CreatedAt: raw.CreatedAt,
Content: []byte(raw.Content),
}
// Decode ID
if id, err := hex.Dec(raw.ID); err == nil && len(id) == 32 {
ev.ID = id
}
// Decode Pubkey
if pk, err := hex.Dec(raw.Pubkey); err == nil && len(pk) == 32 {
ev.Pubkey = pk
}
// Decode Sig
if sig, err := hex.Dec(raw.Sig); err == nil && len(sig) == 64 {
ev.Sig = sig
}
// Convert tags
if len(raw.Tags) > 0 {
ev.Tags = tag.NewSWithCap(len(raw.Tags))
for _, t := range raw.Tags {
tagBytes := make([][]byte, len(t))
for i, s := range t {
tagBytes[i] = []byte(s)
}
newTag := tag.NewFromBytesSlice(tagBytes...)
ev.Tags.Append(newTag)
}
}
return ev, nil
}
// parseFilterFromRawJSON parses a Nostr filter from raw JSON
func parseFilterFromRawJSON(raw json.RawMessage) (*filter.F, error) {
return parseFilterFromJSON(string(raw))
}
// parseFilterFromJSON parses a Nostr filter from JSON
func parseFilterFromJSON(jsonStr string) (*filter.F, error) {
// Parse into intermediate struct
var raw struct {
IDs []string `json:"ids,omitempty"`
Authors []string `json:"authors,omitempty"`
Kinds []int `json:"kinds,omitempty"`
Since *int64 `json:"since,omitempty"`
Until *int64 `json:"until,omitempty"`
Limit *uint `json:"limit,omitempty"`
Search *string `json:"search,omitempty"`
}
if err := json.Unmarshal([]byte(jsonStr), &raw); err != nil {
return nil, err
}
f := &filter.F{}
// Set IDs
if len(raw.IDs) > 0 {
f.Ids = tag.NewWithCap(len(raw.IDs))
for _, idHex := range raw.IDs {
f.Ids.T = append(f.Ids.T, []byte(idHex))
}
}
// Set Authors
if len(raw.Authors) > 0 {
f.Authors = tag.NewWithCap(len(raw.Authors))
for _, pkHex := range raw.Authors {
f.Authors.T = append(f.Authors.T, []byte(pkHex))
}
}
// Set Kinds
if len(raw.Kinds) > 0 {
f.Kinds = kind.NewWithCap(len(raw.Kinds))
for _, k := range raw.Kinds {
f.Kinds.K = append(f.Kinds.K, kind.New(uint16(k)))
}
}
// Set timestamps
if raw.Since != nil {
f.Since = timestamp.New(*raw.Since)
}
if raw.Until != nil {
f.Until = timestamp.New(*raw.Until)
}
// Set limit
if raw.Limit != nil {
f.Limit = raw.Limit
}
// Set search
if raw.Search != nil {
f.Search = []byte(*raw.Search)
}
// Handle tag filters (e.g., #e, #p, #t)
var rawMap map[string]interface{}
json.Unmarshal([]byte(jsonStr), &rawMap)
for key, val := range rawMap {
if len(key) == 2 && key[0] == '#' {
if arr, ok := val.([]interface{}); ok {
tagFilter := tag.NewWithCap(len(arr) + 1)
// First element is the tag name (e.g., "e", "p")
tagFilter.T = append(tagFilter.T, []byte{key[1]})
for _, v := range arr {
if s, ok := v.(string); ok {
tagFilter.T = append(tagFilter.T, []byte(s))
}
}
if f.Tags == nil {
f.Tags = tag.NewSWithCap(4)
}
f.Tags.Append(tagFilter)
}
}
}
return f, nil
}
// eventToJSON converts a Nostr event to JSON
func eventToJSON(ev *event.E) ([]byte, error) {
// Build tags array
var tags [][]string
if ev.Tags != nil {
for _, t := range *ev.Tags {
if t == nil {
continue
}
tagStrs := make([]string, len(t.T))
for i, elem := range t.T {
tagStrs[i] = string(elem)
}
tags = append(tags, tagStrs)
}
}
raw := struct {
ID string `json:"id"`
Pubkey string `json:"pubkey"`
CreatedAt int64 `json:"created_at"`
Kind int `json:"kind"`
Tags [][]string `json:"tags"`
Content string `json:"content"`
Sig string `json:"sig"`
}{
ID: hex.Enc(ev.ID),
Pubkey: hex.Enc(ev.Pubkey),
CreatedAt: ev.CreatedAt,
Kind: int(ev.Kind),
Tags: tags,
Content: string(ev.Content),
Sig: hex.Enc(ev.Sig),
}
buf := new(bytes.Buffer)
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
if err := enc.Encode(raw); err != nil {
return nil, err
}
// Remove trailing newline from encoder
result := buf.Bytes()
if len(result) > 0 && result[len(result)-1] == '\n' {
result = result[:len(result)-1]
}
return result, nil
}
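
For reference, a small sketch of the message shapes jsHandleMessage consumes, built with encoding/json; these are the JSON strings a JavaScript caller would pass to wasmdb.handleMessage(), and all values here are illustrative placeholders.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// ["REQ", <sub_id>, <filter>...] - query stored events
	req, _ := json.Marshal([]any{
		"REQ", "sub-1",
		map[string]any{"kinds": []int{1}, "limit": 10},
	})
	// ["MARKER", "set", key, value] - key-value extension
	marker, _ := json.Marshal([]any{"MARKER", "set", "last-sync", "1700000000"})
	// ["CLOSE", <sub_id>] - close a (stateless) subscription
	closeMsg, _ := json.Marshal([]any{"CLOSE", "sub-1"})

	for _, m := range [][]byte{req, marker, closeMsg} {
		fmt.Println(string(m))
	}
}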

65
scripts/build-wasm.sh

@ -0,0 +1,65 @@
#!/bin/bash
# Build the WasmDB WASM module for browser use
#
# Output: wasmdb.wasm in the repository root
#
# Usage:
# ./scripts/build-wasm.sh
# ./scripts/build-wasm.sh --output /path/to/output/wasmdb.wasm
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
OUTPUT_PATH="${REPO_ROOT}/wasmdb.wasm"
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--output|-o)
OUTPUT_PATH="$2"
shift 2
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
done
echo "Building WasmDB WASM module..."
echo "Output: ${OUTPUT_PATH}"
cd "${REPO_ROOT}"
# Build with optimizations
GOOS=js GOARCH=wasm go build \
-ldflags="-s -w" \
-o "${OUTPUT_PATH}" \
./cmd/wasmdb
# Get the size
SIZE=$(du -h "${OUTPUT_PATH}" | cut -f1)
echo "Build complete: ${OUTPUT_PATH} (${SIZE})"
# Copy wasm_exec.js from Go installation if not present
WASM_EXEC="${REPO_ROOT}/wasm_exec.js"
if [ ! -f "${WASM_EXEC}" ]; then
GO_ROOT=$(go env GOROOT)
# Try lib/wasm first (newer Go versions), then misc/wasm
if [ -f "${GO_ROOT}/lib/wasm/wasm_exec.js" ]; then
cp "${GO_ROOT}/lib/wasm/wasm_exec.js" "${WASM_EXEC}"
echo "Copied wasm_exec.js to ${WASM_EXEC}"
elif [ -f "${GO_ROOT}/misc/wasm/wasm_exec.js" ]; then
cp "${GO_ROOT}/misc/wasm/wasm_exec.js" "${WASM_EXEC}"
echo "Copied wasm_exec.js to ${WASM_EXEC}"
else
echo "Warning: wasm_exec.js not found in Go installation"
echo "Checked: ${GO_ROOT}/lib/wasm/wasm_exec.js"
echo "Checked: ${GO_ROOT}/misc/wasm/wasm_exec.js"
echo "You'll need to copy it manually from your Go installation"
fi
fi
echo "Done!"

575
wasm_exec.js

@ -0,0 +1,575 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
"use strict";
(() => {
const enosys = () => {
const err = new Error("not implemented");
err.code = "ENOSYS";
return err;
};
if (!globalThis.fs) {
let outputBuf = "";
globalThis.fs = {
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1, O_DIRECTORY: -1 }, // unused
writeSync(fd, buf) {
outputBuf += decoder.decode(buf);
const nl = outputBuf.lastIndexOf("\n");
if (nl != -1) {
console.log(outputBuf.substring(0, nl));
outputBuf = outputBuf.substring(nl + 1);
}
return buf.length;
},
write(fd, buf, offset, length, position, callback) {
if (offset !== 0 || length !== buf.length || position !== null) {
callback(enosys());
return;
}
const n = this.writeSync(fd, buf);
callback(null, n);
},
chmod(path, mode, callback) { callback(enosys()); },
chown(path, uid, gid, callback) { callback(enosys()); },
close(fd, callback) { callback(enosys()); },
fchmod(fd, mode, callback) { callback(enosys()); },
fchown(fd, uid, gid, callback) { callback(enosys()); },
fstat(fd, callback) { callback(enosys()); },
fsync(fd, callback) { callback(null); },
ftruncate(fd, length, callback) { callback(enosys()); },
lchown(path, uid, gid, callback) { callback(enosys()); },
link(path, link, callback) { callback(enosys()); },
lstat(path, callback) { callback(enosys()); },
mkdir(path, perm, callback) { callback(enosys()); },
open(path, flags, mode, callback) { callback(enosys()); },
read(fd, buffer, offset, length, position, callback) { callback(enosys()); },
readdir(path, callback) { callback(enosys()); },
readlink(path, callback) { callback(enosys()); },
rename(from, to, callback) { callback(enosys()); },
rmdir(path, callback) { callback(enosys()); },
stat(path, callback) { callback(enosys()); },
symlink(path, link, callback) { callback(enosys()); },
truncate(path, length, callback) { callback(enosys()); },
unlink(path, callback) { callback(enosys()); },
utimes(path, atime, mtime, callback) { callback(enosys()); },
};
}
if (!globalThis.process) {
globalThis.process = {
getuid() { return -1; },
getgid() { return -1; },
geteuid() { return -1; },
getegid() { return -1; },
getgroups() { throw enosys(); },
pid: -1,
ppid: -1,
umask() { throw enosys(); },
cwd() { throw enosys(); },
chdir() { throw enosys(); },
}
}
if (!globalThis.path) {
globalThis.path = {
resolve(...pathSegments) {
return pathSegments.join("/");
}
}
}
if (!globalThis.crypto) {
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
}
if (!globalThis.performance) {
throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");
}
if (!globalThis.TextEncoder) {
throw new Error("globalThis.TextEncoder is not available, polyfill required");
}
if (!globalThis.TextDecoder) {
throw new Error("globalThis.TextDecoder is not available, polyfill required");
}
const encoder = new TextEncoder("utf-8");
const decoder = new TextDecoder("utf-8");
globalThis.Go = class {
constructor() {
this.argv = ["js"];
this.env = {};
this.exit = (code) => {
if (code !== 0) {
console.warn("exit code:", code);
}
};
this._exitPromise = new Promise((resolve) => {
this._resolveExitPromise = resolve;
});
this._pendingEvent = null;
this._scheduledTimeouts = new Map();
this._nextCallbackTimeoutID = 1;
const setInt64 = (addr, v) => {
this.mem.setUint32(addr + 0, v, true);
this.mem.setUint32(addr + 4, Math.floor(v / 4294967296), true);
}
const setInt32 = (addr, v) => {
this.mem.setUint32(addr + 0, v, true);
}
const getInt64 = (addr) => {
const low = this.mem.getUint32(addr + 0, true);
const high = this.mem.getInt32(addr + 4, true);
return low + high * 4294967296;
}
const loadValue = (addr) => {
const f = this.mem.getFloat64(addr, true);
if (f === 0) {
return undefined;
}
if (!isNaN(f)) {
return f;
}
const id = this.mem.getUint32(addr, true);
return this._values[id];
}
const storeValue = (addr, v) => {
const nanHead = 0x7FF80000;
if (typeof v === "number" && v !== 0) {
if (isNaN(v)) {
this.mem.setUint32(addr + 4, nanHead, true);
this.mem.setUint32(addr, 0, true);
return;
}
this.mem.setFloat64(addr, v, true);
return;
}
if (v === undefined) {
this.mem.setFloat64(addr, 0, true);
return;
}
let id = this._ids.get(v);
if (id === undefined) {
id = this._idPool.pop();
if (id === undefined) {
id = this._values.length;
}
this._values[id] = v;
this._goRefCounts[id] = 0;
this._ids.set(v, id);
}
this._goRefCounts[id]++;
let typeFlag = 0;
switch (typeof v) {
case "object":
if (v !== null) {
typeFlag = 1;
}
break;
case "string":
typeFlag = 2;
break;
case "symbol":
typeFlag = 3;
break;
case "function":
typeFlag = 4;
break;
}
this.mem.setUint32(addr + 4, nanHead | typeFlag, true);
this.mem.setUint32(addr, id, true);
}
const loadSlice = (addr) => {
const array = getInt64(addr + 0);
const len = getInt64(addr + 8);
return new Uint8Array(this._inst.exports.mem.buffer, array, len);
}
const loadSliceOfValues = (addr) => {
const array = getInt64(addr + 0);
const len = getInt64(addr + 8);
const a = new Array(len);
for (let i = 0; i < len; i++) {
a[i] = loadValue(array + i * 8);
}
return a;
}
const loadString = (addr) => {
const saddr = getInt64(addr + 0);
const len = getInt64(addr + 8);
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
}
const testCallExport = (a, b) => {
this._inst.exports.testExport0();
return this._inst.exports.testExport(a, b);
}
const timeOrigin = Date.now() - performance.now();
this.importObject = {
_gotest: {
add: (a, b) => a + b,
callExport: testCallExport,
},
gojs: {
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
// may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported
// function. A goroutine can switch to a new stack if the current stack is too small (see morestack function).
// This changes the SP, thus we have to update the SP used by the imported function.
// func wasmExit(code int32)
"runtime.wasmExit": (sp) => {
sp >>>= 0;
const code = this.mem.getInt32(sp + 8, true);
this.exited = true;
delete this._inst;
delete this._values;
delete this._goRefCounts;
delete this._ids;
delete this._idPool;
this.exit(code);
},
// func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
"runtime.wasmWrite": (sp) => {
sp >>>= 0;
const fd = getInt64(sp + 8);
const p = getInt64(sp + 16);
const n = this.mem.getInt32(sp + 24, true);
fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n));
},
// func resetMemoryDataView()
"runtime.resetMemoryDataView": (sp) => {
sp >>>= 0;
this.mem = new DataView(this._inst.exports.mem.buffer);
},
// func nanotime1() int64
"runtime.nanotime1": (sp) => {
sp >>>= 0;
setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
},
// func walltime() (sec int64, nsec int32)
"runtime.walltime": (sp) => {
sp >>>= 0;
const msec = (new Date).getTime();
setInt64(sp + 8, msec / 1000);
this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true);
},
// func scheduleTimeoutEvent(delay int64) int32
"runtime.scheduleTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this._nextCallbackTimeoutID;
this._nextCallbackTimeoutID++;
this._scheduledTimeouts.set(id, setTimeout(
() => {
this._resume();
while (this._scheduledTimeouts.has(id)) {
// for some reason Go failed to register the timeout event, log and try again
// (temporary workaround for https://github.com/golang/go/issues/28975)
console.warn("scheduleTimeoutEvent: missed timeout event");
this._resume();
}
},
getInt64(sp + 8),
));
this.mem.setInt32(sp + 16, id, true);
},
// func clearTimeoutEvent(id int32)
"runtime.clearTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this.mem.getInt32(sp + 8, true);
clearTimeout(this._scheduledTimeouts.get(id));
this._scheduledTimeouts.delete(id);
},
// func getRandomData(r []byte)
"runtime.getRandomData": (sp) => {
sp >>>= 0;
crypto.getRandomValues(loadSlice(sp + 8));
},
// func finalizeRef(v ref)
"syscall/js.finalizeRef": (sp) => {
sp >>>= 0;
const id = this.mem.getUint32(sp + 8, true);
this._goRefCounts[id]--;
if (this._goRefCounts[id] === 0) {
const v = this._values[id];
this._values[id] = null;
this._ids.delete(v);
this._idPool.push(id);
}
},
// func stringVal(value string) ref
"syscall/js.stringVal": (sp) => {
sp >>>= 0;
storeValue(sp + 24, loadString(sp + 8));
},
// func valueGet(v ref, p string) ref
"syscall/js.valueGet": (sp) => {
sp >>>= 0;
const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16));
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 32, result);
},
// func valueSet(v ref, p string, x ref)
"syscall/js.valueSet": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32));
},
// func valueDelete(v ref, p string)
"syscall/js.valueDelete": (sp) => {
sp >>>= 0;
Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16));
},
// func valueIndex(v ref, i int) ref
"syscall/js.valueIndex": (sp) => {
sp >>>= 0;
storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16)));
},
// valueSetIndex(v ref, i int, x ref)
"syscall/js.valueSetIndex": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24));
},
// func valueCall(v ref, m string, args []ref) (ref, bool)
"syscall/js.valueCall": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const m = Reflect.get(v, loadString(sp + 16));
const args = loadSliceOfValues(sp + 32);
const result = Reflect.apply(m, v, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, result);
this.mem.setUint8(sp + 64, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, err);
this.mem.setUint8(sp + 64, 0);
}
},
// func valueInvoke(v ref, args []ref) (ref, bool)
"syscall/js.valueInvoke": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.apply(v, undefined, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}
},
// func valueNew(v ref, args []ref) (ref, bool)
"syscall/js.valueNew": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.construct(v, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}
},
// func valueLength(v ref) int
"syscall/js.valueLength": (sp) => {
sp >>>= 0;
setInt64(sp + 16, parseInt(loadValue(sp + 8).length));
},
// valuePrepareString(v ref) (ref, int)
"syscall/js.valuePrepareString": (sp) => {
sp >>>= 0;
const str = encoder.encode(String(loadValue(sp + 8)));
storeValue(sp + 16, str);
setInt64(sp + 24, str.length);
},
// valueLoadString(v ref, b []byte)
"syscall/js.valueLoadString": (sp) => {
sp >>>= 0;
const str = loadValue(sp + 8);
loadSlice(sp + 16).set(str);
},
// func valueInstanceOf(v ref, t ref) bool
"syscall/js.valueInstanceOf": (sp) => {
sp >>>= 0;
this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 1 : 0);
},
// func copyBytesToGo(dst []byte, src ref) (int, bool)
"syscall/js.copyBytesToGo": (sp) => {
sp >>>= 0;
const dst = loadSlice(sp + 8);
const src = loadValue(sp + 32);
if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) {
this.mem.setUint8(sp + 48, 0);
return;
}
const toCopy = src.subarray(0, dst.length);
dst.set(toCopy);
setInt64(sp + 40, toCopy.length);
this.mem.setUint8(sp + 48, 1);
},
// func copyBytesToJS(dst ref, src []byte) (int, bool)
"syscall/js.copyBytesToJS": (sp) => {
sp >>>= 0;
const dst = loadValue(sp + 8);
const src = loadSlice(sp + 16);
if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) {
this.mem.setUint8(sp + 48, 0);
return;
}
const toCopy = src.subarray(0, dst.length);
dst.set(toCopy);
setInt64(sp + 40, toCopy.length);
this.mem.setUint8(sp + 48, 1);
},
"debug": (value) => {
console.log(value);
},
}
};
}
async run(instance) {
if (!(instance instanceof WebAssembly.Instance)) {
throw new Error("Go.run: WebAssembly.Instance expected");
}
this._inst = instance;
this.mem = new DataView(this._inst.exports.mem.buffer);
this._values = [ // JS values that Go currently has references to, indexed by reference id
NaN,
0,
null,
true,
false,
globalThis,
this,
];
this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id
this._ids = new Map([ // mapping from JS values to reference ids
[0, 1],
[null, 2],
[true, 3],
[false, 4],
[globalThis, 5],
[this, 6],
]);
this._idPool = []; // unused ids that have been garbage collected
this.exited = false; // whether the Go program has exited
// Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory.
let offset = 4096;
const strPtr = (str) => {
const ptr = offset;
const bytes = encoder.encode(str + "\0");
new Uint8Array(this.mem.buffer, offset, bytes.length).set(bytes);
offset += bytes.length;
if (offset % 8 !== 0) {
offset += 8 - (offset % 8);
}
return ptr;
};
const argc = this.argv.length;
const argvPtrs = [];
this.argv.forEach((arg) => {
argvPtrs.push(strPtr(arg));
});
argvPtrs.push(0);
const keys = Object.keys(this.env).sort();
keys.forEach((key) => {
argvPtrs.push(strPtr(`${key}=${this.env[key]}`));
});
argvPtrs.push(0);
const argv = offset;
argvPtrs.forEach((ptr) => {
this.mem.setUint32(offset, ptr, true);
this.mem.setUint32(offset + 4, 0, true);
offset += 8;
});
// The linker guarantees global data starts from at least wasmMinDataAddr.
// Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
const wasmMinDataAddr = 4096 + 8192;
if (offset >= wasmMinDataAddr) {
throw new Error("total length of command line and environment variables exceeds limit");
}
this._inst.exports.run(argc, argv);
if (this.exited) {
this._resolveExitPromise();
}
await this._exitPromise;
}
_resume() {
if (this.exited) {
throw new Error("Go program has already exited");
}
this._inst.exports.resume();
if (this.exited) {
this._resolveExitPromise();
}
}
_makeFuncWrapper(id) {
const go = this;
return function () {
const event = { id: id, this: this, args: arguments };
go._pendingEvent = event;
go._resume();
return event.result;
};
}
}
})();