Browse Source

Add curation ACL mode and complete graph query implementation (v0.47.0)

Curation Mode:
- Three-tier publisher classification: Trusted, Blacklisted, Unclassified
- Per-pubkey rate limiting (default 50/day) for unclassified users
- IP flood protection (default 500/day) with automatic banning
- Event kind allow-listing via categories, ranges, and custom kinds
- Query filtering hides blacklisted pubkey events (admin/owner exempt)
- Web UI for managing trusted/blacklisted pubkeys and configuration
- NIP-86 API endpoints for all curation management operations

Graph Query Extension:
- Complete reference aggregation for Badger and Neo4j backends
- E-tag graph backfill migration (v8) runs automatically on startup
- Configuration options: ORLY_GRAPH_QUERIES_ENABLED, MAX_DEPTH, etc.
- NIP-11 advertisement of graph query capabilities

Files modified:
- app/handle-nip86-curating.go: NIP-86 curation API handlers (new)
- app/web/src/CurationView.svelte: Curation management UI (new)
- app/web/src/kindCategories.js: Kind category definitions (new)
- pkg/acl/curating.go: Curating ACL implementation (new)
- pkg/database/curating-acl.go: Database layer for curation (new)
- pkg/neo4j/graph-refs.go: Neo4j ref collection (new)
- pkg/database/migrations.go: E-tag graph backfill migration
- pkg/protocol/graph/executor.go: Reference aggregation support
- app/handle-event.go: Curation config event processing
- app/handle-req.go: Blacklist filtering for queries
- docs/GRAPH_QUERIES_REMAINING_PLAN.md: Updated completion status

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
main
woikos 6 days ago
parent
commit
047cdf3472
No known key found for this signature in database
  1. 49
      app/config/config.go
  2. 43
      app/handle-event.go
  3. 593
      app/handle-nip86-curating.go
  4. 13
      app/handle-nip86.go
  5. 40
      app/handle-relayinfo.go
  6. 21
      app/handle-req.go
  7. 16
      app/main.go
  8. 1
      app/web/dist/bundle.css
  9. 28
      app/web/dist/bundle.js
  10. 2
      app/web/dist/bundle.js.map
  11. 45
      app/web/src/App.svelte
  12. 1232
      app/web/src/CurationView.svelte
  13. 160
      app/web/src/kindCategories.js
  14. 778
      docs/GRAPH_QUERIES_REMAINING_PLAN.md
  15. 1
      go.mod
  16. 2
      go.sum
  17. 110
      main.go
  18. 699
      pkg/acl/curating.go
  19. 989
      pkg/database/curating-acl.go
  20. 22
      pkg/database/graph-adapter.go
  21. 10
      pkg/database/graph-result.go
  22. 191
      pkg/database/migrations.go
  23. 22
      pkg/neo4j/graph-adapter.go
  24. 163
      pkg/neo4j/graph-refs.go
  25. 52
      pkg/neo4j/graph-result.go
  26. 97
      pkg/protocol/graph/executor.go
  27. 4
      pkg/ratelimit/memory.go
  28. 2
      pkg/version/version

49
app/config/config.go

@ -53,7 +53,7 @@ type C struct { @@ -53,7 +53,7 @@ type C struct {
IPBlacklist []string `env:"ORLY_IP_BLACKLIST" usage:"comma-separated list of IP addresses to block; matches on prefixes to allow subnets, e.g. 192.168 = 192.168.0.0/16"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows, managed (nip-86), none" default:"none"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows, managed (nip-86), curating, none" default:"none"`
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" usage:"require authentication for all requests (works with managed ACL)" default:"false"`
AuthToWrite bool `env:"ORLY_AUTH_TO_WRITE" usage:"require authentication only for write operations (EVENT), allow REQ/COUNT without auth" default:"false"`
BootstrapRelays []string `env:"ORLY_BOOTSTRAP_RELAYS" usage:"comma-separated list of bootstrap relay URLs for initial sync"`
@ -167,6 +167,12 @@ type C struct { @@ -167,6 +167,12 @@ type C struct {
// Cluster replication configuration
ClusterPropagatePrivilegedEvents bool `env:"ORLY_CLUSTER_PROPAGATE_PRIVILEGED_EVENTS" default:"true" usage:"propagate privileged events (DMs, gift wraps, etc.) to relay peers for replication"`
// Graph query configuration (NIP-XX)
GraphQueriesEnabled bool `env:"ORLY_GRAPH_QUERIES_ENABLED" default:"true" usage:"enable graph traversal queries (_graph filter extension)"`
GraphMaxDepth int `env:"ORLY_GRAPH_MAX_DEPTH" default:"16" usage:"maximum depth for graph traversal queries (1-16)"`
GraphMaxResults int `env:"ORLY_GRAPH_MAX_RESULTS" default:"10000" usage:"maximum pubkeys/events returned per graph query"`
GraphRateLimitRPM int `env:"ORLY_GRAPH_RATE_LIMIT_RPM" default:"60" usage:"graph queries per minute per connection (0=unlimited)"`
// Archive relay configuration (query augmentation from authoritative archives)
ArchiveEnabled bool `env:"ORLY_ARCHIVE_ENABLED" default:"false" usage:"enable archive relay query augmentation (fetch from archives, cache locally)"`
ArchiveRelays []string `env:"ORLY_ARCHIVE_RELAYS" default:"wss://archive.orly.dev/" usage:"comma-separated list of archive relay URLs for query augmentation"`
@ -332,6 +338,25 @@ func VersionRequested() (requested bool) { @@ -332,6 +338,25 @@ func VersionRequested() (requested bool) {
return
}
// CuratingModeRequested reports whether the process was started with the
// "curatingmode" subcommand (case-insensitive) and, if so, returns the
// owner key supplied as the following argument.
//
// Return Values
//   - requested: true if the 'curatingmode' subcommand was provided
//   - ownerKey: the npub or hex pubkey provided as the second argument
//     (empty if not provided)
func CuratingModeRequested() (requested bool, ownerKey string) {
	args := os.Args
	if len(args) < 2 {
		return false, ""
	}
	if !strings.EqualFold(args[1], "curatingmode") {
		return false, ""
	}
	requested = true
	if len(args) > 2 {
		ownerKey = args[2]
	}
	return requested, ownerKey
}
// KV is a key/value pair.
type KV struct{ Key, Value string }
@ -660,3 +685,25 @@ func (cfg *C) GetTorConfigValues() ( @@ -660,3 +685,25 @@ func (cfg *C) GetTorConfigValues() (
cfg.TorBinary,
cfg.TorSOCKS
}
// GetGraphConfigValues returns the graph query configuration values.
// This avoids circular imports with pkg/protocol/graph while allowing main.go
// to construct the graph executor configuration. The reported depth is
// clamped into the supported [1, 16] range before being returned.
func (cfg *C) GetGraphConfigValues() (
	enabled bool,
	maxDepth int,
	maxResults int,
	rateLimitRPM int,
) {
	depth := cfg.GraphMaxDepth
	switch {
	case depth < 1:
		depth = 1
	case depth > 16:
		depth = 16
	}
	return cfg.GraphQueriesEnabled, depth, cfg.GraphMaxResults, cfg.GraphRateLimitRPM
}

43
app/handle-event.go

@ -12,6 +12,7 @@ import ( @@ -12,6 +12,7 @@ import (
"git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope"
"git.mleku.dev/mleku/nostr/encoders/envelopes/noticeenvelope"
"git.mleku.dev/mleku/nostr/encoders/envelopes/okenvelope"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/reason"
@ -175,6 +176,29 @@ func (l *Listener) HandleEvent(msg []byte) (err error) { @@ -175,6 +176,29 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
log.E.F("failed to process NIP-43 leave request: %v", err)
}
return
case acl.CuratingConfigKind:
// Handle curating configuration events (kind 30078 with d-tag "curating-config")
// Check if this is a curating config event (verify d-tag)
dTag := env.E.Tags.GetFirst([]byte("d"))
if dTag != nil && string(dTag.Value()) == acl.CuratingConfigDTag {
if err = l.HandleCuratingConfigUpdate(env.E); chk.E(err) {
log.E.F("failed to process curating config update: %v", err)
if err = Ok.Error(l, env, err.Error()); chk.E(err) {
return
}
return
}
// Save the event and send OK response
result := l.eventProcessor.Process(context.Background(), env.E)
if result.Error != nil {
log.E.F("failed to save curating config event: %v", result.Error)
}
if err = Ok.Ok(l, env, "curating configuration updated"); chk.E(err) {
return
}
return
}
// Not a curating config event, continue with normal processing
case kind.PolicyConfig.K:
// Handle policy configuration update events (kind 12345)
// Only policy admins can update policy configuration
@ -324,3 +348,22 @@ func (l *Listener) isPeerRelayPubkey(pubkey []byte) bool { @@ -324,3 +348,22 @@ func (l *Listener) isPeerRelayPubkey(pubkey []byte) bool {
return false
}
// HandleCuratingConfigUpdate processes curating configuration events (kind 30078)
// by forwarding them to the active curating ACL instance. It is a no-op
// (returning nil) when the relay is not running in curating mode or no
// curating instance is registered.
func (l *Listener) HandleCuratingConfigUpdate(ev *event.E) error {
	// Only act when the curating ACL is the active mode.
	if acl.Registry.Type() != "curating" {
		return nil
	}
	// Locate the curating ACL instance and hand it the config event.
	for _, instance := range acl.Registry.ACL {
		if instance.Type() != "curating" {
			continue
		}
		if curating, ok := instance.(*acl.Curating); ok {
			return curating.ProcessConfigEvent(ev)
		}
	}
	return nil
}

593
app/handle-nip86-curating.go

@ -0,0 +1,593 @@ @@ -0,0 +1,593 @@
package app
import (
"context"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"strconv"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/database"
"git.mleku.dev/mleku/nostr/httpauth"
)
// handleCuratingNIP86Request handles curating NIP-86 requests with pre-authenticated pubkey.
// This is called from the main NIP-86 handler after authentication.
//
// The caller is responsible for NIP-98 authentication and permission checks;
// this function only locates the curating ACL instance, decodes the JSON-RPC
// request body, dispatches the method, and writes the JSON response.
func (s *Server) handleCuratingNIP86Request(w http.ResponseWriter, r *http.Request, pubkey []byte) {
	_ = pubkey // Pubkey already validated by caller
	// Get the curating ACL instance by scanning the ACL registry for the
	// entry that reports type "curating".
	var curatingACL *acl.Curating
	for _, aclInstance := range acl.Registry.ACL {
		if aclInstance.Type() == "curating" {
			if curating, ok := aclInstance.(*acl.Curating); ok {
				curatingACL = curating
				break
			}
		}
	}
	if curatingACL == nil {
		// No curating instance registered: this is a server configuration
		// problem, not a client error.
		http.Error(w, "Curating ACL not available", http.StatusInternalServerError)
		return
	}
	// Read and parse the request
	body, err := io.ReadAll(r.Body)
	if chk.E(err) {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}
	var request NIP86Request
	if err := json.Unmarshal(body, &request); chk.E(err) {
		http.Error(w, "Invalid JSON request", http.StatusBadRequest)
		return
	}
	// Set response headers
	w.Header().Set("Content-Type", "application/json")
	// Handle the request based on method
	response := s.handleCuratingNIP86Method(request, curatingACL)
	// Send response. NOTE(review): the w.Write error is ignored here; the
	// connection is client-owned at this point so there is little to do.
	jsonData, err := json.Marshal(response)
	if chk.E(err) {
		http.Error(w, "Error generating response", http.StatusInternalServerError)
		return
	}
	w.Write(jsonData)
}
// handleCuratingNIP86Management handles NIP-86 management API requests for curating mode (standalone)
//
// It performs the full gatekeeping sequence in order: HTTP method check,
// Content-Type check, NIP-98 authentication, owner/admin permission check,
// and ACL-mode check, before delegating to handleCuratingNIP86Request.
func (s *Server) handleCuratingNIP86Management(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Check Content-Type: NIP-86 mandates this specific RPC content type.
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/nostr+json+rpc" {
		http.Error(w, "Content-Type must be application/nostr+json+rpc", http.StatusBadRequest)
		return
	}
	// Validate NIP-98 authentication; the returned pubkey identifies the caller.
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}
	// Check permissions - require owner or admin level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" && accessLevel != "admin" {
		http.Error(w, "Owner or admin permission required", http.StatusForbidden)
		return
	}
	// Check if curating ACL is active
	if acl.Registry.Type() != "curating" {
		http.Error(w, "Curating ACL mode is not active", http.StatusBadRequest)
		return
	}
	// Delegate to shared request handler
	s.handleCuratingNIP86Request(w, r, pubkey)
}
// handleCuratingNIP86Method dispatches a single NIP-86 JSON-RPC method for
// curating mode and returns the response to be serialized to the client.
// Unknown method names produce an error response rather than a panic.
func (s *Server) handleCuratingNIP86Method(request NIP86Request, curatingACL *acl.Curating) NIP86Response {
	// Database-layer handle used by the read/list style methods.
	store := curatingACL.GetCuratingACL()
	method := request.Method
	switch method {
	case "supportedmethods":
		return s.handleCuratingSupportedMethods()
	// Trust list management.
	case "trustpubkey":
		return s.handleTrustPubkey(request.Params, curatingACL)
	case "untrustpubkey":
		return s.handleUntrustPubkey(request.Params, curatingACL)
	case "listtrustedpubkeys":
		return s.handleListTrustedPubkeys(store)
	// Blacklist management.
	case "blacklistpubkey":
		return s.handleBlacklistPubkey(request.Params, curatingACL)
	case "unblacklistpubkey":
		return s.handleUnblacklistPubkey(request.Params, curatingACL)
	case "listblacklistedpubkeys":
		return s.handleListBlacklistedPubkeys(store)
	case "listunclassifiedusers":
		return s.handleListUnclassifiedUsers(request.Params, store)
	// Spam flagging and event removal.
	case "markspam":
		return s.handleMarkSpam(request.Params, store)
	case "unmarkspam":
		return s.handleUnmarkSpam(request.Params, store)
	case "listspamevents":
		return s.handleListSpamEvents(store)
	case "deleteevent":
		return s.handleDeleteEvent(request.Params)
	// Configuration and IP management.
	case "getcuratingconfig":
		return s.handleGetCuratingConfig(store)
	case "listblockedips":
		return s.handleListCuratingBlockedIPs(store)
	case "unblockip":
		return s.handleUnblockCuratingIP(request.Params, store)
	case "isconfigured":
		return s.handleIsConfigured(store)
	default:
		return NIP86Response{Error: "Unknown method: " + method}
	}
}
// handleCuratingSupportedMethods returns the list of NIP-86 method names
// the relay supports while running in curating ACL mode. The list must
// stay in sync with the dispatch in handleCuratingNIP86Method.
func (s *Server) handleCuratingSupportedMethods() NIP86Response {
	supported := []string{
		"supportedmethods",
		"trustpubkey", "untrustpubkey", "listtrustedpubkeys",
		"blacklistpubkey", "unblacklistpubkey", "listblacklistedpubkeys",
		"listunclassifiedusers",
		"markspam", "unmarkspam", "listspamevents",
		"deleteevent",
		"getcuratingconfig", "listblockedips", "unblockip", "isconfigured",
	}
	return NIP86Response{Result: supported}
}
// handleTrustPubkey adds a pubkey to the trusted list.
//
// NIP-86 positional params:
//   - params[0]: pubkey as 64 hex characters (required)
//   - params[1]: optional free-form note stored alongside the entry
func (s *Server) handleTrustPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: pubkey"}
	}
	pubkey, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid pubkey parameter"}
	}
	// Validate content, not just length: the previous length-only check
	// accepted 64 arbitrary (non-hex) characters.
	if len(pubkey) != 64 {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(pubkey); err != nil {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	note := ""
	if len(params) > 1 {
		if n, ok := params[1].(string); ok {
			note = n
		}
	}
	if err := curatingACL.TrustPubkey(pubkey, note); chk.E(err) {
		return NIP86Response{Error: "Failed to trust pubkey: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleUntrustPubkey removes a pubkey from the trusted list.
//
// params[0] must be the pubkey as 64 hex characters. Validation is now
// consistent with handleTrustPubkey (the add path validates, so every
// stored entry is canonical 64-char hex and removal can require the same).
func (s *Server) handleUntrustPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: pubkey"}
	}
	pubkey, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid pubkey parameter"}
	}
	// Reject malformed keys up front so the ACL layer only ever sees
	// canonical hex pubkeys (mirrors handleTrustPubkey).
	if len(pubkey) != 64 {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(pubkey); err != nil {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if err := curatingACL.UntrustPubkey(pubkey); chk.E(err) {
		return NIP86Response{Error: "Failed to untrust pubkey: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleListTrustedPubkeys returns the list of trusted pubkeys, each with
// its note and the unix timestamp at which it was added.
func (s *Server) handleListTrustedPubkeys(dbACL *database.CuratingACL) NIP86Response {
	trusted, err := dbACL.ListTrustedPubkeys()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to list trusted pubkeys: " + err.Error()}
	}
	entries := make([]map[string]interface{}, 0, len(trusted))
	for _, entry := range trusted {
		entries = append(entries, map[string]interface{}{
			"pubkey": entry.Pubkey,
			"note":   entry.Note,
			"added":  entry.Added.Unix(),
		})
	}
	return NIP86Response{Result: entries}
}
// handleBlacklistPubkey adds a pubkey to the blacklist.
//
// NIP-86 positional params:
//   - params[0]: pubkey as 64 hex characters (required)
//   - params[1]: optional free-form reason stored alongside the entry
func (s *Server) handleBlacklistPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: pubkey"}
	}
	pubkey, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid pubkey parameter"}
	}
	// Validate content, not just length: the previous length-only check
	// accepted 64 arbitrary (non-hex) characters.
	if len(pubkey) != 64 {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(pubkey); err != nil {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	reason := ""
	if len(params) > 1 {
		if r, ok := params[1].(string); ok {
			reason = r
		}
	}
	if err := curatingACL.BlacklistPubkey(pubkey, reason); chk.E(err) {
		return NIP86Response{Error: "Failed to blacklist pubkey: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleUnblacklistPubkey removes a pubkey from the blacklist.
//
// params[0] must be the pubkey as 64 hex characters. Validation is now
// consistent with handleBlacklistPubkey (the add path validates, so stored
// entries are canonical hex and removal can require the same format).
func (s *Server) handleUnblacklistPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: pubkey"}
	}
	pubkey, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid pubkey parameter"}
	}
	// Reject malformed keys up front (mirrors handleBlacklistPubkey).
	if len(pubkey) != 64 {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(pubkey); err != nil {
		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
	}
	if err := curatingACL.UnblacklistPubkey(pubkey); chk.E(err) {
		return NIP86Response{Error: "Failed to unblacklist pubkey: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleListBlacklistedPubkeys returns the list of blacklisted pubkeys,
// each with its recorded reason and the unix timestamp it was added.
func (s *Server) handleListBlacklistedPubkeys(dbACL *database.CuratingACL) NIP86Response {
	blacklisted, err := dbACL.ListBlacklistedPubkeys()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to list blacklisted pubkeys: " + err.Error()}
	}
	entries := make([]map[string]interface{}, 0, len(blacklisted))
	for _, entry := range blacklisted {
		entries = append(entries, map[string]interface{}{
			"pubkey": entry.Pubkey,
			"reason": entry.Reason,
			"added":  entry.Added.Unix(),
		})
	}
	return NIP86Response{Result: entries}
}
// handleListUnclassifiedUsers returns unclassified users sorted by event count.
//
// params[0] (optional, JSON number) caps the number of users returned;
// defaults to 100. Non-positive values are ignored instead of being
// forwarded to the database layer (the original passed 0/negative limits
// straight through).
func (s *Server) handleListUnclassifiedUsers(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
	limit := 100 // Default limit
	if len(params) > 0 {
		// JSON numbers decode as float64; only accept a positive limit.
		if l, ok := params[0].(float64); ok && l > 0 {
			limit = int(l)
		}
	}
	users, err := dbACL.ListUnclassifiedUsers(limit)
	if chk.E(err) {
		return NIP86Response{Error: "Failed to list unclassified users: " + err.Error()}
	}
	result := make([]map[string]interface{}, len(users))
	for i, u := range users {
		result[i] = map[string]interface{}{
			"pubkey":      u.Pubkey,
			"event_count": u.EventCount,
			"last_event":  u.LastEvent.Unix(),
		}
	}
	return NIP86Response{Result: result}
}
// handleMarkSpam marks an event as spam.
//
// NIP-86 positional params:
//   - params[0]: event id as 64 hex characters (required)
//   - params[1]: optional pubkey of the event author
//   - params[2]: optional free-form reason
func (s *Server) handleMarkSpam(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: event_id"}
	}
	eventID, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid event_id parameter"}
	}
	// Validate content, not just length: the previous length-only check
	// accepted 64 arbitrary (non-hex) characters.
	if len(eventID) != 64 {
		return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(eventID); err != nil {
		return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
	}
	pubkey := ""
	if len(params) > 1 {
		if p, ok := params[1].(string); ok {
			pubkey = p
		}
	}
	reason := ""
	if len(params) > 2 {
		if r, ok := params[2].(string); ok {
			reason = r
		}
	}
	if err := dbACL.MarkEventAsSpam(eventID, pubkey, reason); chk.E(err) {
		return NIP86Response{Error: "Failed to mark event as spam: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleUnmarkSpam removes the spam flag from an event.
//
// params[0] must be the event id as 64 hex characters. Validation is now
// consistent with handleMarkSpam (the flagging path validates, so stored
// ids are canonical hex and unflagging can require the same format).
func (s *Server) handleUnmarkSpam(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
	if len(params) < 1 {
		return NIP86Response{Error: "Missing required parameter: event_id"}
	}
	eventID, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid event_id parameter"}
	}
	// Reject malformed ids up front (mirrors handleMarkSpam).
	if len(eventID) != 64 {
		return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
	}
	if _, err := hex.DecodeString(eventID); err != nil {
		return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
	}
	if err := dbACL.UnmarkEventAsSpam(eventID); chk.E(err) {
		return NIP86Response{Error: "Failed to unmark event as spam: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleListSpamEvents returns the list of spam-flagged events, each with
// its author pubkey, recorded reason, and the unix timestamp it was flagged.
func (s *Server) handleListSpamEvents(dbACL *database.CuratingACL) NIP86Response {
	flagged, err := dbACL.ListSpamEvents()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to list spam events: " + err.Error()}
	}
	entries := make([]map[string]interface{}, 0, len(flagged))
	for _, entry := range flagged {
		entries = append(entries, map[string]interface{}{
			"event_id": entry.EventID,
			"pubkey":   entry.Pubkey,
			"reason":   entry.Reason,
			"added":    entry.Added.Unix(),
		})
	}
	return NIP86Response{Result: entries}
}
// handleDeleteEvent permanently deletes an event from the database.
// params[0] must be the event id as 64 hex characters; the id is decoded
// to raw bytes before being handed to the database layer.
func (s *Server) handleDeleteEvent(params []interface{}) NIP86Response {
	if len(params) == 0 {
		return NIP86Response{Error: "Missing required parameter: event_id"}
	}
	idHex, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid event_id parameter"}
	}
	if len(idHex) != 64 {
		return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
	}
	// Decode the hex id into the raw 32-byte form used by the database.
	rawID, err := hex.DecodeString(idHex)
	if err != nil {
		return NIP86Response{Error: "Invalid event_id hex: " + err.Error()}
	}
	if err := s.DB.DeleteEvent(context.Background(), rawID); chk.E(err) {
		return NIP86Response{Error: "Failed to delete event: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleGetCuratingConfig returns the current curating configuration,
// including rate limits, ban durations, allowed kinds/ranges/categories,
// and the provenance of the active config event. The "is_configured"
// field is derived from whether a config event id has been recorded.
func (s *Server) handleGetCuratingConfig(dbACL *database.CuratingACL) NIP86Response {
	cfg, err := dbACL.GetConfig()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to get config: " + err.Error()}
	}
	payload := map[string]interface{}{
		"daily_limit":      cfg.DailyLimit,
		"first_ban_hours":  cfg.FirstBanHours,
		"second_ban_hours": cfg.SecondBanHours,
		"allowed_kinds":    cfg.AllowedKinds,
		"allowed_ranges":   cfg.AllowedRanges,
		"kind_categories":  cfg.KindCategories,
		"config_event_id":  cfg.ConfigEventID,
		"config_pubkey":    cfg.ConfigPubkey,
		"configured_at":    cfg.ConfiguredAt,
		"is_configured":    cfg.ConfigEventID != "",
	}
	return NIP86Response{Result: payload}
}
// handleListCuratingBlockedIPs returns the list of blocked IPs in curating
// mode, each with its reason, expiry, and block timestamp (unix seconds).
func (s *Server) handleListCuratingBlockedIPs(dbACL *database.CuratingACL) NIP86Response {
	blocked, err := dbACL.ListBlockedIPs()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to list blocked IPs: " + err.Error()}
	}
	entries := make([]map[string]interface{}, 0, len(blocked))
	for _, entry := range blocked {
		entries = append(entries, map[string]interface{}{
			"ip":         entry.IP,
			"reason":     entry.Reason,
			"expires_at": entry.ExpiresAt.Unix(),
			"added":      entry.Added.Unix(),
		})
	}
	return NIP86Response{Result: entries}
}
// handleUnblockCuratingIP unblocks an IP in curating mode.
// params[0] must be the IP address string to unblock.
func (s *Server) handleUnblockCuratingIP(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
	if len(params) == 0 {
		return NIP86Response{Error: "Missing required parameter: ip"}
	}
	addr, ok := params[0].(string)
	if !ok {
		return NIP86Response{Error: "Invalid ip parameter"}
	}
	if err := dbACL.UnblockIP(addr); chk.E(err) {
		return NIP86Response{Error: "Failed to unblock IP: " + err.Error()}
	}
	return NIP86Response{Result: true}
}
// handleIsConfigured reports whether curating mode has been configured,
// returning the boolean directly as the NIP-86 result.
func (s *Server) handleIsConfigured(dbACL *database.CuratingACL) NIP86Response {
	isSet, err := dbACL.IsConfigured()
	if chk.E(err) {
		return NIP86Response{Error: "Failed to check configuration: " + err.Error()}
	}
	return NIP86Response{Result: isSet}
}
// GetKindCategoriesInfo returns information about available kind categories.
// Each entry carries an id, a display name, a description, and the list of
// event kinds the category covers.
func GetKindCategoriesInfo() []map[string]interface{} {
	// Small constructor keeps each category definition on one line.
	mk := func(id, name, description string, kinds []int) map[string]interface{} {
		return map[string]interface{}{
			"id":          id,
			"name":        name,
			"description": description,
			"kinds":       kinds,
		}
	}
	return []map[string]interface{}{
		mk("social", "Social/Notes",
			"Profiles, text notes, follows, reposts, reactions",
			[]int{0, 1, 3, 6, 7, 10002}),
		mk("dm", "Direct Messages",
			"NIP-04 DMs, NIP-17 private messages, gift wraps",
			[]int{4, 14, 1059}),
		mk("longform", "Long-form Content",
			"Articles and drafts",
			[]int{30023, 30024}),
		mk("media", "Media",
			"File metadata, video, audio",
			[]int{1063, 20, 21, 22}),
		mk("marketplace", "Marketplace",
			"Product listings, stalls, auctions",
			[]int{30017, 30018, 30019, 30020, 1021, 1022}),
		mk("groups_nip29", "Group Messaging (NIP-29)",
			"Simple group messages and metadata",
			[]int{9, 10, 11, 12, 9000, 9001, 9002, 39000, 39001, 39002}),
		mk("groups_nip72", "Communities (NIP-72)",
			"Moderated communities and post approvals",
			[]int{34550, 1111, 4550}),
		mk("lists", "Lists/Bookmarks",
			"Mute lists, pins, categorized lists, bookmarks",
			[]int{10000, 10001, 10003, 30000, 30001, 30003}),
	}
}
// expandKindRange expands a range string like "1000-1999" into the full
// list of individual kind numbers it covers. An unparsable range yields
// an empty (nil) slice.
func expandKindRange(rangeStr string) []int {
	var kinds []int
	bounds := make([]int, 2)
	if n, err := parseRange(rangeStr, bounds); err != nil || n != 2 {
		return kinds
	}
	for k := bounds[0]; k <= bounds[1]; k++ {
		kinds = append(kinds, k)
	}
	return kinds
}

// parseRange splits "start-end" on the first '-' found after position 0
// (so a leading minus sign is not treated as the separator) and stores the
// two numeric halves into parts[0] and parts[1]. It returns the number of
// components parsed: 2 on success, 0 with a nil error when no separator
// exists, or 0 with the strconv error when a half is not numeric.
func parseRange(s string, parts []int) (int, error) {
	for i := 1; i < len(s); i++ {
		if s[i] != '-' {
			continue
		}
		lo, err := strconv.Atoi(s[:i])
		if err != nil {
			return 0, err
		}
		hi, err := strconv.Atoi(s[i+1:])
		if err != nil {
			return 0, err
		}
		parts[0], parts[1] = lo, hi
		return 2, nil
	}
	return 0, nil
}

13
app/handle-nip86.go

@ -55,9 +55,16 @@ func (s *Server) handleNIP86Management(w http.ResponseWriter, r *http.Request) { @@ -55,9 +55,16 @@ func (s *Server) handleNIP86Management(w http.ResponseWriter, r *http.Request) {
return
}
// Check if managed ACL is active
if acl.Registry.Type() != "managed" {
http.Error(w, "Managed ACL mode is not active", http.StatusBadRequest)
// Dispatch based on ACL mode
aclType := acl.Registry.Type()
switch aclType {
case "curating":
s.handleCuratingNIP86Request(w, r, pubkey)
return
case "managed":
// Continue with managed ACL handling below
default:
http.Error(w, "NIP-86 requires managed or curating ACL mode", http.StatusBadRequest)
return
}

40
app/handle-relayinfo.go

@ -15,11 +15,20 @@ import ( @@ -15,11 +15,20 @@ import (
"next.orly.dev/pkg/version"
)
// GraphQueryConfig describes graph query capabilities for NIP-11 advertisement.
type GraphQueryConfig struct {
	// Enabled reports whether graph traversal queries are accepted.
	Enabled bool `json:"enabled"`
	// MaxDepth is the maximum traversal depth the relay will execute
	// (clamped to 1-16 by the server configuration).
	MaxDepth int `json:"max_depth"`
	// MaxResults caps the number of pubkeys/events returned per graph query.
	MaxResults int `json:"max_results"`
	// Methods lists the supported traversal methods, e.g. "follows",
	// "followers", "mentions", "thread".
	Methods []string `json:"methods"`
}
// ExtendedRelayInfo extends the standard NIP-11 relay info with additional fields.
// The Addresses field contains alternative WebSocket URLs for the relay (e.g., .onion).
type ExtendedRelayInfo struct {
*relayinfo.T
Addresses []string `json:"addresses,omitempty"`
Addresses []string `json:"addresses,omitempty"`
GraphQuery *GraphQueryConfig `json:"graph_query,omitempty"`
}
// HandleRelayInfo generates and returns a relay information document in JSON
@ -132,6 +141,10 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) { @@ -132,6 +141,10 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
}
}
// Restricted writes applies when ACL mode is not managed/curating but also not none
// (e.g., follows mode restricts writes to followed pubkeys)
restrictedWrites := s.Config.ACLMode != "managed" && s.Config.ACLMode != "curating" && s.Config.ACLMode != "none"
info = &relayinfo.T{
Name: name,
Description: description,
@ -141,7 +154,7 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) { @@ -141,7 +154,7 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
Version: strings.TrimPrefix(version.V, "v"),
Limitation: relayinfo.Limits{
AuthRequired: s.Config.AuthRequired || s.Config.ACLMode != "none",
RestrictedWrites: s.Config.ACLMode != "managed" && s.Config.ACLMode != "none",
RestrictedWrites: restrictedWrites,
PaymentRequired: s.Config.MonthlyPriceSats > 0,
},
Icon: icon,
@ -162,11 +175,26 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) { @@ -162,11 +175,26 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
}
}
// Return extended info if we have addresses, otherwise standard info
if len(addresses) > 0 {
// Build graph query config if enabled
var graphConfig *GraphQueryConfig
if s.graphExecutor != nil && s.Config.GraphQueriesEnabled {
graphEnabled, maxDepth, maxResults, _ := s.Config.GetGraphConfigValues()
if graphEnabled {
graphConfig = &GraphQueryConfig{
Enabled: true,
MaxDepth: maxDepth,
MaxResults: maxResults,
Methods: []string{"follows", "followers", "mentions", "thread"},
}
}
}
// Return extended info if we have addresses or graph query support, otherwise standard info
if len(addresses) > 0 || graphConfig != nil {
extInfo := &ExtendedRelayInfo{
T: info,
Addresses: addresses,
T: info,
Addresses: addresses,
GraphQuery: graphConfig,
}
if err := json.NewEncoder(w).Encode(extInfo); chk.E(err) {
}

21
app/handle-req.go

@ -602,6 +602,27 @@ func (l *Listener) HandleReq(msg []byte) (err error) { @@ -602,6 +602,27 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
events = aclFilteredEvents
}
// Apply curating ACL filtering for read access if curating ACL is active
if acl.Registry.Active.Load() == "curating" {
// Find the curating ACL instance
for _, aclInstance := range acl.Registry.ACL {
if aclInstance.Type() == "curating" {
if curatingACL, ok := aclInstance.(*acl.Curating); ok {
var curatingFilteredEvents event.S
for _, ev := range events {
if curatingACL.IsEventVisible(ev, accessLevel) {
curatingFilteredEvents = append(curatingFilteredEvents, ev)
} else {
log.D.F("curating ACL filtered out event %s from blacklisted pubkey", hexenc.Enc(ev.ID))
}
}
events = curatingFilteredEvents
}
break
}
}
}
// Apply private tag filtering - only show events with "private" tags to authorized users
var privateFilteredEvents event.S
authedPubkey := l.authedPubkey.Load()

16
app/main.go

@ -135,8 +135,8 @@ func Run( @@ -135,8 +135,8 @@ func Run(
}
}
// Initialize graph query executor (Badger backend)
if badgerDB, ok := db.(*database.D); ok {
// Initialize graph query executor (Badger backend) if enabled
if badgerDB, ok := db.(*database.D); ok && cfg.GraphQueriesEnabled {
// Get relay identity key for signing graph query responses
relaySecretKey, err := badgerDB.GetOrCreateRelayIdentitySecret()
if err != nil {
@ -147,13 +147,15 @@ func Run( @@ -147,13 +147,15 @@ func Run(
if l.graphExecutor, err = graph.NewExecutor(graphAdapter, relaySecretKey); err != nil {
log.E.F("failed to create graph executor: %v", err)
} else {
log.I.F("graph query executor initialized (Badger backend)")
graphEnabled, maxDepth, maxResults, rateLimitRPM := cfg.GetGraphConfigValues()
log.I.F("graph query executor initialized (Badger backend, enabled=%v, max_depth=%d, max_results=%d, rate_limit=%d/min)",
graphEnabled, maxDepth, maxResults, rateLimitRPM)
}
}
}
// Initialize graph query executor (Neo4j backend)
if neo4jDB, ok := db.(*neo4j.N); ok {
// Initialize graph query executor (Neo4j backend) if enabled
if neo4jDB, ok := db.(*neo4j.N); ok && cfg.GraphQueriesEnabled {
// Get relay identity key for signing graph query responses
relaySecretKey, err := neo4jDB.GetOrCreateRelayIdentitySecret()
if err != nil {
@ -164,7 +166,9 @@ func Run( @@ -164,7 +166,9 @@ func Run(
if l.graphExecutor, err = graph.NewExecutor(graphAdapter, relaySecretKey); err != nil {
log.E.F("failed to create graph executor: %v", err)
} else {
log.I.F("graph query executor initialized (Neo4j backend)")
graphEnabled, maxDepth, maxResults, rateLimitRPM := cfg.GetGraphConfigValues()
log.I.F("graph query executor initialized (Neo4j backend, enabled=%v, max_depth=%d, max_results=%d, rate_limit=%d/min)",
graphEnabled, maxDepth, maxResults, rateLimitRPM)
}
}
}

1
app/web/dist/bundle.css vendored

File diff suppressed because one or more lines are too long

28
app/web/dist/bundle.js vendored

File diff suppressed because one or more lines are too long

2
app/web/dist/bundle.js.map vendored

File diff suppressed because one or more lines are too long

45
app/web/src/App.svelte

@ -11,6 +11,7 @@ @@ -11,6 +11,7 @@
import RecoveryView from "./RecoveryView.svelte";
import SprocketView from "./SprocketView.svelte";
import PolicyView from "./PolicyView.svelte";
import CurationView from "./CurationView.svelte";
import BlossomView from "./BlossomView.svelte";
import LogView from "./LogView.svelte";
import SearchResultsView from "./SearchResultsView.svelte";
@ -1657,6 +1658,12 @@ @@ -1657,6 +1658,12 @@
label: "Managed ACL",
requiresOwner: true,
},
{
id: "curation",
icon: "📋",
label: "Curation",
requiresOwner: true,
},
{ id: "sprocket", icon: "⚙", label: "Sprocket", requiresOwner: true },
{ id: "policy", icon: "📜", label: "Policy", requiresOwner: true },
{ id: "logs", icon: "📋", label: "Logs", requiresOwner: true },
@ -1691,6 +1698,10 @@ @@ -1691,6 +1698,10 @@
if (tab.id === "managed-acl" && aclMode !== "managed") {
return false;
}
// Hide curation tab if not in curating mode
if (tab.id === "curation" && aclMode !== "curating") {
return false;
}
// Debug logging for tab filtering
console.log(`Tab ${tab.id} filter check:`, {
isLoggedIn,
@ -2860,6 +2871,40 @@ @@ -2860,6 +2871,40 @@
</div>
{/if}
</div>
{:else if selectedTab === "curation"}
<div class="curation-view-container">
{#if aclMode !== "curating"}
<div class="acl-mode-warning">
<h3>Curating Mode Not Active</h3>
<p>
To use the Curation interface, you need to set
the ACL mode to "curating" in your relay
configuration.
</p>
<p>
Current ACL mode: <strong
>{aclMode || "unknown"}</strong
>
</p>
<p>
Please set <code>ORLY_ACL_MODE=curating</code> in your
environment variables and restart the relay.
</p>
</div>
{:else if isLoggedIn && userRole === "owner"}
<CurationView {userSigner} {userPubkey} />
{:else}
<div class="access-denied">
<p>
Please log in with owner permissions to access
curation configuration.
</p>
<button class="login-btn" on:click={openLoginModal}
>Log In</button
>
</div>
{/if}
</div>
{:else if selectedTab === "sprocket"}
<SprocketView
{isLoggedIn}

1232
app/web/src/CurationView.svelte

File diff suppressed because it is too large Load Diff

160
app/web/src/kindCategories.js

@ -0,0 +1,160 @@ @@ -0,0 +1,160 @@
/**
 * Kind categories for curating mode.
 * These define predefined groups of event kinds that can be enabled/disabled together.
 * The categories match the server-side definitions in pkg/database/curating-acl.go.
 * Consumed by getKindsFromCategories/getCategoriesForKind below.
 */
export const curationKindCategories = [
    {
        id: "social",
        name: "Social/Notes",
        description: "User profiles, notes, follows, reposts, reactions, and relay lists",
        // 0 profile, 1 note, 3 follows, 6 repost, 7 reaction, 10002 relay list
        kinds: [0, 1, 3, 6, 7, 10002],
    },
    {
        id: "dm",
        name: "Direct Messages",
        description: "Encrypted direct messages (legacy and NIP-17 gift-wrapped)",
        // 4 legacy DM, 14 chat message, 1059 gift wrap
        kinds: [4, 14, 1059],
    },
    {
        id: "longform",
        name: "Long-form Content",
        description: "Blog posts and article drafts",
        // 30023 article, 30024 draft
        kinds: [30023, 30024],
    },
    {
        id: "media",
        name: "Media",
        description: "File metadata and media attachments",
        kinds: [1063, 20, 21, 22],
    },
    {
        id: "marketplace",
        name: "Marketplace",
        description: "Product listings, stalls, and marketplace events",
        kinds: [30017, 30018, 30019, 30020],
    },
    {
        id: "groups_nip29",
        name: "Group Messaging (NIP-29)",
        description: "Simple relay-based group chat messages",
        kinds: [9, 10, 11, 12],
    },
    {
        id: "groups_nip72",
        name: "Communities (NIP-72)",
        description: "Community definitions and threaded discussions",
        kinds: [34550, 1111, 4550],
    },
    {
        id: "lists",
        name: "Lists/Bookmarks",
        description: "Mute lists, pin lists, and parameterized list events",
        kinds: [10000, 10001, 30000, 30001],
    },
];
/**
 * Resolve a set of category IDs to the union of their event kinds.
 * Unknown category IDs are ignored.
 * @param {string[]} categoryIds - Array of category IDs
 * @returns {number[]} - Sorted array of unique kind numbers
 */
export function getKindsFromCategories(categoryIds) {
    const collected = new Set();
    for (const categoryId of categoryIds) {
        const match = curationKindCategories.find((cat) => cat.id === categoryId);
        if (!match) continue;
        for (const kind of match.kinds) {
            collected.add(kind);
        }
    }
    return [...collected].sort((a, b) => a - b);
}
/**
 * List the IDs of every category whose kind list contains the given kind.
 * @param {number} kind - The kind number to look up
 * @returns {string[]} - Array of category IDs containing this kind
 */
export function getCategoriesForKind(kind) {
    const ids = [];
    for (const category of curationKindCategories) {
        if (category.kinds.includes(kind)) {
            ids.push(category.id);
        }
    }
    return ids;
}
/**
 * Parse a custom kinds string (e.g., "100, 200-300, 500") into an array of kinds.
 * Tokens containing "-" are treated as inclusive ranges; ranges spanning more
 * than 1000 values are silently dropped to avoid memory blow-up. Unparseable
 * tokens are skipped.
 * @param {string} customKinds - Comma-separated list of kinds and ranges
 * @returns {number[]} - Sorted array of unique individual kind numbers
 */
export function parseCustomKinds(customKinds) {
    if (!customKinds || !customKinds.trim()) return [];
    const result = new Set();
    for (const raw of customKinds.split(",")) {
        const token = raw.trim();
        if (!token) continue;
        if (token.includes("-")) {
            // Range form "lo-hi": both bounds must parse and be ordered.
            const bounds = token.split("-");
            const lo = parseInt(bounds[0].trim(), 10);
            const hi = parseInt(bounds[1].trim(), 10);
            const validRange = !isNaN(lo) && !isNaN(hi) && lo <= hi;
            // Don't expand ranges > 1000 to avoid memory issues.
            if (validRange && hi - lo <= 1000) {
                for (let k = lo; k <= hi; k++) {
                    result.add(k);
                }
            }
        } else {
            const value = parseInt(token, 10);
            if (!isNaN(value)) {
                result.add(value);
            }
        }
    }
    return [...result].sort((a, b) => a - b);
}
/**
 * Format a list of kinds into a compact string with ranges.
 * Runs of three or more consecutive kinds collapse to "lo-hi"; runs of two
 * are emitted as two separate numbers.
 * @param {number[]} kinds - Array of kind numbers
 * @returns {string} - Formatted string like "1, 3, 5-10, 15"
 */
export function formatKindsCompact(kinds) {
    if (!kinds || kinds.length === 0) return "";
    const ordered = [...kinds].sort((a, b) => a - b);
    const pieces = [];
    // Emit one finished run [lo, hi] in its compact form.
    const flush = (lo, hi) => {
        if (hi > lo + 1) {
            pieces.push(`${lo}-${hi}`);
        } else if (hi === lo + 1) {
            pieces.push(`${lo}, ${hi}`);
        } else {
            pieces.push(`${lo}`);
        }
    };
    let lo = ordered[0];
    let hi = ordered[0];
    for (let i = 1; i < ordered.length; i++) {
        if (ordered[i] === hi + 1) {
            hi = ordered[i];
        } else {
            flush(lo, hi);
            lo = ordered[i];
            hi = ordered[i];
        }
    }
    flush(lo, hi);
    return pieces.join(", ");
}

778
docs/GRAPH_QUERIES_REMAINING_PLAN.md

@ -0,0 +1,778 @@ @@ -0,0 +1,778 @@
# Graph Queries: Remaining Implementation Plan
> Consolidated plan based on NIP-XX-GRAPH-QUERIES.md spec, existing implementation plans, and codebase analysis.
## Current Status Summary
| Component | Status | Notes |
|-----------|--------|-------|
| Filter extension parsing (`_graph`) | ✅ COMPLETE | Phase 0 |
| E-tag graph index (eeg/gee) | ✅ COMPLETE | Phase 1 - indexes populated on new events |
| Graph traversal primitives | ✅ COMPLETE | Phase 2 |
| High-level traversals (follows, followers, mentions, thread) | ✅ COMPLETE | Phase 3 |
| Query handler + relay-signed responses | ✅ COMPLETE | Phase 4 |
| **Reference aggregation (Badger)** | ✅ COMPLETE | Phase 5C - `pkg/database/graph-refs.go` |
| **Reference aggregation (Neo4j)** | ✅ COMPLETE | Phase 5C - `pkg/neo4j/graph-refs.go` |
| **E-tag graph backfill migration** | ✅ COMPLETE | Phase 5B - Migration v8 in `pkg/database/migrations.go` |
| **Configuration options** | ✅ COMPLETE | Phase 5A - `app/config/config.go` |
| **NIP-11 advertisement** | ✅ COMPLETE | Phase 5A - `app/handle-relayinfo.go` |
| **P-tag graph query optimization** | ❌ NOT IMPLEMENTED | Enhancement - lower priority |
---
## Phase 5A: Configuration & NIP-11 Advertisement
**Goal**: Allow relays to configure graph query support and advertise capabilities.
### 5A.1 Configuration Options
**File**: `app/config/config.go` or environment variables
| Variable | Type | Default | Description |
|----------|------|---------|-------------|
| `ORLY_GRAPH_QUERIES_ENABLED` | bool | true | Enable/disable graph queries |
| `ORLY_GRAPH_MAX_DEPTH` | int | 16 | Maximum traversal depth |
| `ORLY_GRAPH_RATE_LIMIT` | int | 10 | Queries per minute per connection |
| `ORLY_GRAPH_MAX_RESULTS` | int | 10000 | Maximum pubkeys/events per response |
**Implementation**:
```go
// pkg/config/graph.go
type GraphConfig struct {
Enabled bool `env:"ORLY_GRAPH_QUERIES_ENABLED" default:"true"`
MaxDepth int `env:"ORLY_GRAPH_MAX_DEPTH" default:"16"`
RateLimit int `env:"ORLY_GRAPH_RATE_LIMIT" default:"10"`
MaxResults int `env:"ORLY_GRAPH_MAX_RESULTS" default:"10000"`
}
```
**Server integration** (`app/server.go`):
```go
// Check config before initializing executor
if config.Graph.Enabled {
l.graphExecutor, err = graph.NewExecutor(graphAdapter, relaySecretKey, config.Graph)
}
```
### 5A.2 NIP-11 Advertisement
**File**: `app/handle-relayinfo.go`
**Changes required**:
```go
// In buildRelayInfo():
if s.graphExecutor != nil {
info.SupportedNips = append(info.SupportedNips, "XX") // Or appropriate NIP number
info.Limitation.GraphQueryMaxDepth = config.Graph.MaxDepth
}
// Add to RelayInfo struct or use Software field:
type RelayInfo struct {
// ... existing fields
Limitation struct {
// ... existing limitations
GraphQueryMaxDepth int `json:"graph_query_max_depth,omitempty"`
} `json:"limitation,omitempty"`
}
```
**Example NIP-11 output**:
```json
{
"supported_nips": [1, "XX"],
"limitation": {
"graph_query_max_depth": 16
}
}
```
---
## Phase 5B: E-Tag Graph Backfill Migration
**Goal**: Populate e-tag graph indexes (eeg/gee) for events stored before graph feature was added.
### 5B.1 Migration Implementation
**File**: `pkg/database/migration-etag-graph.go`
```go
package database
import (
"bytes"
"github.com/dgraph-io/badger/v4"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
)
// MigrateETagGraph backfills e-tag graph edges for existing events
// This is safe to run multiple times (idempotent)
func (d *D) MigrateETagGraph() error {
log.I.F("Starting e-tag graph backfill migration...")
var processed, edges, skipped int
batchSize := 1000
batch := make([]eTagEdge, 0, batchSize)
// Iterate all events using serial-event index (sei)
err := d.View(func(txn *badger.Txn) error {
opts := badger.DefaultIteratorOptions
opts.PrefetchValues = true
it := txn.NewIterator(opts)
defer it.Close()
prefix := indexes.NewPrefix(indexes.SerialEvent).Bytes()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
// Decode event serial from key
key := item.Key()
sourceSer := new(types.Uint40)
dec := indexes.SerialEventDec(sourceSer)
if err := dec.UnmarshalRead(bytes.NewReader(key)); err != nil {
skipped++
continue
}
// Get event data
var ev event.T
if err := item.Value(func(val []byte) error {
return ev.UnmarshalCompact(val)
}); err != nil {
skipped++
continue
}
// Get event kind
eventKind := new(types.Uint16)
eventKind.Set(uint16(ev.Kind))
// Extract e-tags
for _, eTag := range ev.Tags.GetAll([]byte("e")) {
if eTag.Len() < 2 {
continue
}
targetID, err := hex.Dec(string(eTag.Value()))
if err != nil || len(targetID) != 32 {
continue
}
// Look up target event serial
targetSer, err := d.GetEventSerialByID(targetID)
if err != nil || targetSer == nil {
// Target event doesn't exist in our relay - skip
continue
}
batch = append(batch, eTagEdge{
sourceSer: sourceSer,
targetSer: targetSer,
kind: eventKind,
})
}
// Flush batch if full
if len(batch) >= batchSize {
if err := d.writeETagEdges(batch); err != nil {
return err
}
edges += len(batch)
batch = batch[:0]
}
processed++
if processed%10000 == 0 {
log.I.F("Migration progress: %d events, %d edges, %d skipped", processed, edges, skipped)
}
}
return nil
})
// Flush remaining batch
if len(batch) > 0 {
if err := d.writeETagEdges(batch); err != nil {
return err
}
edges += len(batch)
}
log.I.F("E-tag graph migration complete: %d events, %d edges, %d skipped", processed, edges, skipped)
return err
}
type eTagEdge struct {
sourceSer *types.Uint40
targetSer *types.Uint40
kind *types.Uint16
}
func (d *D) writeETagEdges(edges []eTagEdge) error {
return d.Update(func(txn *badger.Txn) error {
for _, edge := range edges {
// Forward edge: eeg|source|target|kind|direction(0)
keyBuf := new(bytes.Buffer)
dirOut := new(types.Letter)
dirOut.Set(types.EdgeDirectionETagOut)
if err := indexes.EventEventGraphEnc(edge.sourceSer, edge.targetSer, edge.kind, dirOut).MarshalWrite(keyBuf); err != nil {
return err
}
if err := txn.Set(keyBuf.Bytes(), nil); err != nil {
return err
}
// Reverse edge: gee|target|kind|direction(1)|source
keyBuf.Reset()
dirIn := new(types.Letter)
dirIn.Set(types.EdgeDirectionETagIn)
if err := indexes.GraphEventEventEnc(edge.targetSer, edge.kind, dirIn, edge.sourceSer).MarshalWrite(keyBuf); err != nil {
return err
}
if err := txn.Set(keyBuf.Bytes(), nil); err != nil {
return err
}
}
return nil
})
}
```
### 5B.2 CLI Integration
**File**: `cmd/migrate.go` (or add to existing migration command)
```go
// Add migration subcommand
func runETagGraphMigration(cmd *cobra.Command, args []string) error {
db, err := database.Open(cfg.DataDir)
if err != nil {
return err
}
defer db.Close()
return db.MigrateETagGraph()
}
```
**Usage**:
```bash
./orly migrate --backfill-etag-graph
# OR
./orly migrate etag-graph
```
---
## Phase 5C: Reference Aggregation (inbound_refs / outbound_refs)
**Goal**: Implement the `inbound_refs` and `outbound_refs` query parameters per NIP spec.
### Spec Requirements
From NIP-XX-GRAPH-QUERIES.md:
```json
{
"_graph": {
"method": "follows",
"seed": "<pubkey>",
"depth": 1,
"inbound_refs": [
{"kinds": [7], "from_depth": 1}
]
}
}
```
**Semantics**:
- `inbound_refs`: Find events that **reference** discovered events (reactions, replies, reposts)
- `outbound_refs`: Find events **referenced by** discovered events (what the discovered posts are replying to)
- Multiple `ref_spec` items have **AND** semantics
- Multiple `kinds` within a single `ref_spec` have **OR** semantics
- Results sorted by count **descending** (most referenced first)
### 5C.1 Extend Query Struct
**File**: `pkg/protocol/graph/query.go`
Already defined but needs execution:
```go
type RefSpec struct {
Kinds []uint16 `json:"kinds"`
FromDepth int `json:"from_depth,omitempty"`
}
type Query struct {
// ... existing fields
InboundRefs []RefSpec `json:"inbound_refs,omitempty"`
OutboundRefs []RefSpec `json:"outbound_refs,omitempty"`
}
```
### 5C.2 Implement Reference Collection
**File**: `pkg/database/graph-refs.go`
```go
// CollectInboundRefs finds events that reference events authored by the discovered pubkeys
// For each depth level, finds inbound e-tag references of specified kinds
// Returns aggregated counts sorted by popularity (descending)
func (d *D) CollectInboundRefs(
result *GraphResult,
refSpecs []RefSpec,
maxDepth int,
) error {
if result.InboundRefs == nil {
result.InboundRefs = make(map[uint16]map[string][]string)
}
// For each depth level
for depth := 0; depth <= maxDepth; depth++ {
// Determine which ref specs apply at this depth
var applicableKinds []uint16
for _, spec := range refSpecs {
if depth >= spec.FromDepth {
applicableKinds = append(applicableKinds, spec.Kinds...)
}
}
if len(applicableKinds) == 0 {
continue
}
// Get pubkeys at this depth
var pubkeySerials []*types.Uint40
if depth == 0 {
// depth 0 = seed pubkey
seedSerial, _ := d.PubkeyHexToSerial(result.SeedPubkey)
if seedSerial != nil {
pubkeySerials = []*types.Uint40{seedSerial}
}
} else {
pubkeys := result.PubkeysByDepth[depth]
for _, pkHex := range pubkeys {
ser, _ := d.PubkeyHexToSerial(pkHex)
if ser != nil {
pubkeySerials = append(pubkeySerials, ser)
}
}
}
// For each pubkey, find their events, then find references to those events
for _, pkSerial := range pubkeySerials {
// Get events authored by this pubkey
authoredEvents, err := d.GetEventsAuthoredByPubkey(pkSerial, nil)
if err != nil {
continue
}
for _, eventSerial := range authoredEvents {
eventIDHex, _ := d.GetEventIDFromSerial(eventSerial)
if eventIDHex == "" {
continue
}
// Find inbound references (events that reference this event)
refSerials, err := d.GetReferencingEvents(eventSerial, applicableKinds)
if err != nil {
continue
}
for _, refSerial := range refSerials {
refIDHex, _ := d.GetEventIDFromSerial(refSerial)
if refIDHex == "" {
continue
}
// Get the kind of the referencing event
refKind, _ := d.GetEventKindFromSerial(refSerial)
// Add to aggregation
if result.InboundRefs[refKind] == nil {
result.InboundRefs[refKind] = make(map[string][]string)
}
result.InboundRefs[refKind][eventIDHex] = append(
result.InboundRefs[refKind][eventIDHex],
refIDHex,
)
}
}
}
}
return nil
}
// CollectOutboundRefs finds events referenced BY events at each depth
// (following e-tag chains to find what posts are being replied to)
func (d *D) CollectOutboundRefs(
result *GraphResult,
refSpecs []RefSpec,
maxDepth int,
) error {
if result.OutboundRefs == nil {
result.OutboundRefs = make(map[uint16]map[string][]string)
}
// Similar implementation to CollectInboundRefs but using GetETagsFromEventSerial
// to follow outbound references instead of GetReferencingEvents for inbound
// ...
return nil
}
```
### 5C.3 Response Generation with Refs
**File**: `pkg/protocol/graph/executor.go`
Add ref aggregation support to response:
```go
func (e *Executor) Execute(q *Query) (*event.T, error) {
// ... existing traversal code ...
// Collect references if specified
if len(q.InboundRefs) > 0 {
if err := e.db.CollectInboundRefs(result, q.InboundRefs, q.Depth); err != nil {
return nil, fmt.Errorf("inbound refs: %w", err)
}
}
if len(q.OutboundRefs) > 0 {
if err := e.db.CollectOutboundRefs(result, q.OutboundRefs, q.Depth); err != nil {
return nil, fmt.Errorf("outbound refs: %w", err)
}
}
// Build response content with refs included
content := ResponseContent{
PubkeysByDepth: result.ToDepthArrays(),
TotalPubkeys: result.TotalPubkeys,
}
// Add ref summaries if present
if len(result.InboundRefs) > 0 || len(result.OutboundRefs) > 0 {
content.InboundRefSummary = buildRefSummary(result.InboundRefs)
content.OutboundRefSummary = buildRefSummary(result.OutboundRefs)
}
// ... rest of response generation ...
}
type ResponseContent struct {
PubkeysByDepth [][]string `json:"pubkeys_by_depth,omitempty"`
EventsByDepth [][]string `json:"events_by_depth,omitempty"`
TotalPubkeys int `json:"total_pubkeys,omitempty"`
TotalEvents int `json:"total_events,omitempty"`
InboundRefSummary []RefSummary `json:"inbound_refs,omitempty"`
OutboundRefSummary []RefSummary `json:"outbound_refs,omitempty"`
}
type RefSummary struct {
Kind uint16 `json:"kind"`
TargetEventID string `json:"target"`
RefCount int `json:"count"`
RefEventIDs []string `json:"refs,omitempty"` // Optional: include actual ref IDs
}
func buildRefSummary(refs map[uint16]map[string][]string) []RefSummary {
var summaries []RefSummary
for kind, targets := range refs {
for targetID, refIDs := range targets {
summaries = append(summaries, RefSummary{
Kind: kind,
TargetEventID: targetID,
RefCount: len(refIDs),
RefEventIDs: refIDs,
})
}
}
// Sort by count descending
sort.Slice(summaries, func(i, j int) bool {
return summaries[i].RefCount > summaries[j].RefCount
})
return summaries
}
```
---
## Phase 5D: P-Tag Graph Query Optimization
**Goal**: Use the pubkey graph index (peg) for faster `#p` tag queries.
### Spec
From PTAG_GRAPH_OPTIMIZATION.md:
When a filter has `#p` tags (mentions/references), use the `peg` index instead of the `tkc` (TagKind) index for:
- 41% smaller index size
- No hash collisions (exact serial match vs 8-byte hash)
- Kind-indexed in key structure
### 5D.1 Query Optimization
**File**: `pkg/database/query-for-ptag-graph.go`
```go
package database
// canUsePTagGraph checks if a filter can use graph optimization
func canUsePTagGraph(f *filter.F) bool {
// Has p-tags?
if f.Tags == nil || f.Tags.Len() == 0 {
return false
}
hasPTags := false
for _, t := range *f.Tags {
if len(t.Key()) >= 1 && t.Key()[0] == 'p' {
hasPTags = true
break
}
}
if !hasPTags {
return false
}
// No authors filter (use different optimization for that)
if f.Authors != nil && f.Authors.Len() > 0 {
return false
}
return true
}
// QueryPTagGraph uses the pubkey graph index for efficient p-tag queries
func (d *D) QueryPTagGraph(f *filter.F) (serials types.Uint40s, err error) {
// Extract p-tags from filter
var ptagPubkeys [][]byte
for _, t := range *f.Tags {
if len(t.Key()) >= 1 && t.Key()[0] == 'p' {
for _, val := range t.Values() {
pubkeyBytes, err := hex.Dec(string(val))
if err == nil && len(pubkeyBytes) == 32 {
ptagPubkeys = append(ptagPubkeys, pubkeyBytes)
}
}
}
}
if len(ptagPubkeys) == 0 {
return nil, nil
}
// Resolve pubkeys to serials
var pubkeySerials []*types.Uint40
for _, pk := range ptagPubkeys {
ser, err := d.GetPubkeySerial(pk)
if err == nil && ser != nil {
pubkeySerials = append(pubkeySerials, ser)
}
}
// Query kinds (optional)
var kinds []uint16
if f.Kinds != nil {
kinds = f.Kinds.ToUint16()
}
// Scan peg index for each pubkey
seen := make(map[uint64]bool)
for _, pkSerial := range pubkeySerials {
// peg|pubkey_serial|kind|direction(2)|event_serial
// direction=2 means "inbound p-tag" (this pubkey is referenced)
eventSerials, err := d.GetEventsReferencingPubkey(pkSerial, kinds)
if err != nil {
continue
}
for _, evSer := range eventSerials {
if !seen[evSer.Uint64()] {
seen[evSer.Uint64()] = true
serials = append(serials, evSer)
}
}
}
return serials, nil
}
// GetEventsReferencingPubkey finds events that have p-tag referencing this pubkey
// Uses peg index with direction=2 (p-tag inbound)
func (d *D) GetEventsReferencingPubkey(pubkeySerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error) {
var eventSerials []*types.Uint40
err := d.View(func(txn *badger.Txn) error {
if len(kinds) > 0 {
// Scan specific kinds
for _, k := range kinds {
kind := new(types.Uint16)
kind.Set(k)
direction := new(types.Letter)
direction.Set(types.EdgeDirectionPTagIn) // direction=2
prefix := new(bytes.Buffer)
indexes.PubkeyEventGraphEnc(pubkeySerial, kind, direction, nil).MarshalWrite(prefix)
opts := badger.DefaultIteratorOptions
opts.PrefetchValues = false
it := txn.NewIterator(opts)
for it.Seek(prefix.Bytes()); it.ValidForPrefix(prefix.Bytes()); it.Next() {
key := it.Item().Key()
_, _, _, evSer := indexes.PubkeyEventGraphVars()
dec := indexes.PubkeyEventGraphDec(new(types.Uint40), new(types.Uint16), new(types.Letter), evSer)
if err := dec.UnmarshalRead(bytes.NewReader(key)); err != nil {
continue
}
ser := new(types.Uint40)
ser.Set(evSer.Uint64())
eventSerials = append(eventSerials, ser)
}
it.Close()
}
} else {
// Scan all kinds (direction=2 only)
direction := new(types.Letter)
direction.Set(types.EdgeDirectionPTagIn)
// Need to scan all kinds for this pubkey with direction=2
// This is less efficient - recommend always filtering by kinds
// ... implementation
}
return nil
})
return eventSerials, err
}
```
### 5D.2 Query Dispatcher Integration
**File**: `pkg/database/query.go` (or equivalent)
```go
func (d *D) GetSerialsFromFilter(f *filter.F) (sers types.Uint40s, err error) {
// Try p-tag graph optimization first
if canUsePTagGraph(f) {
if sers, err = d.QueryPTagGraph(f); err == nil && len(sers) > 0 {
return sers, nil
}
// Fall through to traditional indexes on error or empty result
}
// Existing index selection logic...
}
```
---
## Implementation Priority Order
### Critical Path (Completes NIP Spec)
1. **Phase 5C: Reference Aggregation** - Required by NIP spec for full feature parity
- Estimated: Medium complexity
- Value: High (enables reaction/reply counts, popular post discovery)
2. **Phase 5A: Configuration & NIP-11** - Needed for relay discoverability
- Estimated: Low complexity
- Value: Medium (allows clients to detect support)
### Enhancement Path (Performance & Operations)
3. **Phase 5B: E-Tag Graph Backfill** - Enables graph queries on historical data
- Estimated: Medium complexity
- Value: Medium (relays with existing data need this)
4. **Phase 5D: P-Tag Graph Optimization** - Performance improvement
- Estimated: Low-Medium complexity
- Value: Medium (3-5x faster for mention queries)
---
## Testing Plan
### Unit Tests
```go
// Reference aggregation
func TestCollectInboundRefs(t *testing.T)
func TestCollectOutboundRefs(t *testing.T)
func TestRefSummarySorting(t *testing.T)
// Configuration
func TestGraphConfigDefaults(t *testing.T)
func TestGraphConfigEnvOverrides(t *testing.T)
// Migration
func TestETagGraphMigration(t *testing.T)
func TestMigrationIdempotency(t *testing.T)
// P-tag optimization
func TestCanUsePTagGraph(t *testing.T)
func TestQueryPTagGraph(t *testing.T)
```
### Integration Tests
```go
// Full round-trip with refs
func TestGraphQueryWithInboundRefs(t *testing.T)
func TestGraphQueryWithOutboundRefs(t *testing.T)
func TestGraphQueryRefsSortedByCount(t *testing.T)
// NIP-11
func TestRelayInfoAdvertisesGraphQueries(t *testing.T)
```
### Performance Tests
```go
// Benchmark ref collection
func BenchmarkCollectInboundRefs(b *testing.B)
func BenchmarkPTagGraphVsTagIndex(b *testing.B)
```
---
## Summary
| Phase | Description | Complexity | Priority | Status |
|-------|-------------|------------|----------|--------|
| **5A** | Configuration & NIP-11 | Low | High | ✅ COMPLETE |
| **5B** | E-tag graph migration | Medium | Medium | ✅ COMPLETE |
| **5C** | Reference aggregation | Medium | High | ✅ COMPLETE |
| **5D** | P-tag optimization | Low-Medium | Medium | ❌ Not started |
**Completed 2025-01-05:**
- Phase 5A: Configuration options (`ORLY_GRAPH_*` env vars) and NIP-11 `graph_query` field
- Phase 5B: E-tag graph backfill migration (v8) runs automatically on startup
- Phase 5C: Inbound/outbound ref collection for both Badger and Neo4j backends
**Remaining:**
- Phase 5D: P-tag graph query optimization (enhancement, not critical)
**Implementation Files:**
- `app/config/config.go` - Graph configuration options
- `app/handle-relayinfo.go` - NIP-11 advertisement
- `pkg/database/migrations.go` - E-tag graph backfill (v8)
- `pkg/database/graph-refs.go` - Badger ref collection
- `pkg/neo4j/graph-refs.go` - Neo4j ref collection
- `pkg/protocol/graph/executor.go` - Query execution with ref support

1
go.mod

@ -25,6 +25,7 @@ require ( @@ -25,6 +25,7 @@ require (
go.uber.org/atomic v1.11.0
golang.org/x/crypto v0.46.0
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
golang.org/x/term v0.38.0
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
honnef.co/go/tools v0.6.1
lol.mleku.dev v1.0.5

2
go.sum

@ -198,6 +198,8 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc @@ -198,6 +198,8 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=

110
main.go

@ -6,20 +6,24 @@ import ( @@ -6,20 +6,24 @@ import (
"net/http"
pp "net/http/pprof"
"os"
"os/exec"
"os/signal"
"runtime"
"runtime/debug"
"strings"
"sync"
"syscall"
"time"
"github.com/pkg/profile"
"golang.org/x/term"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/app"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"git.mleku.dev/mleku/nostr/crypto/keys"
"git.mleku.dev/mleku/nostr/encoders/bech32encoding"
"next.orly.dev/pkg/database"
neo4jdb "next.orly.dev/pkg/neo4j" // Import for neo4j factory and type
"git.mleku.dev/mleku/nostr/encoders/hex"
@ -97,6 +101,96 @@ func main() { @@ -97,6 +101,96 @@ func main() {
cfg.Listen, cfg.Port, cfg.ACLMode)
}
// Handle 'curatingmode' subcommand: start relay in curating mode with specified owner
if requested, ownerKey := config.CuratingModeRequested(); requested {
if ownerKey == "" {
fmt.Println("Usage: orly curatingmode <npub|hex_pubkey>")
fmt.Println("")
fmt.Println("Starts the relay in curating mode with the specified pubkey as owner.")
fmt.Println("Opens a browser to the curation setup page where you must log in")
fmt.Println("with a Nostr extension to configure the relay.")
fmt.Println("")
fmt.Println("Press Escape or Ctrl+C to stop the relay.")
os.Exit(1)
}
// Parse the owner key (npub or hex)
var ownerHex string
if strings.HasPrefix(ownerKey, "npub1") {
// Decode npub to hex
_, pubBytes, err := bech32encoding.Decode([]byte(ownerKey))
if err != nil {
fmt.Printf("Error: invalid npub: %v\n", err)
os.Exit(1)
}
if pb, ok := pubBytes.([]byte); ok {
ownerHex = hex.Enc(pb)
} else {
fmt.Println("Error: invalid npub encoding")
os.Exit(1)
}
} else if len(ownerKey) == 64 {
// Assume hex pubkey
ownerHex = strings.ToLower(ownerKey)
} else {
fmt.Println("Error: owner key must be an npub or 64-character hex pubkey")
os.Exit(1)
}
// Configure for curating mode
cfg.ACLMode = "curating"
cfg.Owners = []string{ownerHex}
log.I.F("curatingmode: starting with owner %s", ownerHex)
log.I.F("curatingmode: listening on %s:%d", cfg.Listen, cfg.Port)
// Start a goroutine to open browser after a short delay
go func() {
time.Sleep(2 * time.Second)
url := fmt.Sprintf("http://%s:%d/#curation", cfg.Listen, cfg.Port)
log.I.F("curatingmode: opening browser to %s", url)
openBrowser(url)
}()
// Start a goroutine to listen for Escape key
go func() {
// Set terminal to raw mode to capture individual key presses
oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
log.W.F("could not set terminal to raw mode: %v", err)
return
}
defer term.Restore(int(os.Stdin.Fd()), oldState)
buf := make([]byte, 1)
for {
_, err := os.Stdin.Read(buf)
if err != nil {
return
}
// Escape key is 0x1b (27)
if buf[0] == 0x1b {
fmt.Println("\nEscape pressed, shutting down...")
p, _ := os.FindProcess(os.Getpid())
_ = p.Signal(os.Interrupt)
return
}
}
}()
fmt.Println("")
fmt.Println("Curating Mode Setup")
fmt.Println("===================")
fmt.Printf("Owner: %s\n", ownerHex)
fmt.Printf("URL: http://%s:%d/#curation\n", cfg.Listen, cfg.Port)
fmt.Println("")
fmt.Println("Log in with your Nostr extension to configure allowed event kinds")
fmt.Println("and rate limiting settings.")
fmt.Println("")
fmt.Println("Press Escape or Ctrl+C to stop the relay.")
fmt.Println("")
}
// Ensure profiling is stopped on interrupts (SIGINT/SIGTERM) as well as on normal exit
var profileStopOnce sync.Once
profileStop := func() {}
@ -548,3 +642,19 @@ func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig { @@ -548,3 +642,19 @@ func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
Neo4jQueryResultLimit: neo4jQueryResultLimit,
}
}
// openBrowser launches the system's default browser on the given URL.
// A failure to start the launcher process is logged as a warning rather than
// returned, since not opening a browser is non-fatal.
func openBrowser(url string) {
	var name string
	var args []string
	switch runtime.GOOS {
	case "darwin":
		name = "open"
		args = []string{url}
	case "windows":
		name = "cmd"
		args = []string{"/c", "start", url}
	default: // linux, freebsd, etc.
		name = "xdg-open"
		args = []string{url}
	}
	if err := exec.Command(name, args...).Start(); err != nil {
		log.W.F("could not open browser: %v", err)
	}
}

699
pkg/acl/curating.go

@ -0,0 +1,699 @@ @@ -0,0 +1,699 @@
package acl
import (
"context"
"encoding/hex"
"reflect"
"strconv"
"strings"
"sync"
"time"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/app/config"
"next.orly.dev/pkg/database"
"git.mleku.dev/mleku/nostr/encoders/bech32encoding"
"git.mleku.dev/mleku/nostr/encoders/event"
"next.orly.dev/pkg/utils"
)
// Default values for curating mode.
const (
	// DefaultDailyLimit is the maximum number of events an unclassified
	// pubkey may publish per day before being rate-limited.
	DefaultDailyLimit = 50
	DefaultIPDailyLimit = 500 // Max events per IP per day (flood protection)
	// DefaultFirstBanHours is the duration, in hours, of a first-offense ban.
	DefaultFirstBanHours = 1
	// DefaultSecondBanHours is the duration, in hours, of a repeat-offense ban.
	DefaultSecondBanHours = 168 // 1 week
	// CuratingConfigKind is the event kind carrying the curating
	// configuration (30078 — application-specific data, per NIP-78).
	CuratingConfigKind = 30078
	// CuratingConfigDTag is the d-tag value identifying the curating
	// configuration event among kind-30078 events.
	CuratingConfigDTag = "curating-config"
)
// Curating implements the curating ACL mode with three-tier publisher classification:
// - Trusted: Unlimited publishing
// - Blacklisted: Cannot publish
// - Unclassified: Rate-limited publishing (default 50/day)
type Curating struct {
	Ctx         context.Context       // lifetime context, supplied via Configure
	cfg         *config.C             // relay configuration (source of Owners/Admins)
	db          *database.D           // underlying event store
	curatingACL *database.CuratingACL // database layer for curation state (blacklist, IP bans)
	owners      [][]byte              // decoded owner pubkeys from cfg.Owners
	admins      [][]byte              // decoded admin pubkeys from cfg.Admins
	mx          sync.RWMutex          // guards reads of owners/admins (see GetAccessLevel)
	// In-memory caches for performance.
	trustedCache     map[string]bool // hex pubkey -> true, presumably trusted set — populated by RefreshCaches (TODO confirm)
	blacklistedCache map[string]bool // hex pubkey -> true when known blacklisted (memoized in GetAccessLevel)
	kindCache        map[int]bool    // event kind -> allowed; semantics set elsewhere — verify against RefreshCaches
	configCache      *database.CuratingConfig // cached curating configuration; lifecycle managed outside this view
	cacheMx          sync.RWMutex             // guards the cache maps/configCache above
}
// Configure wires the curating ACL to its dependencies. It accepts, in any
// order: a *config.C (relay configuration), a *database.D (event store, also
// used to construct the curating database layer), and a context.Context.
// Any argument of another type is rejected with an error. Both the config
// and the database must be supplied. Owner/admin keys that fail to decode
// are skipped; a cache-refresh failure is logged but not fatal.
func (c *Curating) Configure(cfg ...any) (err error) {
	log.I.F("configuring curating ACL")
	for _, ca := range cfg {
		switch cv := ca.(type) {
		case *config.C:
			c.cfg = cv
		case *database.D:
			c.db = cv
			c.curatingACL = database.NewCuratingACL(cv)
		case context.Context:
			c.Ctx = cv
		default:
			// Fail fast on unexpected argument types. Previously this error
			// was assigned to err but then discarded by the unconditional
			// `return nil` at the end of the function.
			return errorf.E("invalid type: %T", reflect.TypeOf(ca))
		}
	}
	if c.cfg == nil || c.db == nil {
		return errorf.E("both config and database must be set")
	}
	// Initialize in-memory caches before any lookups can happen.
	c.trustedCache = make(map[string]bool)
	c.blacklistedCache = make(map[string]bool)
	c.kindCache = make(map[int]bool)
	// Decode owner identities (npub or hex). Invalid entries are reported by
	// chk.E (which, by project convention, logs them) and skipped rather
	// than aborting startup.
	for _, owner := range c.cfg.Owners {
		if o, e := bech32encoding.NpubOrHexToPublicKeyBinary(owner); !chk.E(e) {
			c.owners = append(c.owners, o)
		}
	}
	// Decode admin identities the same way.
	for _, admin := range c.cfg.Admins {
		if a, e := bech32encoding.NpubOrHexToPublicKeyBinary(admin); !chk.E(e) {
			c.admins = append(c.admins, a)
		}
	}
	// Best-effort warm-up of the trusted/blacklisted/kind caches from the
	// database; a failure here only degrades performance, so log and continue.
	if err = c.RefreshCaches(); err != nil {
		log.W.F("curating ACL: failed to refresh caches: %v", err)
	}
	return nil
}
// GetAccessLevel classifies a connection by pubkey and remote address.
// Returned levels, in check order: "owner", "admin", "blocked" (banned IP),
// "banned" (blacklisted pubkey) and "write" (everyone else; rate limiting
// for unclassified users is enforced separately in CheckPolicy).
func (c *Curating) GetAccessLevel(pub []byte, address string) (level string) {
	c.mx.RLock()
	defer c.mx.RUnlock()
	pubkeyHex := hex.EncodeToString(pub)
	// Check owners first
	for _, v := range c.owners {
		if utils.FastEqual(v, pub) {
			return "owner"
		}
	}
	// Check admins
	for _, v := range c.admins {
		if utils.FastEqual(v, pub) {
			return "admin"
		}
	}
	// Check if IP is blocked. Lookup errors are deliberately ignored
	// (fail open) so a storage hiccup cannot lock everyone out.
	if address != "" {
		blocked, _, err := c.curatingACL.IsIPBlocked(address)
		if err == nil && blocked {
			return "blocked"
		}
	}
	// Check if pubkey is blacklisted (check cache first)
	c.cacheMx.RLock()
	if c.blacklistedCache[pubkeyHex] {
		c.cacheMx.RUnlock()
		return "banned"
	}
	c.cacheMx.RUnlock()
	// Double-check database for blacklist entries not yet cached
	blacklisted, _ := c.curatingACL.IsPubkeyBlacklisted(pubkeyHex)
	if blacklisted {
		// Update cache so the next lookup stays in memory
		c.cacheMx.Lock()
		c.blacklistedCache[pubkeyHex] = true
		c.cacheMx.Unlock()
		return "banned"
	}
	// All other users get write access (rate limiting handled in CheckPolicy)
	return "write"
}
// CheckPolicy implements the PolicyChecker interface for event-level
// filtering. Decision order: relay must be configured, event must not be
// spam-flagged, its kind must be allow-listed, its author must not be
// blacklisted; trusted pubkeys and owners/admins then bypass the per-day
// rate limit applied to unclassified authors.
func (c *Curating) CheckPolicy(ev *event.E) (allowed bool, err error) {
	pubkeyHex := hex.EncodeToString(ev.Pubkey)
	// Named "conf" so the imported config package is not shadowed.
	conf, err := c.GetConfig()
	if err != nil {
		return false, errorf.E("failed to get config: %v", err)
	}
	if conf.ConfigEventID == "" {
		return false, errorf.E("curating mode not configured: please publish a configuration event")
	}
	// Spam-flagged events are rejected outright; lookup errors fail open.
	isSpam, _ := c.curatingACL.IsEventSpam(hex.EncodeToString(ev.ID[:]))
	if isSpam {
		return false, errorf.E("blocked: event is flagged as spam")
	}
	// Kind allow-list check.
	if !c.curatingACL.IsKindAllowed(int(ev.Kind), &conf) {
		return false, errorf.E("blocked: event kind %d is not in the allow list", ev.Kind)
	}
	// Blacklist check: cache first, then the authoritative database.
	c.cacheMx.RLock()
	isBlacklisted := c.blacklistedCache[pubkeyHex]
	c.cacheMx.RUnlock()
	if !isBlacklisted {
		isBlacklisted, _ = c.curatingACL.IsPubkeyBlacklisted(pubkeyHex)
	}
	if isBlacklisted {
		return false, errorf.E("blocked: pubkey is blacklisted")
	}
	// Trusted pubkeys bypass rate limiting; cache positive DB lookups.
	c.cacheMx.RLock()
	isTrusted := c.trustedCache[pubkeyHex]
	c.cacheMx.RUnlock()
	if !isTrusted {
		isTrusted, _ = c.curatingACL.IsPubkeyTrusted(pubkeyHex)
		if isTrusted {
			c.cacheMx.Lock()
			c.trustedCache[pubkeyHex] = true
			c.cacheMx.Unlock()
		}
	}
	if isTrusted {
		return true, nil
	}
	// Owners and admins also bypass rate limiting.
	for _, v := range c.owners {
		if utils.FastEqual(v, ev.Pubkey) {
			return true, nil
		}
	}
	for _, v := range c.admins {
		if utils.FastEqual(v, ev.Pubkey) {
			return true, nil
		}
	}
	// Unclassified users: enforce the per-pubkey daily limit. Increment
	// first and compare the returned value so two concurrent events cannot
	// both pass a stale read (the previous read-then-increment was racy).
	today := time.Now().Format("2006-01-02")
	dailyLimit := conf.DailyLimit
	if dailyLimit == 0 {
		dailyLimit = DefaultDailyLimit
	}
	newCount, cntErr := c.curatingACL.IncrementEventCount(pubkeyHex, today)
	if cntErr != nil {
		// Fail open on counter errors, matching the original behavior of
		// treating an unreadable count as zero.
		log.W.F("curating ACL: failed to increment event count: %v", cntErr)
		return true, nil
	}
	if newCount > dailyLimit {
		return false, errorf.E("rate limit exceeded: maximum %d events per day for unclassified users", dailyLimit)
	}
	return true, nil
}
// RateLimitCheck checks if an unclassified user can publish and handles IP tracking
// This is called separately when we have access to the IP address
//
// Returns allowed=false with a human-readable message when either the IP
// flood limit or the per-pubkey daily limit is hit; hitting a limit also
// records an IP offense, which may trigger a temporary ban.
func (c *Curating) RateLimitCheck(pubkeyHex, ip string) (allowed bool, message string, err error) {
	config, err := c.GetConfig()
	if err != nil {
		return false, "", errorf.E("failed to get config: %v", err)
	}
	today := time.Now().Format("2006-01-02")
	// Check IP flood limit first (applies to all non-trusted users from this IP)
	if ip != "" {
		ipDailyLimit := config.IPDailyLimit
		if ipDailyLimit == 0 {
			ipDailyLimit = DefaultIPDailyLimit
		}
		// NOTE(review): err is shadowed here; a read failure is treated as
		// a zero count (fail open) rather than propagated to the caller.
		ipCount, err := c.curatingACL.GetIPEventCount(ip, today)
		if err != nil {
			ipCount = 0
		}
		if ipCount >= ipDailyLimit {
			// IP has exceeded flood limit - record offense and ban
			c.recordIPOffenseAndBan(ip, pubkeyHex, config, "IP flood limit exceeded")
			return false, "rate limit exceeded: too many events from this IP address", nil
		}
	}
	// Check per-pubkey daily limit
	dailyLimit := config.DailyLimit
	if dailyLimit == 0 {
		dailyLimit = DefaultDailyLimit
	}
	// Read failure again falls back to zero (fail open).
	count, err := c.curatingACL.GetEventCount(pubkeyHex, today)
	if err != nil {
		count = 0
	}
	if count >= dailyLimit {
		// Record IP offense and potentially ban
		if ip != "" {
			c.recordIPOffenseAndBan(ip, pubkeyHex, config, "pubkey rate limit exceeded")
		}
		return false, "rate limit exceeded: maximum events per day for unclassified users", nil
	}
	// Increment IP event count for flood tracking (only for non-trusted users)
	if ip != "" {
		_, _ = c.curatingACL.IncrementIPEventCount(ip, today)
	}
	return true, "", nil
}
// recordIPOffenseAndBan registers a rate-limit violation for an IP and, if
// the offense count is positive, applies a temporary ban: first offense gets
// the short ban, second and later offenses the long one. Config zeros fall
// back to the package defaults.
func (c *Curating) recordIPOffenseAndBan(ip, pubkeyHex string, config database.CuratingConfig, reason string) {
	offenses, _ := c.curatingACL.RecordIPOffense(ip, pubkeyHex)
	if offenses <= 0 {
		return
	}
	banHours := config.FirstBanHours
	if banHours == 0 {
		banHours = DefaultFirstBanHours
	}
	if offenses >= 2 {
		banHours = config.SecondBanHours
		if banHours == 0 {
			banHours = DefaultSecondBanHours
		}
	}
	log.W.F("curating ACL: IP %s banned for %d hours (offense #%d, reason: %s)", ip, banHours, offenses, reason)
	c.curatingACL.BlockIP(ip, time.Duration(banHours)*time.Hour, reason)
}
// GetACLInfo returns the mode name, a one-line description and a longer
// documentation string describing the curating ACL's behavior and its
// NIP-86 management endpoints.
func (c *Curating) GetACLInfo() (name, description, documentation string) {
	return "curating", "curated relay with rate-limited unclassified publishers",
		`Curating ACL mode provides three-tier publisher classification:
- Trusted: Unlimited publishing, explicitly marked by admin
- Blacklisted: Cannot publish, events rejected
- Unclassified: Default state, rate-limited (default 50 events/day)
Features:
- Per-pubkey daily rate limiting for unclassified users (default 50/day)
- Per-IP daily rate limiting for flood protection (default 500/day)
- IP-based spam detection (tracks multiple rate-limited pubkeys)
- Automatic IP bans (1-hour first offense, 1-week second offense)
- Event kind allow-listing for content control
- Spam flagging (events hidden from queries without deletion)
Configuration via kind 30078 event with d-tag "curating-config".
The relay will not accept events until configured.
Management through NIP-86 API endpoints:
- trustpubkey, untrustpubkey, listtrustedpubkeys
- blacklistpubkey, unblacklistpubkey, listblacklistedpubkeys
- listunclassifiedusers
- markspam, unmarkspam, listspamevents
- setallowedkindcategories, getallowedkindcategories`
}
func (c *Curating) Type() string { return "curating" }
// IsEventVisible reports whether an event may be shown to a requester with
// the given access level. Events by blacklisted authors are hidden from
// everyone except admins and owners.
func (c *Curating) IsEventVisible(ev *event.E, accessLevel string) bool {
	switch accessLevel {
	case "admin", "owner":
		// Privileged requesters see everything.
		return true
	}
	author := hex.EncodeToString(ev.Pubkey)
	// Fast path: in-memory blacklist cache.
	c.cacheMx.RLock()
	cached := c.blacklistedCache[author]
	c.cacheMx.RUnlock()
	if cached {
		return false
	}
	// Slow path: consult the database, then remember a positive hit.
	banned, _ := c.curatingACL.IsPubkeyBlacklisted(author)
	if !banned {
		return true
	}
	c.cacheMx.Lock()
	c.blacklistedCache[author] = true
	c.cacheMx.Unlock()
	return false
}
// FilterVisibleEvents removes events from blacklisted pubkeys, returning
// only those visible to the given access level. Admin and owner receive the
// input slice unchanged.
func (c *Curating) FilterVisibleEvents(events []*event.E, accessLevel string) []*event.E {
	if accessLevel == "admin" || accessLevel == "owner" {
		return events
	}
	kept := make([]*event.E, 0, len(events))
	for _, ev := range events {
		if !c.IsEventVisible(ev, accessLevel) {
			continue
		}
		kept = append(kept, ev)
	}
	return kept
}
// GetCuratingACL exposes the underlying database ACL layer for direct use
// by the NIP-86 management handlers.
func (c *Curating) GetCuratingACL() *database.CuratingACL { return c.curatingACL }
// Syncer launches the background maintenance goroutine for the curating
// ACL; it runs until c.Ctx is cancelled.
func (c *Curating) Syncer() {
	log.I.F("starting curating ACL syncer")
	go c.backgroundCleanup()
}
// backgroundCleanup runs hourly maintenance (expired-ban pruning, stale
// counter removal, cache refresh) until the context is cancelled.
func (c *Curating) backgroundCleanup() {
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.runCleanup()
		case <-c.Ctx.Done():
			log.D.F("curating ACL background cleanup stopped")
			return
		}
	}
}
// runCleanup performs one maintenance pass: drop expired IP blocks, remove
// event counters older than a week, and rebuild the in-memory caches. Each
// step is best-effort; failures are logged and the rest still run.
func (c *Curating) runCleanup() {
	log.D.F("curating ACL: running background cleanup")
	if err := c.curatingACL.CleanupExpiredIPBlocks(); err != nil {
		log.W.F("curating ACL: failed to cleanup expired IP blocks: %v", err)
	}
	weekAgo := time.Now().AddDate(0, 0, -7).Format("2006-01-02")
	if err := c.curatingACL.CleanupOldEventCounts(weekAgo); err != nil {
		log.W.F("curating ACL: failed to cleanup old event counts: %v", err)
	}
	if err := c.RefreshCaches(); err != nil {
		log.W.F("curating ACL: failed to refresh caches: %v", err)
	}
}
// RefreshCaches refreshes all in-memory caches from the database: trusted
// pubkeys, blacklisted pubkeys, the config snapshot and the allowed-kind
// set. The write lock is held for the whole refresh, so lookups block while
// the database reads run; treat this as a periodic maintenance call.
func (c *Curating) RefreshCaches() error {
	c.cacheMx.Lock()
	defer c.cacheMx.Unlock()
	// Refresh trusted pubkeys cache
	trusted, err := c.curatingACL.ListTrustedPubkeys()
	if err != nil {
		return errorf.E("failed to list trusted pubkeys: %v", err)
	}
	c.trustedCache = make(map[string]bool)
	for _, t := range trusted {
		c.trustedCache[t.Pubkey] = true
	}
	// Refresh blacklisted pubkeys cache
	blacklisted, err := c.curatingACL.ListBlacklistedPubkeys()
	if err != nil {
		return errorf.E("failed to list blacklisted pubkeys: %v", err)
	}
	c.blacklistedCache = make(map[string]bool)
	for _, b := range blacklisted {
		c.blacklistedCache[b.Pubkey] = true
	}
	// Refresh config cache
	config, err := c.curatingACL.GetConfig()
	if err != nil {
		return errorf.E("failed to get config: %v", err)
	}
	c.configCache = &config
	// Refresh allowed kinds cache
	c.kindCache = make(map[int]bool)
	for _, k := range config.AllowedKinds {
		c.kindCache[k] = true
	}
	log.D.F("curating ACL: caches refreshed - %d trusted, %d blacklisted, %d allowed kinds",
		len(c.trustedCache), len(c.blacklistedCache), len(c.kindCache))
	return nil
}
// GetConfig returns the current configuration, preferring the in-memory
// snapshot and falling back to a database read when no snapshot exists.
func (c *Curating) GetConfig() (database.CuratingConfig, error) {
	c.cacheMx.RLock()
	cached := c.configCache
	c.cacheMx.RUnlock()
	if cached != nil {
		// Return a copy so callers cannot mutate the cached value.
		return *cached, nil
	}
	return c.curatingACL.GetConfig()
}
// IsConfigured reports whether a configuration event has been recorded,
// delegating to the database layer.
func (c *Curating) IsConfigured() (bool, error) { return c.curatingACL.IsConfigured() }
// ProcessConfigEvent processes a kind 30078 event to extract curating
// configuration. The event must carry d-tag "curating-config" and be signed
// by a configured owner or admin. Recognized tags: daily_limit,
// ip_daily_limit, first_ban_hours, second_ban_hours, kind_category,
// kind_range and kind. The parsed config is persisted and the in-memory
// snapshot replaced.
func (c *Curating) ProcessConfigEvent(ev *event.E) error {
	if ev.Kind != CuratingConfigKind {
		return errorf.E("invalid event kind: expected %d, got %d", CuratingConfigKind, ev.Kind)
	}
	// Check d-tag
	dTag := ev.Tags.GetFirst([]byte("d"))
	if dTag == nil || string(dTag.Value()) != CuratingConfigDTag {
		return errorf.E("invalid d-tag: expected %s", CuratingConfigDTag)
	}
	// Only owners or admins may (re)configure the relay.
	pubkeyHex := hex.EncodeToString(ev.Pubkey)
	isOwner := false
	isAdmin := false
	for _, v := range c.owners {
		if utils.FastEqual(v, ev.Pubkey) {
			isOwner = true
			break
		}
	}
	if !isOwner {
		for _, v := range c.admins {
			if utils.FastEqual(v, ev.Pubkey) {
				isAdmin = true
				break
			}
		}
	}
	if !isOwner && !isAdmin {
		return errorf.E("config event must be from owner or admin")
	}
	// Start from defaults so omitted tags fall back to sane values.
	// IPDailyLimit is now seeded like the other limits for consistency
	// (previously it was left at zero and resolved lazily at check time,
	// yielding the same effective value via DefaultIPDailyLimit).
	config := database.CuratingConfig{
		ConfigEventID:  hex.EncodeToString(ev.ID[:]),
		ConfigPubkey:   pubkeyHex,
		ConfiguredAt:   ev.CreatedAt,
		DailyLimit:     DefaultDailyLimit,
		IPDailyLimit:   DefaultIPDailyLimit,
		FirstBanHours:  DefaultFirstBanHours,
		SecondBanHours: DefaultSecondBanHours,
	}
	// Parse configuration from tags; malformed numeric values are ignored.
	for _, tag := range *ev.Tags {
		if tag.Len() < 2 {
			continue
		}
		key := string(tag.Key())
		value := string(tag.Value())
		switch key {
		case "daily_limit":
			if v, err := strconv.Atoi(value); err == nil && v > 0 {
				config.DailyLimit = v
			}
		case "ip_daily_limit":
			if v, err := strconv.Atoi(value); err == nil && v > 0 {
				config.IPDailyLimit = v
			}
		case "first_ban_hours":
			if v, err := strconv.Atoi(value); err == nil && v > 0 {
				config.FirstBanHours = v
			}
		case "second_ban_hours":
			if v, err := strconv.Atoi(value); err == nil && v > 0 {
				config.SecondBanHours = v
			}
		case "kind_category":
			config.KindCategories = append(config.KindCategories, value)
		case "kind_range":
			config.AllowedRanges = append(config.AllowedRanges, value)
		case "kind":
			if k, err := strconv.Atoi(value); err == nil {
				config.AllowedKinds = append(config.AllowedKinds, k)
			}
		}
	}
	// Persist, then swap the cached snapshot.
	if err := c.curatingACL.SaveConfig(config); err != nil {
		return errorf.E("failed to save config: %v", err)
	}
	c.cacheMx.Lock()
	c.configCache = &config
	c.cacheMx.Unlock()
	log.I.F("curating ACL: configuration updated from event %s by %s",
		config.ConfigEventID, config.ConfigPubkey)
	return nil
}
// IsTrusted reports whether a pubkey is on the trusted list, consulting the
// cache first and the database on a miss. Database errors read as false.
func (c *Curating) IsTrusted(pubkeyHex string) bool {
	c.cacheMx.RLock()
	cached := c.trustedCache[pubkeyHex]
	c.cacheMx.RUnlock()
	if cached {
		return true
	}
	ok, _ := c.curatingACL.IsPubkeyTrusted(pubkeyHex)
	return ok
}
// IsBlacklisted reports whether a pubkey is blacklisted, consulting the
// cache first and the database on a miss. Database errors read as false.
func (c *Curating) IsBlacklisted(pubkeyHex string) bool {
	c.cacheMx.RLock()
	cached := c.blacklistedCache[pubkeyHex]
	c.cacheMx.RUnlock()
	if cached {
		return true
	}
	ok, _ := c.curatingACL.IsPubkeyBlacklisted(pubkeyHex)
	return ok
}
// TrustPubkey adds a pubkey to the trusted list. Trusting implies removal
// from the blacklist, both in cache and in persistent storage.
func (c *Curating) TrustPubkey(pubkeyHex, note string) error {
	key := strings.ToLower(pubkeyHex)
	if err := c.curatingACL.SaveTrustedPubkey(key, note); err != nil {
		return err
	}
	c.cacheMx.Lock()
	c.trustedCache[key] = true
	delete(c.blacklistedCache, key)
	c.cacheMx.Unlock()
	// Best-effort removal from the persistent blacklist as well.
	c.curatingACL.RemoveBlacklistedPubkey(key)
	return nil
}
// UntrustPubkey removes a pubkey from the trusted list, returning it to the
// unclassified (rate-limited) tier.
func (c *Curating) UntrustPubkey(pubkeyHex string) error {
	key := strings.ToLower(pubkeyHex)
	if err := c.curatingACL.RemoveTrustedPubkey(key); err != nil {
		return err
	}
	c.cacheMx.Lock()
	delete(c.trustedCache, key)
	c.cacheMx.Unlock()
	return nil
}
// BlacklistPubkey adds a pubkey to the blacklist. Blacklisting implies
// removal from the trusted list, both in cache and in persistent storage.
func (c *Curating) BlacklistPubkey(pubkeyHex, reason string) error {
	key := strings.ToLower(pubkeyHex)
	if err := c.curatingACL.SaveBlacklistedPubkey(key, reason); err != nil {
		return err
	}
	c.cacheMx.Lock()
	c.blacklistedCache[key] = true
	delete(c.trustedCache, key)
	c.cacheMx.Unlock()
	// Best-effort removal from the persistent trusted list as well.
	c.curatingACL.RemoveTrustedPubkey(key)
	return nil
}
// UnblacklistPubkey removes a pubkey from the blacklist, returning it to
// the unclassified (rate-limited) tier.
func (c *Curating) UnblacklistPubkey(pubkeyHex string) error {
	key := strings.ToLower(pubkeyHex)
	if err := c.curatingACL.RemoveBlacklistedPubkey(key); err != nil {
		return err
	}
	c.cacheMx.Lock()
	delete(c.blacklistedCache, key)
	c.cacheMx.Unlock()
	return nil
}
// init registers the curating ACL implementation with the global ACL
// registry so it can be selected by its mode name ("curating").
func init() {
	Registry.Register(new(Curating))
}

989
pkg/database/curating-acl.go

@@ -0,0 +1,989 @@
//go:build !(js && wasm)
package database
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"time"
"github.com/dgraph-io/badger/v4"
)
// CuratingConfig represents the configuration for curating ACL mode.
// This is parsed from a kind 30078 event with d-tag "curating-config".
// Zero-valued limits fall back to package defaults at check time.
type CuratingConfig struct {
	DailyLimit     int      `json:"daily_limit"`      // Max events per day for unclassified users
	IPDailyLimit   int      `json:"ip_daily_limit"`   // Max events per day from a single IP (flood protection)
	FirstBanHours  int      `json:"first_ban_hours"`  // IP ban duration for first offense
	SecondBanHours int      `json:"second_ban_hours"` // IP ban duration for second+ offense
	AllowedKinds   []int    `json:"allowed_kinds"`    // Explicit kind numbers
	AllowedRanges  []string `json:"allowed_ranges"`   // Kind ranges like "1000-1999"
	KindCategories []string `json:"kind_categories"`  // Category IDs like "social", "dm"
	ConfigEventID  string   `json:"config_event_id"`  // ID of the config event
	ConfigPubkey   string   `json:"config_pubkey"`    // Pubkey that published config
	ConfiguredAt   int64    `json:"configured_at"`    // Timestamp of config event
}

// TrustedPubkey represents an explicitly trusted publisher (bypasses rate
// limits entirely).
type TrustedPubkey struct {
	Pubkey string    `json:"pubkey"`
	Note   string    `json:"note,omitempty"`
	Added  time.Time `json:"added"`
}

// BlacklistedPubkey represents a blacklisted publisher whose events are
// rejected on write and hidden from non-privileged queries.
type BlacklistedPubkey struct {
	Pubkey string    `json:"pubkey"`
	Reason string    `json:"reason,omitempty"`
	Added  time.Time `json:"added"`
}

// PubkeyEventCount tracks daily event counts for rate limiting.
type PubkeyEventCount struct {
	Pubkey    string    `json:"pubkey"`
	Date      string    `json:"date"` // YYYY-MM-DD format
	Count     int       `json:"count"`
	LastEvent time.Time `json:"last_event"`
}

// IPOffense tracks rate limit violations from IPs. OffenseCount grows once
// per distinct pubkey that hits a limit from this IP.
type IPOffense struct {
	IP           string    `json:"ip"`
	OffenseCount int       `json:"offense_count"`
	PubkeysHit   []string  `json:"pubkeys_hit"` // Pubkeys that hit rate limit from this IP
	LastOffense  time.Time `json:"last_offense"`
}

// CuratingBlockedIP represents a temporarily blocked IP with expiration.
type CuratingBlockedIP struct {
	IP        string    `json:"ip"`
	Reason    string    `json:"reason"`
	ExpiresAt time.Time `json:"expires_at"`
	Added     time.Time `json:"added"`
}

// SpamEvent represents an event flagged as spam (hidden, not deleted).
type SpamEvent struct {
	EventID string    `json:"event_id"`
	Pubkey  string    `json:"pubkey"`
	Reason  string    `json:"reason,omitempty"`
	Added   time.Time `json:"added"`
}

// UnclassifiedUser represents a user who hasn't been trusted or blacklisted.
type UnclassifiedUser struct {
	Pubkey     string    `json:"pubkey"`
	EventCount int       `json:"event_count"`
	LastEvent  time.Time `json:"last_event"`
}

// CuratingACL provides database operations for the curating ACL mode,
// layered over the main database handle.
type CuratingACL struct {
	*D
}

// NewCuratingACL creates a new CuratingACL instance wrapping the given
// database handle.
func NewCuratingACL(db *D) *CuratingACL {
	return &CuratingACL{D: db}
}
// ==================== Configuration ====================
// SaveConfig persists the curating configuration as a single JSON blob
// under the fixed config key, overwriting any previous value.
func (c *CuratingACL) SaveConfig(config CuratingConfig) error {
	data, err := json.Marshal(config)
	if err != nil {
		return err
	}
	return c.Update(func(tx *badger.Txn) error {
		return tx.Set(c.getConfigKey(), data)
	})
}
// GetConfig loads the stored curating configuration. A missing key yields
// the zero-value config with a nil error.
func (c *CuratingACL) GetConfig() (CuratingConfig, error) {
	var cfg CuratingConfig
	err := c.View(func(tx *badger.Txn) error {
		item, err := tx.Get(c.getConfigKey())
		if err == badger.ErrKeyNotFound {
			return nil // not configured yet: leave cfg zero-valued
		}
		if err != nil {
			return err
		}
		return item.Value(func(raw []byte) error {
			return json.Unmarshal(raw, &cfg)
		})
	})
	return cfg, err
}
// IsConfigured reports whether a configuration event has been stored,
// i.e. the loaded config carries a non-empty event ID.
func (c *CuratingACL) IsConfigured() (bool, error) {
	cfg, err := c.GetConfig()
	if err != nil {
		return false, err
	}
	return cfg.ConfigEventID != "", nil
}
// ==================== Trusted Pubkeys ====================
// SaveTrustedPubkey records a pubkey as trusted, stamping it with the
// current time and an optional operator note.
func (c *CuratingACL) SaveTrustedPubkey(pubkey string, note string) error {
	record := TrustedPubkey{
		Pubkey: pubkey,
		Note:   note,
		Added:  time.Now(),
	}
	data, err := json.Marshal(record)
	if err != nil {
		return err
	}
	return c.Update(func(tx *badger.Txn) error {
		return tx.Set(c.getTrustedPubkeyKey(pubkey), data)
	})
}
// RemoveTrustedPubkey deletes a pubkey's trusted record; deleting a missing
// key is a no-op in badger.
func (c *CuratingACL) RemoveTrustedPubkey(pubkey string) error {
	return c.Update(func(tx *badger.Txn) error {
		return tx.Delete(c.getTrustedPubkeyKey(pubkey))
	})
}
// ListTrustedPubkeys returns all trusted pubkey records. Entries that fail
// to read or decode are skipped rather than aborting the listing.
func (c *CuratingACL) ListTrustedPubkeys() ([]TrustedPubkey, error) {
	var out []TrustedPubkey
	err := c.View(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getTrustedPubkeyPrefix()})
		defer iter.Close()
		for iter.Rewind(); iter.Valid(); iter.Next() {
			raw, err := iter.Item().ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec TrustedPubkey
			if json.Unmarshal(raw, &rec) == nil {
				out = append(out, rec)
			}
		}
		return nil
	})
	return out, err
}
// IsPubkeyTrusted reports whether a trusted record exists for the pubkey.
func (c *CuratingACL) IsPubkeyTrusted(pubkey string) (bool, error) {
	found := false
	err := c.View(func(tx *badger.Txn) error {
		switch _, err := tx.Get(c.getTrustedPubkeyKey(pubkey)); err {
		case badger.ErrKeyNotFound:
			return nil
		case nil:
			found = true
			return nil
		default:
			return err
		}
	})
	return found, err
}
// ==================== Blacklisted Pubkeys ====================
// SaveBlacklistedPubkey records a pubkey as blacklisted, stamping it with
// the current time and an optional reason.
func (c *CuratingACL) SaveBlacklistedPubkey(pubkey string, reason string) error {
	record := BlacklistedPubkey{
		Pubkey: pubkey,
		Reason: reason,
		Added:  time.Now(),
	}
	data, err := json.Marshal(record)
	if err != nil {
		return err
	}
	return c.Update(func(tx *badger.Txn) error {
		return tx.Set(c.getBlacklistedPubkeyKey(pubkey), data)
	})
}
// RemoveBlacklistedPubkey deletes a pubkey's blacklist record; deleting a
// missing key is a no-op in badger.
func (c *CuratingACL) RemoveBlacklistedPubkey(pubkey string) error {
	return c.Update(func(tx *badger.Txn) error {
		return tx.Delete(c.getBlacklistedPubkeyKey(pubkey))
	})
}
// ListBlacklistedPubkeys returns all blacklist records. Entries that fail
// to read or decode are skipped rather than aborting the listing.
func (c *CuratingACL) ListBlacklistedPubkeys() ([]BlacklistedPubkey, error) {
	var out []BlacklistedPubkey
	err := c.View(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getBlacklistedPubkeyPrefix()})
		defer iter.Close()
		for iter.Rewind(); iter.Valid(); iter.Next() {
			raw, err := iter.Item().ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec BlacklistedPubkey
			if json.Unmarshal(raw, &rec) == nil {
				out = append(out, rec)
			}
		}
		return nil
	})
	return out, err
}
// IsPubkeyBlacklisted reports whether a blacklist record exists for the
// pubkey.
func (c *CuratingACL) IsPubkeyBlacklisted(pubkey string) (bool, error) {
	found := false
	err := c.View(func(tx *badger.Txn) error {
		switch _, err := tx.Get(c.getBlacklistedPubkeyKey(pubkey)); err {
		case badger.ErrKeyNotFound:
			return nil
		case nil:
			found = true
			return nil
		default:
			return err
		}
	})
	return found, err
}
// ==================== Event Counting ====================
// GetEventCount returns the stored event count for a pubkey on a given
// date (YYYY-MM-DD); a missing record reads as zero.
func (c *CuratingACL) GetEventCount(pubkey, date string) (int, error) {
	count := 0
	err := c.View(func(tx *badger.Txn) error {
		item, err := tx.Get(c.getEventCountKey(pubkey, date))
		if err == badger.ErrKeyNotFound {
			return nil
		}
		if err != nil {
			return err
		}
		return item.Value(func(raw []byte) error {
			var rec PubkeyEventCount
			if err := json.Unmarshal(raw, &rec); err != nil {
				return err
			}
			count = rec.Count
			return nil
		})
	})
	return count, err
}
// IncrementEventCount bumps the per-pubkey daily counter inside a single
// write transaction and returns the new total. A missing record starts the
// day at zero before the increment.
func (c *CuratingACL) IncrementEventCount(pubkey, date string) (int, error) {
	var total int
	err := c.Update(func(tx *badger.Txn) error {
		key := c.getEventCountKey(pubkey, date)
		rec := PubkeyEventCount{Pubkey: pubkey, Date: date}
		item, err := tx.Get(key)
		switch {
		case err == badger.ErrKeyNotFound:
			// first event of the day: start from the zero record above
		case err != nil:
			return err
		default:
			raw, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			if err = json.Unmarshal(raw, &rec); err != nil {
				return err
			}
		}
		rec.Count++
		rec.LastEvent = time.Now()
		total = rec.Count
		data, err := json.Marshal(rec)
		if err != nil {
			return err
		}
		return tx.Set(key, data)
	})
	return total, err
}
// CleanupOldEventCounts deletes per-pubkey counters dated before the given
// YYYY-MM-DD string (lexicographic comparison matches chronological order
// for this format). Unreadable records are skipped.
func (c *CuratingACL) CleanupOldEventCounts(beforeDate string) error {
	return c.Update(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getEventCountPrefix()})
		defer iter.Close()
		var stale [][]byte
		for iter.Rewind(); iter.Valid(); iter.Next() {
			item := iter.Item()
			raw, err := item.ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec PubkeyEventCount
			if json.Unmarshal(raw, &rec) != nil {
				continue
			}
			if rec.Date < beforeDate {
				stale = append(stale, item.KeyCopy(nil))
			}
		}
		// Delete after iteration to avoid mutating under the iterator.
		for _, key := range stale {
			if err := tx.Delete(key); err != nil {
				return err
			}
		}
		return nil
	})
}
// ==================== IP Event Counting ====================
// IPEventCount tracks events from an IP address per day (flood protection).
type IPEventCount struct {
	IP        string    `json:"ip"`         // source address
	Date      string    `json:"date"`       // YYYY-MM-DD bucket
	Count     int       `json:"count"`      // events seen so far on that date
	LastEvent time.Time `json:"last_event"` // time of the most recent event
}
// GetIPEventCount returns the stored event count for an IP on a given date
// (YYYY-MM-DD); a missing record reads as zero.
func (c *CuratingACL) GetIPEventCount(ip, date string) (int, error) {
	count := 0
	err := c.View(func(tx *badger.Txn) error {
		item, err := tx.Get(c.getIPEventCountKey(ip, date))
		if err == badger.ErrKeyNotFound {
			return nil
		}
		if err != nil {
			return err
		}
		return item.Value(func(raw []byte) error {
			var rec IPEventCount
			if err := json.Unmarshal(raw, &rec); err != nil {
				return err
			}
			count = rec.Count
			return nil
		})
	})
	return count, err
}
// IncrementIPEventCount bumps the per-IP daily counter inside a single
// write transaction and returns the new total. A missing record starts the
// day at zero before the increment.
func (c *CuratingACL) IncrementIPEventCount(ip, date string) (int, error) {
	var total int
	err := c.Update(func(tx *badger.Txn) error {
		key := c.getIPEventCountKey(ip, date)
		rec := IPEventCount{IP: ip, Date: date}
		item, err := tx.Get(key)
		switch {
		case err == badger.ErrKeyNotFound:
			// first event of the day from this IP
		case err != nil:
			return err
		default:
			raw, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			if err = json.Unmarshal(raw, &rec); err != nil {
				return err
			}
		}
		rec.Count++
		rec.LastEvent = time.Now()
		total = rec.Count
		data, err := json.Marshal(rec)
		if err != nil {
			return err
		}
		return tx.Set(key, data)
	})
	return total, err
}
// CleanupOldIPEventCounts deletes per-IP counters dated before the given
// YYYY-MM-DD string. Unreadable records are skipped.
func (c *CuratingACL) CleanupOldIPEventCounts(beforeDate string) error {
	return c.Update(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getIPEventCountPrefix()})
		defer iter.Close()
		var stale [][]byte
		for iter.Rewind(); iter.Valid(); iter.Next() {
			item := iter.Item()
			raw, err := item.ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec IPEventCount
			if json.Unmarshal(raw, &rec) != nil {
				continue
			}
			if rec.Date < beforeDate {
				stale = append(stale, item.KeyCopy(nil))
			}
		}
		// Delete after iteration to avoid mutating under the iterator.
		for _, key := range stale {
			if err := tx.Delete(key); err != nil {
				return err
			}
		}
		return nil
	})
}
// getIPEventCountKey builds the storage key for an IP's daily event
// counter: prefix + ip + "_" + date.
func (c *CuratingACL) getIPEventCountKey(ip, date string) []byte {
	return []byte("CURATING_ACL_IP_EVENT_COUNT_" + ip + "_" + date)
}
// getIPEventCountPrefix returns the key prefix shared by all per-IP daily
// counters, used for prefix iteration during cleanup.
func (c *CuratingACL) getIPEventCountPrefix() []byte {
	return []byte("CURATING_ACL_IP_EVENT_COUNT_")
}
// ==================== IP Offense Tracking ====================
// GetIPOffense returns the offense record for an IP, or nil when none has
// been recorded yet.
func (c *CuratingACL) GetIPOffense(ip string) (*IPOffense, error) {
	var rec *IPOffense
	err := c.View(func(tx *badger.Txn) error {
		item, err := tx.Get(c.getIPOffenseKey(ip))
		if err == badger.ErrKeyNotFound {
			return nil // no offenses on record: rec stays nil
		}
		if err != nil {
			return err
		}
		return item.Value(func(raw []byte) error {
			rec = new(IPOffense)
			return json.Unmarshal(raw, rec)
		})
	})
	return rec, err
}
// RecordIPOffense records a rate limit violation from an IP for a pubkey
// Returns the new offense count
//
// Note: OffenseCount increments only once per distinct pubkey seen from
// this IP — repeated violations by the same pubkey update LastOffense but
// do not escalate the count. The escalation ladder in the ACL layer (first
// vs. repeat ban duration) therefore keys off how many different pubkeys
// have hit limits from the address.
func (c *CuratingACL) RecordIPOffense(ip, pubkey string) (int, error) {
	var newCount int
	err := c.Update(func(txn *badger.Txn) error {
		key := c.getIPOffenseKey(ip)
		var offense IPOffense
		item, err := txn.Get(key)
		if err == badger.ErrKeyNotFound {
			// First offense from this address: start a fresh record.
			offense = IPOffense{
				IP:           ip,
				OffenseCount: 0,
				PubkeysHit:   []string{},
				LastOffense:  time.Now(),
			}
		} else if err != nil {
			return err
		} else {
			val, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			if err := json.Unmarshal(val, &offense); err != nil {
				return err
			}
		}
		// Add pubkey if not already in list
		found := false
		for _, p := range offense.PubkeysHit {
			if p == pubkey {
				found = true
				break
			}
		}
		if !found {
			offense.PubkeysHit = append(offense.PubkeysHit, pubkey)
			offense.OffenseCount++
		}
		offense.LastOffense = time.Now()
		newCount = offense.OffenseCount
		data, err := json.Marshal(offense)
		if err != nil {
			return err
		}
		return txn.Set(key, data)
	})
	return newCount, err
}
// ==================== IP Blocking ====================
// BlockIP records a temporary block for an IP lasting the given duration,
// overwriting any existing block for the same address.
func (c *CuratingACL) BlockIP(ip string, duration time.Duration, reason string) error {
	now := time.Now()
	entry := CuratingBlockedIP{
		IP:        ip,
		Reason:    reason,
		ExpiresAt: now.Add(duration),
		Added:     now,
	}
	data, err := json.Marshal(entry)
	if err != nil {
		return err
	}
	return c.Update(func(tx *badger.Txn) error {
		return tx.Set(c.getBlockedIPKey(ip), data)
	})
}
// UnblockIP deletes any block record for the IP; a missing record is a
// no-op in badger.
func (c *CuratingACL) UnblockIP(ip string) error {
	return c.Update(func(tx *badger.Txn) error {
		return tx.Delete(c.getBlockedIPKey(ip))
	})
}
// IsIPBlocked reports whether an active (non-expired) block exists for the
// IP, along with its expiry time. Expired records read as not blocked; the
// periodic cleanup removes them from storage.
func (c *CuratingACL) IsIPBlocked(ip string) (bool, time.Time, error) {
	var (
		active  bool
		expires time.Time
	)
	err := c.View(func(tx *badger.Txn) error {
		item, err := tx.Get(c.getBlockedIPKey(ip))
		if err == badger.ErrKeyNotFound {
			return nil
		}
		if err != nil {
			return err
		}
		return item.Value(func(raw []byte) error {
			var rec CuratingBlockedIP
			if err := json.Unmarshal(raw, &rec); err != nil {
				return err
			}
			// Still in force when now is not strictly after the expiry.
			if !time.Now().After(rec.ExpiresAt) {
				active = true
				expires = rec.ExpiresAt
			}
			return nil
		})
	})
	return active, expires, err
}
// ListBlockedIPs returns every stored IP block record, expired entries
// included (cleanup removes those separately). Unreadable records are
// skipped.
func (c *CuratingACL) ListBlockedIPs() ([]CuratingBlockedIP, error) {
	var out []CuratingBlockedIP
	err := c.View(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getBlockedIPPrefix()})
		defer iter.Close()
		for iter.Rewind(); iter.Valid(); iter.Next() {
			raw, err := iter.Item().ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec CuratingBlockedIP
			if json.Unmarshal(raw, &rec) == nil {
				out = append(out, rec)
			}
		}
		return nil
	})
	return out, err
}
// CleanupExpiredIPBlocks deletes IP block records whose expiry has passed.
// Unreadable records are skipped.
func (c *CuratingACL) CleanupExpiredIPBlocks() error {
	return c.Update(func(tx *badger.Txn) error {
		iter := tx.NewIterator(badger.IteratorOptions{Prefix: c.getBlockedIPPrefix()})
		defer iter.Close()
		cutoff := time.Now()
		var expired [][]byte
		for iter.Rewind(); iter.Valid(); iter.Next() {
			item := iter.Item()
			raw, err := item.ValueCopy(nil)
			if err != nil {
				continue
			}
			var rec CuratingBlockedIP
			if json.Unmarshal(raw, &rec) != nil {
				continue
			}
			if cutoff.After(rec.ExpiresAt) {
				expired = append(expired, item.KeyCopy(nil))
			}
		}
		// Delete after iteration to avoid mutating under the iterator.
		for _, key := range expired {
			if err := tx.Delete(key); err != nil {
				return err
			}
		}
		return nil
	})
}
// ==================== Spam Events ====================
// MarkEventAsSpam marks an event as spam.
// The record stores the offending event ID, its author pubkey, the reason,
// and the time the flag was added.
func (c *CuratingACL) MarkEventAsSpam(eventID, pubkey, reason string) error {
	return c.Update(func(txn *badger.Txn) error {
		data, err := json.Marshal(SpamEvent{
			EventID: eventID,
			Pubkey:  pubkey,
			Reason:  reason,
			Added:   time.Now(),
		})
		if err != nil {
			return err
		}
		return txn.Set(c.getSpamEventKey(eventID), data)
	})
}
// UnmarkEventAsSpam removes the spam flag from an event.
// Removing a flag that was never set is a no-op.
func (c *CuratingACL) UnmarkEventAsSpam(eventID string) error {
	return c.Update(func(txn *badger.Txn) error {
		return txn.Delete(c.getSpamEventKey(eventID))
	})
}
// IsEventSpam checks if an event is marked as spam.
// Presence of the key is the flag; the stored value is not inspected.
func (c *CuratingACL) IsEventSpam(eventID string) (bool, error) {
	found := false
	err := c.View(func(txn *badger.Txn) error {
		switch _, err := txn.Get(c.getSpamEventKey(eventID)); {
		case err == badger.ErrKeyNotFound:
			return nil
		case err != nil:
			return err
		default:
			found = true
			return nil
		}
	})
	return found, err
}
// ListSpamEvents returns all spam events.
// Entries that cannot be read or decoded are skipped rather than failing
// the whole listing.
func (c *CuratingACL) ListSpamEvents() ([]SpamEvent, error) {
	var out []SpamEvent
	err := c.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.IteratorOptions{Prefix: c.getSpamEventPrefix()})
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			val, err := it.Item().ValueCopy(nil)
			if err != nil {
				continue
			}
			var entry SpamEvent
			if err := json.Unmarshal(val, &entry); err != nil {
				continue
			}
			out = append(out, entry)
		}
		return nil
	})
	return out, err
}
// ==================== Unclassified Users ====================
// ListUnclassifiedUsers returns users who are neither trusted nor blacklisted
// sorted by event count descending. Per-day event-count records are summed
// into one row per pubkey; limit > 0 caps the number of rows returned.
func (c *CuratingACL) ListUnclassifiedUsers(limit int) ([]UnclassifiedUser, error) {
	// Build the set of pubkeys that already have a classification.
	classified := make(map[string]struct{})
	trusted, err := c.ListTrustedPubkeys()
	if err != nil {
		return nil, err
	}
	for _, t := range trusted {
		classified[t.Pubkey] = struct{}{}
	}
	blacklisted, err := c.ListBlacklistedPubkeys()
	if err != nil {
		return nil, err
	}
	for _, b := range blacklisted {
		classified[b.Pubkey] = struct{}{}
	}
	// Aggregate the per-day count records into one entry per pubkey,
	// keeping the most recent LastEvent timestamp.
	byPubkey := make(map[string]*UnclassifiedUser)
	err = c.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.IteratorOptions{Prefix: c.getEventCountPrefix()})
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			val, err := it.Item().ValueCopy(nil)
			if err != nil {
				continue
			}
			var ec PubkeyEventCount
			if err := json.Unmarshal(val, &ec); err != nil {
				continue
			}
			// Classified pubkeys are excluded from this report.
			if _, skip := classified[ec.Pubkey]; skip {
				continue
			}
			u, ok := byPubkey[ec.Pubkey]
			if !ok {
				byPubkey[ec.Pubkey] = &UnclassifiedUser{
					Pubkey:     ec.Pubkey,
					EventCount: ec.Count,
					LastEvent:  ec.LastEvent,
				}
				continue
			}
			u.EventCount += ec.Count
			if ec.LastEvent.After(u.LastEvent) {
				u.LastEvent = ec.LastEvent
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// Flatten, order by activity (highest first), then apply the cap.
	var users []UnclassifiedUser
	for _, u := range byPubkey {
		users = append(users, *u)
	}
	sort.Slice(users, func(i, j int) bool {
		return users[i].EventCount > users[j].EventCount
	})
	if limit > 0 && len(users) > limit {
		users = users[:limit]
	}
	return users, nil
}
// ==================== Key Generation ====================
// getConfigKey returns the fixed database key under which the curating
// configuration is stored (a single well-known key, no suffix).
func (c *CuratingACL) getConfigKey() []byte {
	return []byte("CURATING_ACL_CONFIG")
}
// getTrustedPubkeyKey returns the database key for a trusted-pubkey entry.
// Must stay in sync with getTrustedPubkeyPrefix.
func (c *CuratingACL) getTrustedPubkeyKey(pubkey string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_TRUSTED_PUBKEY_" + pubkey)
}
// getTrustedPubkeyPrefix returns the key prefix shared by all trusted-pubkey
// entries; used for prefix iteration over the trusted list.
func (c *CuratingACL) getTrustedPubkeyPrefix() []byte {
	return []byte("CURATING_ACL_TRUSTED_PUBKEY_")
}
// getBlacklistedPubkeyKey returns the database key for a blacklisted-pubkey
// entry. Must stay in sync with getBlacklistedPubkeyPrefix.
func (c *CuratingACL) getBlacklistedPubkeyKey(pubkey string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_BLACKLISTED_PUBKEY_" + pubkey)
}
// getBlacklistedPubkeyPrefix returns the key prefix shared by all
// blacklisted-pubkey entries; used for prefix iteration over the blacklist.
func (c *CuratingACL) getBlacklistedPubkeyPrefix() []byte {
	return []byte("CURATING_ACL_BLACKLISTED_PUBKEY_")
}
// getEventCountKey returns the database key for a per-pubkey, per-date event
// count record. Must stay in sync with getEventCountPrefix.
func (c *CuratingACL) getEventCountKey(pubkey, date string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_EVENT_COUNT_" + pubkey + "_" + date)
}
// getEventCountPrefix returns the key prefix shared by all event-count
// records; used to iterate every pubkey/date count pair.
func (c *CuratingACL) getEventCountPrefix() []byte {
	return []byte("CURATING_ACL_EVENT_COUNT_")
}
// getIPOffenseKey returns the database key for an IP offense-count record.
func (c *CuratingACL) getIPOffenseKey(ip string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_IP_OFFENSE_" + ip)
}
// getBlockedIPKey returns the database key for a blocked-IP entry.
// Must stay in sync with getBlockedIPPrefix.
func (c *CuratingACL) getBlockedIPKey(ip string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_BLOCKED_IP_" + ip)
}
// getBlockedIPPrefix returns the key prefix shared by all blocked-IP
// entries; used for prefix iteration over the block list.
func (c *CuratingACL) getBlockedIPPrefix() []byte {
	return []byte("CURATING_ACL_BLOCKED_IP_")
}
// getSpamEventKey returns the database key for a spam-event flag.
// Must stay in sync with getSpamEventPrefix.
func (c *CuratingACL) getSpamEventKey(eventID string) []byte {
	// Plain concatenation: one allocation, no bytes.Buffer bookkeeping.
	return []byte("CURATING_ACL_SPAM_EVENT_" + eventID)
}
// getSpamEventPrefix returns the key prefix shared by all spam-event flags;
// used for prefix iteration over flagged events.
func (c *CuratingACL) getSpamEventPrefix() []byte {
	return []byte("CURATING_ACL_SPAM_EVENT_")
}
// ==================== Kind Checking Helpers ====================
// IsKindAllowed checks if an event kind is allowed based on config.
// A kind passes if it appears in AllowedKinds, falls inside any of the
// AllowedRanges ("lo-hi" strings), or belongs to any listed category.
// A nil config allows nothing.
func (c *CuratingACL) IsKindAllowed(kind int, config *CuratingConfig) bool {
	if config == nil {
		return false
	}
	for _, allowed := range config.AllowedKinds {
		if allowed == kind {
			return true
		}
	}
	for _, r := range config.AllowedRanges {
		if kindInRange(kind, r) {
			return true
		}
	}
	for _, category := range config.KindCategories {
		if kindInCategory(kind, category) {
			return true
		}
	}
	return false
}
// kindInRange checks if a kind is within a range string like "1000-1999".
// A string that does not parse as two dash-separated integers never matches.
func kindInRange(kind int, rangeStr string) bool {
	var lo, hi int
	if n, err := fmt.Sscanf(rangeStr, "%d-%d", &lo, &hi); err != nil || n != 2 {
		return false
	}
	return lo <= kind && kind <= hi
}
// categoryKinds maps each predefined category name to the event kinds it
// contains. Declared at package level so the map is built once instead of
// being reallocated on every kindInCategory call.
var categoryKinds = map[string][]int{
	"social":       {0, 1, 3, 6, 7, 10002},
	"dm":           {4, 14, 1059},
	"longform":     {30023, 30024},
	"media":        {1063, 20, 21, 22},
	"marketplace":  {30017, 30018, 30019, 30020, 1021, 1022},
	"groups_nip29": {9, 10, 11, 12, 9000, 9001, 9002, 39000, 39001, 39002},
	"groups_nip72": {34550, 1111, 4550},
	"lists":        {10000, 10001, 10003, 30000, 30001, 30003},
}

// kindInCategory checks if a kind belongs to a predefined category.
// Unknown category names match nothing.
func kindInCategory(kind int, category string) bool {
	// Ranging over the nil slice for an unknown category simply yields false.
	for _, k := range categoryKinds[category] {
		if k == kind {
			return true
		}
	}
	return false
}

22
pkg/database/graph-adapter.go

@@ -38,5 +38,27 @@ func (a *GraphAdapter) TraverseThread(seedEventID []byte, maxDepth int, directio
return a.db.TraverseThread(seedEventID, maxDepth, direction)
}
// CollectInboundRefs implements graph.GraphDatabase.
// It collects events that reference items in the result.
func (a *GraphAdapter) CollectInboundRefs(result graph.GraphResultI, depth int, kinds []uint16) error {
	// Without the concrete result type there is nothing we can augment.
	gr, ok := result.(*GraphResult)
	if !ok {
		return nil
	}
	return a.db.AddInboundRefsToResult(gr, depth, kinds)
}
// CollectOutboundRefs implements graph.GraphDatabase.
// It collects events referenced by items in the result.
func (a *GraphAdapter) CollectOutboundRefs(result graph.GraphResultI, depth int, kinds []uint16) error {
	// Without the concrete result type there is nothing we can augment.
	gr, ok := result.(*GraphResult)
	if !ok {
		return nil
	}
	return a.db.AddOutboundRefsToResult(gr, depth, kinds)
}
// Verify GraphAdapter implements graph.GraphDatabase
var _ graph.GraphDatabase = (*GraphAdapter)(nil)

10
pkg/database/graph-result.go

@ -325,3 +325,13 @@ func (r *GraphResult) GetTotalPubkeys() int { @@ -325,3 +325,13 @@ func (r *GraphResult) GetTotalPubkeys() int {
func (r *GraphResult) GetTotalEvents() int {
return r.TotalEvents
}
// GetInboundRefs returns the InboundRefs map for external access.
// The returned value is the live internal map (kind -> target event ID ->
// referencing event IDs), not a copy; callers must not mutate it.
func (r *GraphResult) GetInboundRefs() map[uint16]map[string][]string {
	return r.InboundRefs
}
// GetOutboundRefs returns the OutboundRefs map for external access.
// The returned value is the live internal map (kind -> source event ID ->
// referenced event IDs), not a copy; callers must not mutate it.
func (r *GraphResult) GetOutboundRefs() map[uint16]map[string][]string {
	return r.OutboundRefs
}

191
pkg/database/migrations.go

@ -13,12 +13,13 @@ import ( @@ -13,12 +13,13 @@ import (
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/ints"
"git.mleku.dev/mleku/nostr/encoders/kind"
)
const (
currentVersion uint32 = 7
currentVersion uint32 = 8
)
func (d *D) RunMigrations() {
@ -115,6 +116,14 @@ func (d *D) RunMigrations() { @@ -115,6 +116,14 @@ func (d *D) RunMigrations() {
// bump to version 7
_ = d.writeVersionTag(7)
}
if dbVersion < 8 {
log.I.F("migrating to version 8...")
// Backfill e-tag graph indexes (eeg/gee) for graph query support
// This creates edges for all existing events with e-tags
d.BackfillETagGraph()
// bump to version 8
_ = d.writeVersionTag(8)
}
}
// writeVersionTag writes a new version tag key to the database (no value)
@ -1079,3 +1088,183 @@ func (d *D) RebuildWordIndexesWithNormalization() { @@ -1079,3 +1088,183 @@ func (d *D) RebuildWordIndexesWithNormalization() {
log.I.F("word index rebuild with unicode normalization complete")
}
// BackfillETagGraph populates e-tag graph indexes (eeg/gee) for all existing events.
// This enables graph traversal queries for thread/reply discovery.
//
// The migration:
//  1. Iterates all events in compact storage (cmp prefix)
//  2. Extracts e-tags from each event
//  3. For e-tags referencing events we have, creates bidirectional edges:
//     - eeg|source|target|kind|direction(out) - forward edge
//     - gee|target|kind|direction(in)|source - reverse edge
//
// This is idempotent: running multiple times won't create duplicate edges
// (BadgerDB overwrites existing keys).
//
// Note: all edges are collected in memory before the write pass, so peak
// memory use is proportional to the number of e-tag references in the DB.
func (d *D) BackfillETagGraph() {
	log.I.F("backfilling e-tag graph indexes for graph query support...")
	var err error
	type ETagEdge struct {
		SourceSerial *types.Uint40
		TargetSerial *types.Uint40
		Kind         *types.Uint16
	}
	var edges []ETagEdge
	var processedEvents int
	var eventsWithETags int
	var skippedTargets int
	// First pass: collect all e-tag edges from events
	if err = d.View(func(txn *badger.Txn) error {
		// Iterate compact events (cmp prefix)
		cmpPrf := new(bytes.Buffer)
		if err = indexes.CompactEventEnc(nil).MarshalWrite(cmpPrf); chk.E(err) {
			return err
		}
		it := txn.NewIterator(badger.IteratorOptions{Prefix: cmpPrf.Bytes()})
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			key := item.KeyCopy(nil)
			// Extract serial from key (prefix 3 bytes + serial 5 bytes)
			if len(key) < 8 {
				continue
			}
			sourceSerial := new(types.Uint40)
			if err = sourceSerial.UnmarshalRead(bytes.NewReader(key[3:8])); chk.E(err) {
				continue
			}
			// Get event data
			var val []byte
			if val, err = item.ValueCopy(nil); chk.E(err) {
				continue
			}
			// Decode the event.
			// First get the event ID from serial (needed for compact format decoding).
			eventId, idErr := d.GetEventIdBySerial(sourceSerial)
			if idErr != nil {
				continue
			}
			resolver := NewDatabaseSerialResolver(d, d.serialCache)
			ev, decErr := UnmarshalCompactEvent(val, eventId, resolver)
			if decErr != nil || ev == nil {
				continue
			}
			processedEvents++
			// Extract e-tags; events without any are only counted.
			eTags := ev.Tags.GetAll([]byte("e"))
			if len(eTags) == 0 {
				continue
			}
			eventsWithETags++
			eventKind := new(types.Uint16)
			eventKind.Set(ev.Kind)
			for _, eTag := range eTags {
				if eTag.Len() < 2 {
					continue
				}
				// Get event ID from e-tag; must be a valid 32-byte hex ID.
				var targetEventID []byte
				targetEventID, err = hex.Dec(string(eTag.ValueHex()))
				if err != nil || len(targetEventID) != 32 {
					continue
				}
				// Look up target event's serial
				targetSerial, lookupErr := d.GetSerialById(targetEventID)
				if lookupErr != nil || targetSerial == nil {
					// Target event not in our database - skip
					skippedTargets++
					continue
				}
				edges = append(edges, ETagEdge{
					SourceSerial: sourceSerial,
					TargetSerial: targetSerial,
					Kind:         eventKind,
				})
			}
		}
		return nil
	}); chk.E(err) {
		log.E.F("e-tag graph backfill: failed to collect edges: %v", err)
		return
	}
	log.I.F("e-tag graph backfill: processed %d events, %d with e-tags, found %d edges to create (%d targets not found)",
		processedEvents, eventsWithETags, len(edges), skippedTargets)
	if len(edges) == 0 {
		log.I.F("e-tag graph backfill: no edges to create")
		return
	}
	// Sort edges for ordered writes (improves compaction)
	sort.Slice(edges, func(i, j int) bool {
		if edges[i].SourceSerial.Get() != edges[j].SourceSerial.Get() {
			return edges[i].SourceSerial.Get() < edges[j].SourceSerial.Get()
		}
		return edges[i].TargetSerial.Get() < edges[j].TargetSerial.Get()
	})
	// Second pass: write edges in batches; a failed batch is logged and
	// skipped so one bad batch doesn't abort the whole backfill.
	const batchSize = 1000
	var createdEdges int
	for i := 0; i < len(edges); i += batchSize {
		end := i + batchSize
		if end > len(edges) {
			end = len(edges)
		}
		batch := edges[i:end]
		if err = d.Update(func(txn *badger.Txn) error {
			for _, edge := range batch {
				// Create forward edge: eeg|source|target|kind|direction(out)
				directionOut := new(types.Letter)
				directionOut.Set(types.EdgeDirectionETagOut)
				keyBuf := new(bytes.Buffer)
				if err = indexes.EventEventGraphEnc(edge.SourceSerial, edge.TargetSerial, edge.Kind, directionOut).MarshalWrite(keyBuf); chk.E(err) {
					continue
				}
				if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
					continue
				}
				// Create reverse edge: gee|target|kind|direction(in)|source
				directionIn := new(types.Letter)
				directionIn.Set(types.EdgeDirectionETagIn)
				// Use a fresh buffer for the reverse key: badger's txn.Set
				// keeps a reference to the key slice until the transaction
				// commits, so reusing keyBuf's backing array (via Reset)
				// would overwrite the forward-edge key written above.
				revBuf := new(bytes.Buffer)
				if err = indexes.GraphEventEventEnc(edge.TargetSerial, edge.Kind, directionIn, edge.SourceSerial).MarshalWrite(revBuf); chk.E(err) {
					continue
				}
				if err = txn.Set(revBuf.Bytes(), nil); chk.E(err) {
					continue
				}
				// Counts forward+reverse pairs, i.e. one bidirectional edge.
				createdEdges++
			}
			return nil
		}); chk.E(err) {
			log.W.F("e-tag graph backfill: batch write failed: %v", err)
			continue
		}
		if (i/batchSize)%10 == 0 && i > 0 {
			log.I.F("e-tag graph backfill progress: %d/%d edges created", i, len(edges))
		}
	}
	log.I.F("e-tag graph backfill complete: created %d bidirectional edges", createdEdges)
}

22
pkg/neo4j/graph-adapter.go

@ -36,5 +36,27 @@ func (a *GraphAdapter) TraverseThread(seedEventID []byte, maxDepth int, directio @@ -36,5 +36,27 @@ func (a *GraphAdapter) TraverseThread(seedEventID []byte, maxDepth int, directio
return a.db.TraverseThread(seedEventID, maxDepth, direction)
}
// CollectInboundRefs implements graph.GraphDatabase.
// It collects events that reference items in the result.
func (a *GraphAdapter) CollectInboundRefs(result graph.GraphResultI, depth int, kinds []uint16) error {
	// Without the concrete result type there is nothing we can augment.
	gr, ok := result.(*GraphResult)
	if !ok {
		return nil
	}
	return a.db.AddInboundRefsToResult(gr, depth, kinds)
}
// CollectOutboundRefs implements graph.GraphDatabase.
// It collects events referenced by items in the result.
func (a *GraphAdapter) CollectOutboundRefs(result graph.GraphResultI, depth int, kinds []uint16) error {
	// Without the concrete result type there is nothing we can augment.
	gr, ok := result.(*GraphResult)
	if !ok {
		return nil
	}
	return a.db.AddOutboundRefsToResult(gr, depth, kinds)
}
// Verify GraphAdapter implements graph.GraphDatabase
var _ graph.GraphDatabase = (*GraphAdapter)(nil)

163
pkg/neo4j/graph-refs.go

@@ -0,0 +1,163 @@
package neo4j
import (
"context"
"fmt"
"strings"
)
// AddInboundRefsToResult collects inbound references (events that reference discovered items)
// for events at a specific depth in the result.
//
// For example, if you have a follows graph result and want to find all kind-7 reactions
// to posts by users at depth 1, this collects those reactions and adds them to result.InboundRefs.
//
// Parameters:
//   - result: The graph result to augment with ref data
//   - depth: The depth at which to collect refs (0 = all depths)
//   - kinds: Event kinds to collect (e.g., [7] for reactions, [6] for reposts)
//
// NOTE(review): the Cypher below restricts the *referenced* (authored) events
// to kinds 1 and 30023 via a hard-coded list, while $kinds filters only the
// *referencing* events. Confirm the hard-coded [1, 30023] filter is
// intentional and should not be configurable.
func (n *N) AddInboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
	ctx := context.Background()
	// Get pubkeys to find refs for; depth 0 means every discovered pubkey.
	var pubkeys []string
	if depth == 0 {
		pubkeys = result.GetAllPubkeys()
	} else {
		pubkeys = result.GetPubkeysAtDepth(depth)
	}
	if len(pubkeys) == 0 {
		n.Logger.Debugf("AddInboundRefsToResult: no pubkeys at depth %d", depth)
		return nil
	}
	// Convert kinds to int64 for Neo4j
	kindsInt := make([]int64, len(kinds))
	for i, k := range kinds {
		kindsInt[i] = int64(k)
	}
	// Query for events by these pubkeys and their inbound references
	// This finds: (ref:Event)-[:REFERENCES]->(authored:Event)<-[:AUTHORED_BY]-(u:NostrUser)
	// where the referencing event has the specified kinds
	cypher := `
		UNWIND $pubkeys AS pk
		MATCH (u:NostrUser {pubkey: pk})<-[:AUTHORED_BY]-(authored:Event)
		WHERE authored.kind IN [1, 30023]
		MATCH (ref:Event)-[:REFERENCES]->(authored)
		WHERE ref.kind IN $kinds
		RETURN authored.id AS target_id, ref.id AS ref_id, ref.kind AS ref_kind
	`
	params := map[string]any{
		"pubkeys": pubkeys,
		"kinds":   kindsInt,
	}
	queryResult, err := n.ExecuteRead(ctx, cypher, params)
	if err != nil {
		return fmt.Errorf("failed to query inbound refs: %w", err)
	}
	refCount := 0
	for queryResult.Next(ctx) {
		record := queryResult.Record()
		// Each record is (target_id, ref_id, ref_kind); malformed rows are skipped.
		targetID, ok := record.Values[0].(string)
		if !ok || targetID == "" {
			continue
		}
		refID, ok := record.Values[1].(string)
		if !ok || refID == "" {
			continue
		}
		refKind, ok := record.Values[2].(int64)
		if !ok {
			continue
		}
		// IDs are lowercased so map keys are consistent regardless of stored case.
		result.AddInboundRef(uint16(refKind), strings.ToLower(targetID), strings.ToLower(refID))
		refCount++
	}
	n.Logger.Debugf("AddInboundRefsToResult: collected %d refs for %d pubkeys", refCount, len(pubkeys))
	return nil
}
// AddOutboundRefsToResult collects outbound references (events referenced by discovered items).
//
// For example, find all events that posts by users at depth 1 reference (quoted posts, replied-to posts).
//
// Parameters:
//   - result: The graph result to augment with ref data
//   - depth: The depth at which to collect refs (0 = all depths)
//   - kinds: Kinds of the authoring (source) events whose references are
//     followed. Note the asymmetry with AddInboundRefsToResult, where kinds
//     filters the referencing events instead.
func (n *N) AddOutboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
	ctx := context.Background()
	// Get pubkeys to find refs for; depth 0 means every discovered pubkey.
	var pubkeys []string
	if depth == 0 {
		pubkeys = result.GetAllPubkeys()
	} else {
		pubkeys = result.GetPubkeysAtDepth(depth)
	}
	if len(pubkeys) == 0 {
		n.Logger.Debugf("AddOutboundRefsToResult: no pubkeys at depth %d", depth)
		return nil
	}
	// Convert kinds to int64 for Neo4j
	kindsInt := make([]int64, len(kinds))
	for i, k := range kinds {
		kindsInt[i] = int64(k)
	}
	// Query for events by these pubkeys and their outbound references
	// This finds: (authored:Event)-[:REFERENCES]->(ref:Event)
	// where the authored event has the specified kinds
	cypher := `
		UNWIND $pubkeys AS pk
		MATCH (u:NostrUser {pubkey: pk})<-[:AUTHORED_BY]-(authored:Event)
		WHERE authored.kind IN $kinds
		MATCH (authored)-[:REFERENCES]->(ref:Event)
		RETURN authored.id AS source_id, ref.id AS ref_id, authored.kind AS source_kind
	`
	params := map[string]any{
		"pubkeys": pubkeys,
		"kinds":   kindsInt,
	}
	queryResult, err := n.ExecuteRead(ctx, cypher, params)
	if err != nil {
		return fmt.Errorf("failed to query outbound refs: %w", err)
	}
	refCount := 0
	for queryResult.Next(ctx) {
		record := queryResult.Record()
		// Each record is (source_id, ref_id, source_kind); malformed rows are skipped.
		sourceID, ok := record.Values[0].(string)
		if !ok || sourceID == "" {
			continue
		}
		refID, ok := record.Values[1].(string)
		if !ok || refID == "" {
			continue
		}
		sourceKind, ok := record.Values[2].(int64)
		if !ok {
			continue
		}
		// IDs are lowercased so map keys are consistent regardless of stored case.
		result.AddOutboundRef(uint16(sourceKind), strings.ToLower(sourceID), strings.ToLower(refID))
		refCount++
	}
	n.Logger.Debugf("AddOutboundRefsToResult: collected %d refs from %d pubkeys", refCount, len(pubkeys))
	return nil
}

52
pkg/neo4j/graph-result.go

@ -33,6 +33,14 @@ type GraphResult struct { @@ -33,6 +33,14 @@ type GraphResult struct {
// TotalEvents is the count of unique events discovered across all depths.
TotalEvents int
// InboundRefs tracks inbound references (events that reference discovered items).
// Structure: kind -> target_id -> []referencing_event_ids
InboundRefs map[uint16]map[string][]string
// OutboundRefs tracks outbound references (events referenced by discovered items).
// Structure: kind -> source_id -> []referenced_event_ids
OutboundRefs map[uint16]map[string][]string
}
// NewGraphResult creates a new initialized GraphResult.
@ -42,6 +50,8 @@ func NewGraphResult() *GraphResult { @@ -42,6 +50,8 @@ func NewGraphResult() *GraphResult {
EventsByDepth: make(map[int][]string),
FirstSeenPubkey: make(map[string]int),
FirstSeenEvent: make(map[string]int),
InboundRefs: make(map[uint16]map[string][]string),
OutboundRefs: make(map[uint16]map[string][]string),
}
}
@ -195,3 +205,45 @@ func (r *GraphResult) GetEventDepthsSorted() []int { @@ -195,3 +205,45 @@ func (r *GraphResult) GetEventDepthsSorted() []int {
sort.Ints(depths)
return depths
}
// GetInboundRefs returns the InboundRefs map for external access.
// The returned value is the live internal map (kind -> target event ID ->
// referencing event IDs), not a copy; callers must not mutate it.
func (r *GraphResult) GetInboundRefs() map[uint16]map[string][]string {
	return r.InboundRefs
}
// GetOutboundRefs returns the OutboundRefs map for external access.
// The returned value is the live internal map (kind -> source event ID ->
// referenced event IDs), not a copy; callers must not mutate it.
func (r *GraphResult) GetOutboundRefs() map[uint16]map[string][]string {
	return r.OutboundRefs
}
// AddInboundRef records an inbound reference from a referencing event to a target.
// The per-kind bucket is created lazily on first use.
func (r *GraphResult) AddInboundRef(kind uint16, targetIDHex string, referencingEventIDHex string) {
	byTarget := r.InboundRefs[kind]
	if byTarget == nil {
		byTarget = make(map[string][]string)
		r.InboundRefs[kind] = byTarget
	}
	byTarget[targetIDHex] = append(byTarget[targetIDHex], referencingEventIDHex)
}
// AddOutboundRef records an outbound reference from a source event to a referenced event.
// The per-kind bucket is created lazily on first use.
func (r *GraphResult) AddOutboundRef(kind uint16, sourceIDHex string, referencedEventIDHex string) {
	bySource := r.OutboundRefs[kind]
	if bySource == nil {
		bySource = make(map[string][]string)
		r.OutboundRefs[kind] = bySource
	}
	bySource[sourceIDHex] = append(bySource[sourceIDHex], referencedEventIDHex)
}
// GetPubkeysAtDepth returns pubkeys at a specific depth, or empty slice if none.
func (r *GraphResult) GetPubkeysAtDepth(depth int) []string {
	pubkeys, ok := r.PubkeysByDepth[depth]
	if !ok {
		// Return a non-nil empty slice for absent depths.
		return []string{}
	}
	return pubkeys
}
// GetEventsAtDepth returns events at a specific depth, or empty slice if none.
func (r *GraphResult) GetEventsAtDepth(depth int) []string {
	events, ok := r.EventsByDepth[depth]
	if !ok {
		// Return a non-nil empty slice for absent depths.
		return []string{}
	}
	return events
}

97
pkg/protocol/graph/executor.go

@ -6,6 +6,7 @@ package graph @@ -6,6 +6,7 @@ package graph
import (
"encoding/json"
"sort"
"strconv"
"time"
@ -37,6 +38,9 @@ type GraphResultI interface { @@ -37,6 +38,9 @@ type GraphResultI interface {
GetEventsByDepth() map[int][]string
GetTotalPubkeys() int
GetTotalEvents() int
// Ref aggregation methods
GetInboundRefs() map[uint16]map[string][]string
GetOutboundRefs() map[uint16]map[string][]string
}
// GraphDatabase defines the interface for graph traversal operations.
@ -50,6 +54,10 @@ type GraphDatabase interface { @@ -50,6 +54,10 @@ type GraphDatabase interface {
FindMentions(pubkey []byte, kinds []uint16) (GraphResultI, error)
// TraverseThread performs BFS traversal of thread structure
TraverseThread(seedEventID []byte, maxDepth int, direction string) (GraphResultI, error)
// CollectInboundRefs finds events that reference items in the result
CollectInboundRefs(result GraphResultI, depth int, kinds []uint16) error
// CollectOutboundRefs finds events referenced by items in the result
CollectOutboundRefs(result GraphResultI, depth int, kinds []uint16) error
}
// Executor handles graph query execution and response generation.
@ -138,6 +146,36 @@ func (e *Executor) Execute(q *Query) (*event.E, error) { @@ -138,6 +146,36 @@ func (e *Executor) Execute(q *Query) (*event.E, error) {
return nil, ErrInvalidMethod
}
// Collect inbound refs if specified
if q.HasInboundRefs() {
for _, refSpec := range q.InboundRefs {
kinds := make([]uint16, len(refSpec.Kinds))
for i, k := range refSpec.Kinds {
kinds[i] = uint16(k)
}
// Collect refs at the specified from_depth (0 = all depths)
if err = e.db.CollectInboundRefs(result, refSpec.FromDepth, kinds); err != nil {
log.W.F("graph executor: failed to collect inbound refs: %v", err)
// Continue without refs rather than failing the query
}
}
log.D.F("graph executor: collected inbound refs")
}
// Collect outbound refs if specified
if q.HasOutboundRefs() {
for _, refSpec := range q.OutboundRefs {
kinds := make([]uint16, len(refSpec.Kinds))
for i, k := range refSpec.Kinds {
kinds[i] = uint16(k)
}
if err = e.db.CollectOutboundRefs(result, refSpec.FromDepth, kinds); err != nil {
log.W.F("graph executor: failed to collect outbound refs: %v", err)
}
}
log.D.F("graph executor: collected outbound refs")
}
// Generate response event
return e.generateResponse(q, result, responseKind)
}
@ -157,6 +195,14 @@ func (e *Executor) generateResponse(q *Query, result GraphResultI, responseKind @@ -157,6 +195,14 @@ func (e *Executor) generateResponse(q *Query, result GraphResultI, responseKind
content.TotalEvents = result.GetTotalEvents()
}
// Add ref summaries if present
if inboundRefs := result.GetInboundRefs(); len(inboundRefs) > 0 {
content.InboundRefs = buildRefSummaries(inboundRefs)
}
if outboundRefs := result.GetOutboundRefs(); len(outboundRefs) > 0 {
content.OutboundRefs = buildRefSummaries(outboundRefs)
}
contentBytes, err := json.Marshal(content)
if err != nil {
return nil, err
@ -199,4 +245,55 @@ type ResponseContent struct { @@ -199,4 +245,55 @@ type ResponseContent struct {
// TotalEvents is the total count of unique events discovered
TotalEvents int `json:"total_events,omitempty"`
// InboundRefs contains aggregated inbound references (events referencing discovered items)
// Structure: array of {kind, target, count, refs[]}
InboundRefs []RefSummary `json:"inbound_refs,omitempty"`
// OutboundRefs contains aggregated outbound references (events referenced by discovered items)
// Structure: array of {kind, source, count, refs[]}
OutboundRefs []RefSummary `json:"outbound_refs,omitempty"`
}
// RefSummary represents aggregated reference data for a single target/source.
type RefSummary struct {
	// Kind is the kind of the referencing/referenced events
	Kind uint16 `json:"kind"`
	// Target is the event ID being referenced (for inbound) or referencing (for outbound)
	Target string `json:"target"`
	// Count is the number of references
	Count int `json:"count"`
	// Refs is the list of event IDs (optional, may be omitted for large sets)
	Refs []string `json:"refs,omitempty"`
}

// buildRefSummaries converts the ref map structure to a sorted array of RefSummary.
// Results are sorted by count descending (most referenced first), then by kind
// ascending, then by target ID ascending. The target tie-break makes the
// output fully deterministic: Go map iteration order is randomized, so
// without it, entries with equal (count, kind) would shuffle between runs.
func buildRefSummaries(refs map[uint16]map[string][]string) []RefSummary {
	var summaries []RefSummary
	for kind, targets := range refs {
		for targetID, refIDs := range targets {
			summaries = append(summaries, RefSummary{
				Kind:   kind,
				Target: targetID,
				Count:  len(refIDs),
				Refs:   refIDs,
			})
		}
	}
	sort.Slice(summaries, func(i, j int) bool {
		// Primary: count descending.
		if summaries[i].Count != summaries[j].Count {
			return summaries[i].Count > summaries[j].Count
		}
		// Secondary: kind ascending.
		if summaries[i].Kind != summaries[j].Kind {
			return summaries[i].Kind < summaries[j].Kind
		}
		// Tertiary: target ID ascending, for deterministic output.
		return summaries[i].Target < summaries[j].Target
	})
	return summaries
}

4
pkg/ratelimit/memory.go

@ -10,7 +10,7 @@ import ( @@ -10,7 +10,7 @@ import (
)
// MinimumMemoryMB is the minimum memory required to run the relay with rate limiting.
const MinimumMemoryMB = 500
const MinimumMemoryMB = 128
// AutoDetectMemoryFraction is the fraction of available memory to use when auto-detecting.
const AutoDetectMemoryFraction = 0.66
@ -20,7 +20,7 @@ const AutoDetectMemoryFraction = 0.66 @@ -20,7 +20,7 @@ const AutoDetectMemoryFraction = 0.66
const DefaultMaxMemoryMB = 1500
// ErrInsufficientMemory is returned when there isn't enough memory to run the relay.
var ErrInsufficientMemory = errors.New("insufficient memory: relay requires at least 500MB of available memory")
var ErrInsufficientMemory = errors.New("insufficient memory: relay requires at least 128MB of available memory")
// ProcessMemoryStats contains memory statistics for the current process.
// On Linux, these are read from /proc/self/status for accurate RSS values.

2
pkg/version/version

@ -1 +1 @@ @@ -1 +1 @@
v0.46.2
v0.47.0

Loading…
Cancel
Save