Curation Mode:
- Three-tier publisher classification: Trusted, Blacklisted, Unclassified
- Per-pubkey rate limiting (default 50/day) for unclassified users
- IP flood protection (default 500/day) with automatic banning
- Event kind allow-listing via categories, ranges, and custom kinds
- Query filtering hides blacklisted pubkey events (admin/owner exempt)
- Web UI for managing trusted/blacklisted pubkeys and configuration
- NIP-86 API endpoints for all curation management operations

Graph Query Extension:
- Complete reference aggregation for Badger and Neo4j backends
- E-tag graph backfill migration (v8) runs automatically on startup
- Configuration options: ORLY_GRAPH_QUERIES_ENABLED, MAX_DEPTH, etc.
- NIP-11 advertisement of graph query capabilities

Files modified:
- app/handle-nip86-curating.go: NIP-86 curation API handlers (new)
- app/web/src/CurationView.svelte: Curation management UI (new)
- app/web/src/kindCategories.js: Kind category definitions (new)
- pkg/acl/curating.go: Curating ACL implementation (new)
- pkg/database/curating-acl.go: Database layer for curation (new)
- pkg/neo4j/graph-refs.go: Neo4j ref collection (new)
- pkg/database/migrations.go: E-tag graph backfill migration
- pkg/protocol/graph/executor.go: Reference aggregation support
- app/handle-event.go: Curation config event processing
- app/handle-req.go: Blacklist filtering for queries
- docs/GRAPH_QUERIES_REMAINING_PLAN.md: Updated completion status

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
28 changed files with 5350 additions and 35 deletions
app/handle-nip86-curating.go
@@ -0,0 +1,593 @@
package app

import (
    "context"
    "encoding/hex"
    "encoding/json"
    "io"
    "net/http"
    "strconv"

    "lol.mleku.dev/chk"
    "next.orly.dev/pkg/acl"
    "next.orly.dev/pkg/database"
    "git.mleku.dev/mleku/nostr/httpauth"
)

// handleCuratingNIP86Request handles curating NIP-86 requests with pre-authenticated pubkey.
// This is called from the main NIP-86 handler after authentication.
func (s *Server) handleCuratingNIP86Request(w http.ResponseWriter, r *http.Request, pubkey []byte) {
    _ = pubkey // Pubkey already validated by caller

    // Get the curating ACL instance
    var curatingACL *acl.Curating
    for _, aclInstance := range acl.Registry.ACL {
        if aclInstance.Type() == "curating" {
            if curating, ok := aclInstance.(*acl.Curating); ok {
                curatingACL = curating
                break
            }
        }
    }

    if curatingACL == nil {
        http.Error(w, "Curating ACL not available", http.StatusInternalServerError)
        return
    }

    // Read and parse the request
    body, err := io.ReadAll(r.Body)
    if chk.E(err) {
        http.Error(w, "Failed to read request body", http.StatusBadRequest)
        return
    }

    var request NIP86Request
    if err := json.Unmarshal(body, &request); chk.E(err) {
        http.Error(w, "Invalid JSON request", http.StatusBadRequest)
        return
    }

    // Set response headers
    w.Header().Set("Content-Type", "application/json")

    // Handle the request based on method
    response := s.handleCuratingNIP86Method(request, curatingACL)

    // Send response
    jsonData, err := json.Marshal(response)
    if chk.E(err) {
        http.Error(w, "Error generating response", http.StatusInternalServerError)
        return
    }

    w.Write(jsonData)
}

// handleCuratingNIP86Management handles NIP-86 management API requests for curating mode (standalone)
func (s *Server) handleCuratingNIP86Management(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Check Content-Type
    contentType := r.Header.Get("Content-Type")
    if contentType != "application/nostr+json+rpc" {
        http.Error(w, "Content-Type must be application/nostr+json+rpc", http.StatusBadRequest)
        return
    }

    // Validate NIP-98 authentication
    valid, pubkey, err := httpauth.CheckAuth(r)
    if chk.E(err) || !valid {
        errorMsg := "NIP-98 authentication validation failed"
        if err != nil {
            errorMsg = err.Error()
        }
        http.Error(w, errorMsg, http.StatusUnauthorized)
        return
    }

    // Check permissions - require owner or admin level
    accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
    if accessLevel != "owner" && accessLevel != "admin" {
        http.Error(w, "Owner or admin permission required", http.StatusForbidden)
        return
    }

    // Check if curating ACL is active
    if acl.Registry.Type() != "curating" {
        http.Error(w, "Curating ACL mode is not active", http.StatusBadRequest)
        return
    }

    // Delegate to shared request handler
    s.handleCuratingNIP86Request(w, r, pubkey)
}

// handleCuratingNIP86Method handles individual NIP-86 methods for curating mode
func (s *Server) handleCuratingNIP86Method(request NIP86Request, curatingACL *acl.Curating) NIP86Response {
    dbACL := curatingACL.GetCuratingACL()

    switch request.Method {
    case "supportedmethods":
        return s.handleCuratingSupportedMethods()
    case "trustpubkey":
        return s.handleTrustPubkey(request.Params, curatingACL)
    case "untrustpubkey":
        return s.handleUntrustPubkey(request.Params, curatingACL)
    case "listtrustedpubkeys":
        return s.handleListTrustedPubkeys(dbACL)
    case "blacklistpubkey":
        return s.handleBlacklistPubkey(request.Params, curatingACL)
    case "unblacklistpubkey":
        return s.handleUnblacklistPubkey(request.Params, curatingACL)
    case "listblacklistedpubkeys":
        return s.handleListBlacklistedPubkeys(dbACL)
    case "listunclassifiedusers":
        return s.handleListUnclassifiedUsers(request.Params, dbACL)
    case "markspam":
        return s.handleMarkSpam(request.Params, dbACL)
    case "unmarkspam":
        return s.handleUnmarkSpam(request.Params, dbACL)
    case "listspamevents":
        return s.handleListSpamEvents(dbACL)
    case "deleteevent":
        return s.handleDeleteEvent(request.Params)
    case "getcuratingconfig":
        return s.handleGetCuratingConfig(dbACL)
    case "listblockedips":
        return s.handleListCuratingBlockedIPs(dbACL)
    case "unblockip":
        return s.handleUnblockCuratingIP(request.Params, dbACL)
    case "isconfigured":
        return s.handleIsConfigured(dbACL)
    default:
        return NIP86Response{Error: "Unknown method: " + request.Method}
    }
}

// handleCuratingSupportedMethods returns the list of supported methods for curating mode
func (s *Server) handleCuratingSupportedMethods() NIP86Response {
    methods := []string{
        "supportedmethods",
        "trustpubkey",
        "untrustpubkey",
        "listtrustedpubkeys",
        "blacklistpubkey",
        "unblacklistpubkey",
        "listblacklistedpubkeys",
        "listunclassifiedusers",
        "markspam",
        "unmarkspam",
        "listspamevents",
        "deleteevent",
        "getcuratingconfig",
        "listblockedips",
        "unblockip",
        "isconfigured",
    }
    return NIP86Response{Result: methods}
}

// handleTrustPubkey adds a pubkey to the trusted list
func (s *Server) handleTrustPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: pubkey"}
    }

    pubkey, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid pubkey parameter"}
    }

    if len(pubkey) != 64 {
        return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
    }

    note := ""
    if len(params) > 1 {
        if n, ok := params[1].(string); ok {
            note = n
        }
    }

    if err := curatingACL.TrustPubkey(pubkey, note); chk.E(err) {
        return NIP86Response{Error: "Failed to trust pubkey: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleUntrustPubkey removes a pubkey from the trusted list
func (s *Server) handleUntrustPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: pubkey"}
    }

    pubkey, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid pubkey parameter"}
    }

    if err := curatingACL.UntrustPubkey(pubkey); chk.E(err) {
        return NIP86Response{Error: "Failed to untrust pubkey: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleListTrustedPubkeys returns the list of trusted pubkeys
func (s *Server) handleListTrustedPubkeys(dbACL *database.CuratingACL) NIP86Response {
    trusted, err := dbACL.ListTrustedPubkeys()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to list trusted pubkeys: " + err.Error()}
    }

    result := make([]map[string]interface{}, len(trusted))
    for i, t := range trusted {
        result[i] = map[string]interface{}{
            "pubkey": t.Pubkey,
            "note":   t.Note,
            "added":  t.Added.Unix(),
        }
    }

    return NIP86Response{Result: result}
}

// handleBlacklistPubkey adds a pubkey to the blacklist
func (s *Server) handleBlacklistPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: pubkey"}
    }

    pubkey, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid pubkey parameter"}
    }

    if len(pubkey) != 64 {
        return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
    }

    reason := ""
    if len(params) > 1 {
        if r, ok := params[1].(string); ok {
            reason = r
        }
    }

    if err := curatingACL.BlacklistPubkey(pubkey, reason); chk.E(err) {
        return NIP86Response{Error: "Failed to blacklist pubkey: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleUnblacklistPubkey removes a pubkey from the blacklist
func (s *Server) handleUnblacklistPubkey(params []interface{}, curatingACL *acl.Curating) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: pubkey"}
    }

    pubkey, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid pubkey parameter"}
    }

    if err := curatingACL.UnblacklistPubkey(pubkey); chk.E(err) {
        return NIP86Response{Error: "Failed to unblacklist pubkey: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleListBlacklistedPubkeys returns the list of blacklisted pubkeys
func (s *Server) handleListBlacklistedPubkeys(dbACL *database.CuratingACL) NIP86Response {
    blacklisted, err := dbACL.ListBlacklistedPubkeys()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to list blacklisted pubkeys: " + err.Error()}
    }

    result := make([]map[string]interface{}, len(blacklisted))
    for i, b := range blacklisted {
        result[i] = map[string]interface{}{
            "pubkey": b.Pubkey,
            "reason": b.Reason,
            "added":  b.Added.Unix(),
        }
    }

    return NIP86Response{Result: result}
}

// handleListUnclassifiedUsers returns unclassified users sorted by event count
func (s *Server) handleListUnclassifiedUsers(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
    limit := 100 // Default limit
    if len(params) > 0 {
        if l, ok := params[0].(float64); ok {
            limit = int(l)
        }
    }

    users, err := dbACL.ListUnclassifiedUsers(limit)
    if chk.E(err) {
        return NIP86Response{Error: "Failed to list unclassified users: " + err.Error()}
    }

    result := make([]map[string]interface{}, len(users))
    for i, u := range users {
        result[i] = map[string]interface{}{
            "pubkey":      u.Pubkey,
            "event_count": u.EventCount,
            "last_event":  u.LastEvent.Unix(),
        }
    }

    return NIP86Response{Result: result}
}

// handleMarkSpam marks an event as spam
func (s *Server) handleMarkSpam(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: event_id"}
    }

    eventID, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid event_id parameter"}
    }

    if len(eventID) != 64 {
        return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
    }

    pubkey := ""
    if len(params) > 1 {
        if p, ok := params[1].(string); ok {
            pubkey = p
        }
    }

    reason := ""
    if len(params) > 2 {
        if r, ok := params[2].(string); ok {
            reason = r
        }
    }

    if err := dbACL.MarkEventAsSpam(eventID, pubkey, reason); chk.E(err) {
        return NIP86Response{Error: "Failed to mark event as spam: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleUnmarkSpam removes the spam flag from an event
func (s *Server) handleUnmarkSpam(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: event_id"}
    }

    eventID, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid event_id parameter"}
    }

    if err := dbACL.UnmarkEventAsSpam(eventID); chk.E(err) {
        return NIP86Response{Error: "Failed to unmark event as spam: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleListSpamEvents returns the list of spam-flagged events
func (s *Server) handleListSpamEvents(dbACL *database.CuratingACL) NIP86Response {
    spam, err := dbACL.ListSpamEvents()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to list spam events: " + err.Error()}
    }

    result := make([]map[string]interface{}, len(spam))
    for i, sp := range spam {
        result[i] = map[string]interface{}{
            "event_id": sp.EventID,
            "pubkey":   sp.Pubkey,
            "reason":   sp.Reason,
            "added":    sp.Added.Unix(),
        }
    }

    return NIP86Response{Result: result}
}

// handleDeleteEvent permanently deletes an event from the database
func (s *Server) handleDeleteEvent(params []interface{}) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: event_id"}
    }

    eventIDHex, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid event_id parameter"}
    }

    if len(eventIDHex) != 64 {
        return NIP86Response{Error: "Invalid event_id format (must be 64 hex characters)"}
    }

    // Convert hex to bytes
    eventID, err := hex.DecodeString(eventIDHex)
    if err != nil {
        return NIP86Response{Error: "Invalid event_id hex: " + err.Error()}
    }

    // Delete from database
    if err := s.DB.DeleteEvent(context.Background(), eventID); chk.E(err) {
        return NIP86Response{Error: "Failed to delete event: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleGetCuratingConfig returns the current curating configuration
func (s *Server) handleGetCuratingConfig(dbACL *database.CuratingACL) NIP86Response {
    config, err := dbACL.GetConfig()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to get config: " + err.Error()}
    }

    result := map[string]interface{}{
        "daily_limit":      config.DailyLimit,
        "first_ban_hours":  config.FirstBanHours,
        "second_ban_hours": config.SecondBanHours,
        "allowed_kinds":    config.AllowedKinds,
        "allowed_ranges":   config.AllowedRanges,
        "kind_categories":  config.KindCategories,
        "config_event_id":  config.ConfigEventID,
        "config_pubkey":    config.ConfigPubkey,
        "configured_at":    config.ConfiguredAt,
        "is_configured":    config.ConfigEventID != "",
    }

    return NIP86Response{Result: result}
}

// handleListCuratingBlockedIPs returns the list of blocked IPs in curating mode
func (s *Server) handleListCuratingBlockedIPs(dbACL *database.CuratingACL) NIP86Response {
    blocked, err := dbACL.ListBlockedIPs()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to list blocked IPs: " + err.Error()}
    }

    result := make([]map[string]interface{}, len(blocked))
    for i, b := range blocked {
        result[i] = map[string]interface{}{
            "ip":         b.IP,
            "reason":     b.Reason,
            "expires_at": b.ExpiresAt.Unix(),
            "added":      b.Added.Unix(),
        }
    }

    return NIP86Response{Result: result}
}

// handleUnblockCuratingIP unblocks an IP in curating mode
func (s *Server) handleUnblockCuratingIP(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
    if len(params) < 1 {
        return NIP86Response{Error: "Missing required parameter: ip"}
    }

    ip, ok := params[0].(string)
    if !ok {
        return NIP86Response{Error: "Invalid ip parameter"}
    }

    if err := dbACL.UnblockIP(ip); chk.E(err) {
        return NIP86Response{Error: "Failed to unblock IP: " + err.Error()}
    }

    return NIP86Response{Result: true}
}

// handleIsConfigured checks if curating mode is configured
func (s *Server) handleIsConfigured(dbACL *database.CuratingACL) NIP86Response {
    configured, err := dbACL.IsConfigured()
    if chk.E(err) {
        return NIP86Response{Error: "Failed to check configuration: " + err.Error()}
    }

    return NIP86Response{Result: configured}
}

// GetKindCategoriesInfo returns information about available kind categories
func GetKindCategoriesInfo() []map[string]interface{} {
    categories := []map[string]interface{}{
        {
            "id":          "social",
            "name":        "Social/Notes",
            "description": "Profiles, text notes, follows, reposts, reactions",
            "kinds":       []int{0, 1, 3, 6, 7, 10002},
        },
        {
            "id":          "dm",
            "name":        "Direct Messages",
            "description": "NIP-04 DMs, NIP-17 private messages, gift wraps",
            "kinds":       []int{4, 14, 1059},
        },
        {
            "id":          "longform",
            "name":        "Long-form Content",
            "description": "Articles and drafts",
            "kinds":       []int{30023, 30024},
        },
        {
            "id":          "media",
            "name":        "Media",
            "description": "File metadata, video, audio",
            "kinds":       []int{1063, 20, 21, 22},
        },
        {
            "id":          "marketplace",
            "name":        "Marketplace",
            "description": "Product listings, stalls, auctions",
            "kinds":       []int{30017, 30018, 30019, 30020, 1021, 1022},
        },
        {
            "id":          "groups_nip29",
            "name":        "Group Messaging (NIP-29)",
            "description": "Simple group messages and metadata",
            "kinds":       []int{9, 10, 11, 12, 9000, 9001, 9002, 39000, 39001, 39002},
        },
        {
            "id":          "groups_nip72",
            "name":        "Communities (NIP-72)",
            "description": "Moderated communities and post approvals",
            "kinds":       []int{34550, 1111, 4550},
        },
        {
            "id":          "lists",
            "name":        "Lists/Bookmarks",
            "description": "Mute lists, pins, categorized lists, bookmarks",
            "kinds":       []int{10000, 10001, 10003, 30000, 30001, 30003},
        },
    }
    return categories
}

// expandKindRange expands a range string like "1000-1999" into individual kinds
func expandKindRange(rangeStr string) []int {
    var kinds []int
    parts := make([]int, 2)
    n, err := parseRange(rangeStr, parts)
    if err != nil || n != 2 {
        return kinds
    }
    for i := parts[0]; i <= parts[1]; i++ {
        kinds = append(kinds, i)
    }
    return kinds
}

func parseRange(s string, parts []int) (int, error) {
    // Simple parsing of "start-end"
    for i, c := range s {
        if c == '-' && i > 0 {
            start, err := strconv.Atoi(s[:i])
            if err != nil {
                return 0, err
            }
            end, err := strconv.Atoi(s[i+1:])
            if err != nil {
                return 0, err
            }
            parts[0] = start
            parts[1] = end
            return 2, nil
        }
    }
    return 0, nil
}
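
For reference, these handlers consume a JSON-RPC-style body POSTed with `Content-Type: application/nostr+json+rpc` and a NIP-98 `Authorization` header. A minimal sketch of a `trustpubkey` exchange, assuming the standard NIP-86 wire field names implied by the dispatch code above (the pubkey value is a placeholder):

```json
{"method": "trustpubkey", "params": ["<64-char-hex-pubkey>", "optional admin note"]}
```

On success the handler returns `{"result": true}`; on failure it returns `{"error": "..."}` with a human-readable message.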
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
app/web/src/kindCategories.js
@@ -0,0 +1,160 @@
/**
 * Kind categories for curating mode.
 * These define predefined groups of event kinds that can be enabled/disabled together.
 * The categories match the server-side definitions in pkg/database/curating-acl.go.
 */

export const curationKindCategories = [
  {
    id: "social",
    name: "Social/Notes",
    description: "User profiles, notes, follows, reposts, reactions, and relay lists",
    kinds: [0, 1, 3, 6, 7, 10002],
  },
  {
    id: "dm",
    name: "Direct Messages",
    description: "Encrypted direct messages (legacy and NIP-17 gift-wrapped)",
    kinds: [4, 14, 1059],
  },
  {
    id: "longform",
    name: "Long-form Content",
    description: "Blog posts and article drafts",
    kinds: [30023, 30024],
  },
  {
    id: "media",
    name: "Media",
    description: "File metadata and media attachments",
    kinds: [1063, 20, 21, 22],
  },
  {
    id: "marketplace",
    name: "Marketplace",
    description: "Product listings, stalls, and marketplace events",
    kinds: [30017, 30018, 30019, 30020],
  },
  {
    id: "groups_nip29",
    name: "Group Messaging (NIP-29)",
    description: "Simple relay-based group chat messages",
    kinds: [9, 10, 11, 12],
  },
  {
    id: "groups_nip72",
    name: "Communities (NIP-72)",
    description: "Community definitions and threaded discussions",
    kinds: [34550, 1111, 4550],
  },
  {
    id: "lists",
    name: "Lists/Bookmarks",
    description: "Mute lists, pin lists, and parameterized list events",
    kinds: [10000, 10001, 30000, 30001],
  },
];

/**
 * Get all kinds from selected categories.
 * @param {string[]} categoryIds - Array of category IDs
 * @returns {number[]} - Array of unique kind numbers
 */
export function getKindsFromCategories(categoryIds) {
  const kinds = new Set();
  for (const id of categoryIds) {
    const category = curationKindCategories.find((c) => c.id === id);
    if (category) {
      category.kinds.forEach((k) => kinds.add(k));
    }
  }
  return Array.from(kinds).sort((a, b) => a - b);
}

/**
 * Get category IDs that contain a given kind.
 * @param {number} kind - The kind number to look up
 * @returns {string[]} - Array of category IDs containing this kind
 */
export function getCategoriesForKind(kind) {
  return curationKindCategories
    .filter((c) => c.kinds.includes(kind))
    .map((c) => c.id);
}

/**
 * Parse a custom kinds string (e.g., "100, 200-300, 500") into an array of kinds.
 * @param {string} customKinds - Comma-separated list of kinds and ranges
 * @returns {number[]} - Array of individual kind numbers
 */
export function parseCustomKinds(customKinds) {
  if (!customKinds || !customKinds.trim()) return [];

  const kinds = new Set();
  const parts = customKinds.split(",").map((p) => p.trim());

  for (const part of parts) {
    if (!part) continue;

    // Check if it's a range (e.g., "100-200")
    if (part.includes("-")) {
      const [start, end] = part.split("-").map((n) => parseInt(n.trim(), 10));
      if (!isNaN(start) && !isNaN(end) && start <= end) {
        // Don't expand ranges > 1000 to avoid memory issues
        if (end - start <= 1000) {
          for (let i = start; i <= end; i++) {
            kinds.add(i);
          }
        }
      }
    } else {
      const num = parseInt(part, 10);
      if (!isNaN(num)) {
        kinds.add(num);
      }
    }
  }

  return Array.from(kinds).sort((a, b) => a - b);
}

/**
 * Format a list of kinds into a compact string with ranges.
 * @param {number[]} kinds - Array of kind numbers
 * @returns {string} - Formatted string like "1, 3, 5-10, 15"
 */
export function formatKindsCompact(kinds) {
  if (!kinds || kinds.length === 0) return "";

  const sorted = [...kinds].sort((a, b) => a - b);
  const ranges = [];
  let rangeStart = sorted[0];
  let rangeEnd = sorted[0];

  for (let i = 1; i < sorted.length; i++) {
    if (sorted[i] === rangeEnd + 1) {
      rangeEnd = sorted[i];
    } else {
      if (rangeEnd > rangeStart + 1) {
        ranges.push(`${rangeStart}-${rangeEnd}`);
      } else if (rangeEnd === rangeStart + 1) {
        ranges.push(`${rangeStart}, ${rangeEnd}`);
      } else {
        ranges.push(`${rangeStart}`);
      }
      rangeStart = sorted[i];
      rangeEnd = sorted[i];
    }
  }

  // Push the last range
  if (rangeEnd > rangeStart + 1) {
    ranges.push(`${rangeStart}-${rangeEnd}`);
  } else if (rangeEnd === rangeStart + 1) {
    ranges.push(`${rangeStart}, ${rangeEnd}`);
  } else {
    ranges.push(`${rangeStart}`);
  }

  return ranges.join(", ");
}
docs/GRAPH_QUERIES_REMAINING_PLAN.md
@@ -0,0 +1,778 @@
# Graph Queries: Remaining Implementation Plan

> Consolidated plan based on the NIP-XX-GRAPH-QUERIES.md spec, existing implementation plans, and codebase analysis.

## Current Status Summary

| Component | Status | Notes |
|-----------|--------|-------|
| Filter extension parsing (`_graph`) | ✅ COMPLETE | Phase 0 |
| E-tag graph index (eeg/gee) | ✅ COMPLETE | Phase 1 - indexes populated on new events |
| Graph traversal primitives | ✅ COMPLETE | Phase 2 |
| High-level traversals (follows, followers, mentions, thread) | ✅ COMPLETE | Phase 3 |
| Query handler + relay-signed responses | ✅ COMPLETE | Phase 4 |
| **Reference aggregation (Badger)** | ✅ COMPLETE | Phase 5C - `pkg/database/graph-refs.go` |
| **Reference aggregation (Neo4j)** | ✅ COMPLETE | Phase 5C - `pkg/neo4j/graph-refs.go` |
| **E-tag graph backfill migration** | ✅ COMPLETE | Phase 5B - Migration v8 in `pkg/database/migrations.go` |
| **Configuration options** | ✅ COMPLETE | Phase 5A - `app/config/config.go` |
| **NIP-11 advertisement** | ✅ COMPLETE | Phase 5A - `app/handle-relayinfo.go` |
| **P-tag graph query optimization** | ❌ NOT IMPLEMENTED | Enhancement - lower priority |

---

## Phase 5A: Configuration & NIP-11 Advertisement

**Goal**: Allow relays to configure graph query support and advertise capabilities.

### 5A.1 Configuration Options

**File**: `app/config/config.go` or environment variables

| Variable | Type | Default | Description |
|----------|------|---------|-------------|
| `ORLY_GRAPH_QUERIES_ENABLED` | bool | true | Enable/disable graph queries |
| `ORLY_GRAPH_MAX_DEPTH` | int | 16 | Maximum traversal depth |
| `ORLY_GRAPH_RATE_LIMIT` | int | 10 | Queries per minute per connection |
| `ORLY_GRAPH_MAX_RESULTS` | int | 10000 | Maximum pubkeys/events per response |

**Implementation**:

```go
// pkg/config/graph.go
type GraphConfig struct {
    Enabled    bool `env:"ORLY_GRAPH_QUERIES_ENABLED" default:"true"`
    MaxDepth   int  `env:"ORLY_GRAPH_MAX_DEPTH" default:"16"`
    RateLimit  int  `env:"ORLY_GRAPH_RATE_LIMIT" default:"10"`
    MaxResults int  `env:"ORLY_GRAPH_MAX_RESULTS" default:"10000"`
}
```

**Server integration** (`app/server.go`):

```go
// Check config before initializing executor
if config.Graph.Enabled {
    l.graphExecutor, err = graph.NewExecutor(graphAdapter, relaySecretKey, config.Graph)
}
```
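
As a usage sketch, the variables from the table above would be set in the relay's environment before startup (the values here are illustrative, not recommendations):

```bash
export ORLY_GRAPH_QUERIES_ENABLED=true
export ORLY_GRAPH_MAX_DEPTH=8        # tighter than the default of 16
export ORLY_GRAPH_RATE_LIMIT=10
export ORLY_GRAPH_MAX_RESULTS=5000
```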

### 5A.2 NIP-11 Advertisement

**File**: `app/handle-relayinfo.go`

**Changes required**:

```go
// In buildRelayInfo():
if s.graphExecutor != nil {
    info.SupportedNips = append(info.SupportedNips, "XX") // Or appropriate NIP number
    info.Limitation.GraphQueryMaxDepth = config.Graph.MaxDepth
}

// Add to RelayInfo struct or use Software field:
type RelayInfo struct {
    // ... existing fields
    Limitation struct {
        // ... existing limitations
        GraphQueryMaxDepth int `json:"graph_query_max_depth,omitempty"`
    } `json:"limitation,omitempty"`
}
```

**Example NIP-11 output**:

```json
{
  "supported_nips": [1, "XX"],
  "limitation": {
    "graph_query_max_depth": 16
  }
}
```

---

## Phase 5B: E-Tag Graph Backfill Migration

**Goal**: Populate the e-tag graph indexes (eeg/gee) for events stored before the graph feature was added.

### 5B.1 Migration Implementation

**File**: `pkg/database/migration-etag-graph.go`

```go
package database

import (
    "bytes"

    "github.com/dgraph-io/badger/v4"
    "next.orly.dev/pkg/database/indexes"
    "next.orly.dev/pkg/database/indexes/types"
)

// MigrateETagGraph backfills e-tag graph edges for existing events.
// This is safe to run multiple times (idempotent).
func (d *D) MigrateETagGraph() error {
    log.I.F("Starting e-tag graph backfill migration...")

    var processed, edges, skipped int
    batchSize := 1000
    batch := make([]eTagEdge, 0, batchSize)

    // Iterate all events using serial-event index (sei)
    err := d.View(func(txn *badger.Txn) error {
        opts := badger.DefaultIteratorOptions
        opts.PrefetchValues = true
        it := txn.NewIterator(opts)
        defer it.Close()

        prefix := indexes.NewPrefix(indexes.SerialEvent).Bytes()

        for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
            item := it.Item()

            // Decode event serial from key
            key := item.Key()
            sourceSer := new(types.Uint40)
            dec := indexes.SerialEventDec(sourceSer)
            if err := dec.UnmarshalRead(bytes.NewReader(key)); err != nil {
                skipped++
                continue
            }

            // Get event data
            var ev event.T
            if err := item.Value(func(val []byte) error {
                return ev.UnmarshalCompact(val)
            }); err != nil {
                skipped++
                continue
            }

            // Get event kind
            eventKind := new(types.Uint16)
            eventKind.Set(uint16(ev.Kind))

            // Extract e-tags
            for _, eTag := range ev.Tags.GetAll([]byte("e")) {
                if eTag.Len() < 2 {
                    continue
                }

                targetID, err := hex.Dec(string(eTag.Value()))
                if err != nil || len(targetID) != 32 {
                    continue
                }

                // Look up target event serial
                targetSer, err := d.GetEventSerialByID(targetID)
                if err != nil || targetSer == nil {
                    // Target event doesn't exist in our relay - skip
                    continue
                }

                batch = append(batch, eTagEdge{
                    sourceSer: sourceSer,
                    targetSer: targetSer,
                    kind:      eventKind,
                })
            }

            // Flush batch if full
            if len(batch) >= batchSize {
                if err := d.writeETagEdges(batch); err != nil {
                    return err
                }
                edges += len(batch)
                batch = batch[:0]
            }

            processed++
            if processed%10000 == 0 {
                log.I.F("Migration progress: %d events, %d edges, %d skipped", processed, edges, skipped)
            }
        }
        return nil
    })

    // Flush remaining batch
    if len(batch) > 0 {
        if err := d.writeETagEdges(batch); err != nil {
            return err
        }
        edges += len(batch)
    }

    log.I.F("E-tag graph migration complete: %d events, %d edges, %d skipped", processed, edges, skipped)
    return err
}

type eTagEdge struct {
    sourceSer *types.Uint40
    targetSer *types.Uint40
    kind      *types.Uint16
}

func (d *D) writeETagEdges(edges []eTagEdge) error {
    return d.Update(func(txn *badger.Txn) error {
        for _, edge := range edges {
            // Forward edge: eeg|source|target|kind|direction(0)
            keyBuf := new(bytes.Buffer)
            dirOut := new(types.Letter)
            dirOut.Set(types.EdgeDirectionETagOut)
            if err := indexes.EventEventGraphEnc(edge.sourceSer, edge.targetSer, edge.kind, dirOut).MarshalWrite(keyBuf); err != nil {
                return err
            }
            if err := txn.Set(keyBuf.Bytes(), nil); err != nil {
                return err
            }

            // Reverse edge: gee|target|kind|direction(1)|source
            keyBuf.Reset()
            dirIn := new(types.Letter)
            dirIn.Set(types.EdgeDirectionETagIn)
            if err := indexes.GraphEventEventEnc(edge.targetSer, edge.kind, dirIn, edge.sourceSer).MarshalWrite(keyBuf); err != nil {
                return err
            }
            if err := txn.Set(keyBuf.Bytes(), nil); err != nil {
                return err
            }
        }
        return nil
    })
}
```

### 5B.2 CLI Integration

**File**: `cmd/migrate.go` (or add to existing migration command)

```go
// Add migration subcommand
func runETagGraphMigration(cmd *cobra.Command, args []string) error {
    db, err := database.Open(cfg.DataDir)
    if err != nil {
        return err
    }
    defer db.Close()

    return db.MigrateETagGraph()
}
```

**Usage**:

```bash
./orly migrate --backfill-etag-graph
# OR
./orly migrate etag-graph
```

---

## Phase 5C: Reference Aggregation (inbound_refs / outbound_refs)

**Goal**: Implement the `inbound_refs` and `outbound_refs` query parameters per the NIP spec.

### Spec Requirements

From NIP-XX-GRAPH-QUERIES.md:

```json
{
  "_graph": {
    "method": "follows",
    "seed": "<pubkey>",
    "depth": 1,
    "inbound_refs": [
      {"kinds": [7], "from_depth": 1}
    ]
  }
}
```

**Semantics**:
- `inbound_refs`: Find events that **reference** discovered events (reactions, replies, reposts)
- `outbound_refs`: Find events **referenced by** discovered events (what posts are replying to)
- Multiple `ref_spec` items have **AND** semantics
- Multiple `kinds` within a single `ref_spec` have **OR** semantics
- Results sorted by count **descending** (most referenced first)
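
For example, this hypothetical query exercises both rules: each listed ref spec is applied (AND), while within the first spec an event of kind 7 or kind 6 matches (OR):

```json
{
  "_graph": {
    "method": "follows",
    "seed": "<pubkey>",
    "depth": 2,
    "inbound_refs": [
      {"kinds": [7, 6], "from_depth": 1},
      {"kinds": [1], "from_depth": 2}
    ]
  }
}
```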

### 5C.1 Extend Query Struct

**File**: `pkg/protocol/graph/query.go`

Already defined but needs execution:

```go
type RefSpec struct {
    Kinds     []uint16 `json:"kinds"`
    FromDepth int      `json:"from_depth,omitempty"`
}

type Query struct {
    // ... existing fields
    InboundRefs  []RefSpec `json:"inbound_refs,omitempty"`
    OutboundRefs []RefSpec `json:"outbound_refs,omitempty"`
}
```

### 5C.2 Implement Reference Collection

**File**: `pkg/database/graph-refs.go`

```go
// CollectInboundRefs finds events that reference events authored by the discovered pubkeys.
// For each depth level, it finds inbound e-tag references of the specified kinds and
// returns aggregated counts sorted by popularity (descending).
func (d *D) CollectInboundRefs(
    result *GraphResult,
    refSpecs []RefSpec,
    maxDepth int,
) error {
    if result.InboundRefs == nil {
        result.InboundRefs = make(map[uint16]map[string][]string)
    }

    // For each depth level
    for depth := 0; depth <= maxDepth; depth++ {
        // Determine which ref specs apply at this depth
        var applicableKinds []uint16
        for _, spec := range refSpecs {
            if depth >= spec.FromDepth {
                applicableKinds = append(applicableKinds, spec.Kinds...)
            }
        }

        if len(applicableKinds) == 0 {
            continue
        }

        // Get pubkeys at this depth
        var pubkeySerials []*types.Uint40
        if depth == 0 {
            // depth 0 = seed pubkey
            seedSerial, _ := d.PubkeyHexToSerial(result.SeedPubkey)
            if seedSerial != nil {
                pubkeySerials = []*types.Uint40{seedSerial}
            }
        } else {
            pubkeys := result.PubkeysByDepth[depth]
            for _, pkHex := range pubkeys {
                ser, _ := d.PubkeyHexToSerial(pkHex)
                if ser != nil {
                    pubkeySerials = append(pubkeySerials, ser)
                }
            }
        }

        // For each pubkey, find their events, then find references to those events
        for _, pkSerial := range pubkeySerials {
            // Get events authored by this pubkey
            authoredEvents, err := d.GetEventsAuthoredByPubkey(pkSerial, nil)
            if err != nil {
                continue
            }

            for _, eventSerial := range authoredEvents {
                eventIDHex, _ := d.GetEventIDFromSerial(eventSerial)
                if eventIDHex == "" {
                    continue
                }

                // Find inbound references (events that reference this event)
                refSerials, err := d.GetReferencingEvents(eventSerial, applicableKinds)
                if err != nil {
                    continue
                }

                for _, refSerial := range refSerials {
                    refIDHex, _ := d.GetEventIDFromSerial(refSerial)
                    if refIDHex == "" {
                        continue
                    }

                    // Get the kind of the referencing event
                    refKind, _ := d.GetEventKindFromSerial(refSerial)

                    // Add to aggregation
                    if result.InboundRefs[refKind] == nil {
                        result.InboundRefs[refKind] = make(map[string][]string)
                    }
                    result.InboundRefs[refKind][eventIDHex] = append(
                        result.InboundRefs[refKind][eventIDHex],
                        refIDHex,
                    )
                }
            }
        }
    }

    return nil
}

// CollectOutboundRefs finds events referenced BY events at each depth
// (following e-tag chains to find what posts are being replied to).
func (d *D) CollectOutboundRefs(
    result *GraphResult,
    refSpecs []RefSpec,
    maxDepth int,
) error {
    if result.OutboundRefs == nil {
        result.OutboundRefs = make(map[uint16]map[string][]string)
    }

    // Similar implementation to CollectInboundRefs, but using GetETagsFromEventSerial
    // to follow outbound references instead of GetReferencingEvents for inbound.
    // ...

    return nil
}
```

### 5C.3 Response Generation with Refs

**File**: `pkg/protocol/graph/executor.go`

Add ref aggregation support to the response:

```go
func (e *Executor) Execute(q *Query) (*event.T, error) {
    // ... existing traversal code ...

    // Collect references if specified
    if len(q.InboundRefs) > 0 {
        if err := e.db.CollectInboundRefs(result, q.InboundRefs, q.Depth); err != nil {
            return nil, fmt.Errorf("inbound refs: %w", err)
        }
    }

    if len(q.OutboundRefs) > 0 {
        if err := e.db.CollectOutboundRefs(result, q.OutboundRefs, q.Depth); err != nil {
            return nil, fmt.Errorf("outbound refs: %w", err)
        }
    }

    // Build response content with refs included
    content := ResponseContent{
        PubkeysByDepth: result.ToDepthArrays(),
        TotalPubkeys:   result.TotalPubkeys,
    }

    // Add ref summaries if present
    if len(result.InboundRefs) > 0 || len(result.OutboundRefs) > 0 {
        content.InboundRefSummary = buildRefSummary(result.InboundRefs)
        content.OutboundRefSummary = buildRefSummary(result.OutboundRefs)
    }

    // ... rest of response generation ...
}

type ResponseContent struct {
    PubkeysByDepth     [][]string   `json:"pubkeys_by_depth,omitempty"`
    EventsByDepth      [][]string   `json:"events_by_depth,omitempty"`
    TotalPubkeys       int          `json:"total_pubkeys,omitempty"`
    TotalEvents        int          `json:"total_events,omitempty"`
    InboundRefSummary  []RefSummary `json:"inbound_refs,omitempty"`
    OutboundRefSummary []RefSummary `json:"outbound_refs,omitempty"`
}

type RefSummary struct {
    Kind          uint16   `json:"kind"`
    TargetEventID string   `json:"target"`
    RefCount      int      `json:"count"`
    RefEventIDs   []string `json:"refs,omitempty"` // Optional: include actual ref IDs
}

func buildRefSummary(refs map[uint16]map[string][]string) []RefSummary {
    var summaries []RefSummary

    for kind, targets := range refs {
        for targetID, refIDs := range targets {
            summaries = append(summaries, RefSummary{
                Kind:          kind,
                TargetEventID: targetID,
                RefCount:      len(refIDs),
                RefEventIDs:   refIDs,
            })
        }
    }

    // Sort by count descending
    sort.Slice(summaries, func(i, j int) bool {
        return summaries[i].RefCount > summaries[j].RefCount
    })

    return summaries
}
```
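
Given the `RefSummary` JSON tags above, the ref portion of a response would look roughly like the following sketch (IDs and counts are hypothetical), with entries ordered by `count` descending:

```json
{
  "pubkeys_by_depth": [["<seed-pubkey>"], ["<followed-pubkey>"]],
  "total_pubkeys": 2,
  "inbound_refs": [
    {"kind": 7, "target": "<event-id-a>", "count": 42},
    {"kind": 7, "target": "<event-id-b>", "count": 5}
  ]
}
```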

---

## Phase 5D: P-Tag Graph Query Optimization

**Goal**: Use the pubkey graph index (peg) for faster `#p` tag queries.

### Spec

From PTAG_GRAPH_OPTIMIZATION.md:

When a filter has `#p` tags (mentions/references), use the `peg` index instead of the `tkc` (TagKind) index for:
- 41% smaller index size
- No hash collisions (exact serial match vs 8-byte hash)
- Kind-indexed in key structure
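
As an illustration, a filter like the following would qualify for the optimization under the `canUsePTagGraph` rules below, since it carries `#p` tags and no `authors` (the pubkey is a placeholder):

```json
{"kinds": [1, 7], "#p": ["<64-char-hex-pubkey>"]}
```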

### 5D.1 Query Optimization

**File**: `pkg/database/query-for-ptag-graph.go`

```go
package database

// canUsePTagGraph checks if a filter can use graph optimization
func canUsePTagGraph(f *filter.F) bool {
    // Has p-tags?
    if f.Tags == nil || f.Tags.Len() == 0 {
        return false
    }

    hasPTags := false
    for _, t := range *f.Tags {
        if len(t.Key()) >= 1 && t.Key()[0] == 'p' {
            hasPTags = true
            break
        }
    }
    if !hasPTags {
        return false
    }

    // No authors filter (use different optimization for that)
    if f.Authors != nil && f.Authors.Len() > 0 {
        return false
    }

    return true
}

// QueryPTagGraph uses the pubkey graph index for efficient p-tag queries
func (d *D) QueryPTagGraph(f *filter.F) (serials types.Uint40s, err error) {
    // Extract p-tags from filter
    var ptagPubkeys [][]byte
    for _, t := range *f.Tags {
        if len(t.Key()) >= 1 && t.Key()[0] == 'p' {
            for _, val := range t.Values() {
                pubkeyBytes, err := hex.Dec(string(val))
                if err == nil && len(pubkeyBytes) == 32 {
                    ptagPubkeys = append(ptagPubkeys, pubkeyBytes)
                }
            }
        }
    }

    if len(ptagPubkeys) == 0 {
        return nil, nil
    }

    // Resolve pubkeys to serials
    var pubkeySerials []*types.Uint40
    for _, pk := range ptagPubkeys {
        ser, err := d.GetPubkeySerial(pk)
        if err == nil && ser != nil {
            pubkeySerials = append(pubkeySerials, ser)
        }
    }

    // Query kinds (optional)
    var kinds []uint16
    if f.Kinds != nil {
        kinds = f.Kinds.ToUint16()
    }

    // Scan peg index for each pubkey
    seen := make(map[uint64]bool)
    for _, pkSerial := range pubkeySerials {
        // peg|pubkey_serial|kind|direction(2)|event_serial
        // direction=2 means "inbound p-tag" (this pubkey is referenced)
        eventSerials, err := d.GetEventsReferencingPubkey(pkSerial, kinds)
        if err != nil {
            continue
        }

        for _, evSer := range eventSerials {
            if !seen[evSer.Uint64()] {
                seen[evSer.Uint64()] = true
                serials = append(serials, evSer)
            }
        }
    }

    return serials, nil
}

// GetEventsReferencingPubkey finds events that have a p-tag referencing this pubkey.
// Uses the peg index with direction=2 (p-tag inbound).
func (d *D) GetEventsReferencingPubkey(pubkeySerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error) {
    var eventSerials []*types.Uint40

    err := d.View(func(txn *badger.Txn) error {
        if len(kinds) > 0 {
            // Scan specific kinds
            for _, k := range kinds {
                kind := new(types.Uint16)
                kind.Set(k)
                direction := new(types.Letter)
                direction.Set(types.EdgeDirectionPTagIn) // direction=2

                prefix := new(bytes.Buffer)
                indexes.PubkeyEventGraphEnc(pubkeySerial, kind, direction, nil).MarshalWrite(prefix)

                opts := badger.DefaultIteratorOptions
                opts.PrefetchValues = false
                it := txn.NewIterator(opts)

                for it.Seek(prefix.Bytes()); it.ValidForPrefix(prefix.Bytes()); it.Next() {
                    key := it.Item().Key()

                    _, _, _, evSer := indexes.PubkeyEventGraphVars()
                    dec := indexes.PubkeyEventGraphDec(new(types.Uint40), new(types.Uint16), new(types.Letter), evSer)
                    if err := dec.UnmarshalRead(bytes.NewReader(key)); err != nil {
                        continue
                    }

                    ser := new(types.Uint40)
                    ser.Set(evSer.Uint64())
                    eventSerials = append(eventSerials, ser)
                }
                it.Close()
            }
        } else {
            // Scan all kinds (direction=2 only)
            direction := new(types.Letter)
            direction.Set(types.EdgeDirectionPTagIn)

            // Need to scan all kinds for this pubkey with direction=2.
            // This is less efficient - recommend always filtering by kinds.
            // ... implementation
        }
        return nil
    })

    return eventSerials, err
}
```

### 5D.2 Query Dispatcher Integration

**File**: `pkg/database/query.go` (or equivalent)

```go
func (d *D) GetSerialsFromFilter(f *filter.F) (sers types.Uint40s, err error) {
    // Try p-tag graph optimization first
    if canUsePTagGraph(f) {
        if sers, err = d.QueryPTagGraph(f); err == nil && len(sers) > 0 {
            return sers, nil
        }
        // Fall through to traditional indexes on error or empty result
    }

    // Existing index selection logic...
}
```

---

## Implementation Priority Order

### Critical Path (Completes NIP Spec)

1. **Phase 5C: Reference Aggregation** - Required by the NIP spec for full feature parity
   - Estimated: Medium complexity
   - Value: High (enables reaction/reply counts, popular post discovery)

2. **Phase 5A: Configuration & NIP-11** - Needed for relay discoverability
   - Estimated: Low complexity
   - Value: Medium (allows clients to detect support)

### Enhancement Path (Performance & Operations)

3. **Phase 5B: E-Tag Graph Backfill** - Enables graph queries on historical data
   - Estimated: Medium complexity
   - Value: Medium (relays with existing data need this)

4. **Phase 5D: P-Tag Graph Optimization** - Performance improvement
   - Estimated: Low-Medium complexity
   - Value: Medium (3-5x faster for mention queries)

---

## Testing Plan

### Unit Tests

```go
// Reference aggregation
func TestCollectInboundRefs(t *testing.T)
func TestCollectOutboundRefs(t *testing.T)
func TestRefSummarySorting(t *testing.T)

// Configuration
func TestGraphConfigDefaults(t *testing.T)
func TestGraphConfigEnvOverrides(t *testing.T)

// Migration
func TestETagGraphMigration(t *testing.T)
func TestMigrationIdempotency(t *testing.T)

// P-tag optimization
func TestCanUsePTagGraph(t *testing.T)
func TestQueryPTagGraph(t *testing.T)
```
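
As a sketch of the sorting test, assuming it lives in the same package as `buildRefSummary` (the fixture values are hypothetical):

```go
func TestRefSummarySorting(t *testing.T) {
    refs := map[uint16]map[string][]string{
        7: {
            "target-a": {"r1", "r2", "r3"}, // 3 inbound refs
            "target-b": {"r4"},             // 1 inbound ref
        },
    }
    summaries := buildRefSummary(refs)
    for i := 1; i < len(summaries); i++ {
        if summaries[i-1].RefCount < summaries[i].RefCount {
            t.Fatalf("summaries not sorted by count descending at index %d", i)
        }
    }
}
```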

### Integration Tests

```go
// Full round-trip with refs
func TestGraphQueryWithInboundRefs(t *testing.T)
func TestGraphQueryWithOutboundRefs(t *testing.T)
func TestGraphQueryRefsSortedByCount(t *testing.T)

// NIP-11
func TestRelayInfoAdvertisesGraphQueries(t *testing.T)
```

### Performance Tests

```go
// Benchmark ref collection
func BenchmarkCollectInboundRefs(b *testing.B)
func BenchmarkPTagGraphVsTagIndex(b *testing.B)
```

---

## Summary

| Phase | Description | Complexity | Priority | Status |
|-------|-------------|------------|----------|--------|
| **5A** | Configuration & NIP-11 | Low | High | ✅ COMPLETE |
| **5B** | E-tag graph migration | Medium | Medium | ✅ COMPLETE |
| **5C** | Reference aggregation | Medium | High | ✅ COMPLETE |
| **5D** | P-tag optimization | Low-Medium | Medium | ❌ Not started |

**Completed 2025-01-05:**
- Phase 5A: Configuration options (`ORLY_GRAPH_*` env vars) and NIP-11 `graph_query` field
- Phase 5B: E-tag graph backfill migration (v8) runs automatically on startup
- Phase 5C: Inbound/outbound ref collection for both Badger and Neo4j backends

**Remaining:**
- Phase 5D: P-tag graph query optimization (enhancement, not critical)

**Implementation Files:**
- `app/config/config.go` - Graph configuration options
- `app/handle-relayinfo.go` - NIP-11 advertisement
- `pkg/database/migrations.go` - E-tag graph backfill (v8)
- `pkg/database/graph-refs.go` - Badger ref collection
- `pkg/neo4j/graph-refs.go` - Neo4j ref collection
- `pkg/protocol/graph/executor.go` - Query execution with ref support
@ -0,0 +1,699 @@
@@ -0,0 +1,699 @@
|
||||
package acl |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/hex" |
||||
"reflect" |
||||
"strconv" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"lol.mleku.dev/chk" |
||||
"lol.mleku.dev/errorf" |
||||
"lol.mleku.dev/log" |
||||
"next.orly.dev/app/config" |
||||
"next.orly.dev/pkg/database" |
||||
"git.mleku.dev/mleku/nostr/encoders/bech32encoding" |
||||
"git.mleku.dev/mleku/nostr/encoders/event" |
||||
"next.orly.dev/pkg/utils" |
||||
) |
||||
|
||||
// Default values for curating mode
|
||||
const ( |
||||
DefaultDailyLimit = 50 |
||||
DefaultIPDailyLimit = 500 // Max events per IP per day (flood protection)
|
||||
DefaultFirstBanHours = 1 |
||||
DefaultSecondBanHours = 168 // 1 week
|
||||
CuratingConfigKind = 30078 |
||||
CuratingConfigDTag = "curating-config" |
||||
) |
||||
|
||||
// Curating implements the curating ACL mode with three-tier publisher classification:
|
||||
// - Trusted: Unlimited publishing
|
||||
// - Blacklisted: Cannot publish
|
||||
// - Unclassified: Rate-limited publishing (default 50/day)
|
||||
type Curating struct { |
||||
Ctx context.Context |
||||
cfg *config.C |
||||
db *database.D |
||||
curatingACL *database.CuratingACL |
||||
owners [][]byte |
||||
admins [][]byte |
||||
mx sync.RWMutex |
||||
|
||||
// In-memory caches for performance
|
||||
trustedCache map[string]bool |
||||
blacklistedCache map[string]bool |
||||
kindCache map[int]bool |
||||
configCache *database.CuratingConfig |
||||
cacheMx sync.RWMutex |
||||
} |
||||
|
||||
func (c *Curating) Configure(cfg ...any) (err error) { |
||||
log.I.F("configuring curating ACL") |
||||
for _, ca := range cfg { |
||||
switch cv := ca.(type) { |
||||
case *config.C: |
||||
c.cfg = cv |
||||
case *database.D: |
||||
c.db = cv |
||||
c.curatingACL = database.NewCuratingACL(cv) |
||||
case context.Context: |
||||
c.Ctx = cv |
||||
default: |
||||
err = errorf.E("invalid type: %T", reflect.TypeOf(ca)) |
||||
} |
||||
} |
||||
if c.cfg == nil || c.db == nil { |
||||
err = errorf.E("both config and database must be set") |
||||
return |
||||
} |
||||
|
||||
// Initialize caches
|
||||
c.trustedCache = make(map[string]bool) |
||||
c.blacklistedCache = make(map[string]bool) |
||||
c.kindCache = make(map[int]bool) |
||||
|
||||
// Load owners from config
|
||||
for _, owner := range c.cfg.Owners { |
||||
var own []byte |
||||
if o, e := bech32encoding.NpubOrHexToPublicKeyBinary(owner); chk.E(e) { |
||||
continue |
||||
} else { |
||||
own = o |
||||
} |
||||
c.owners = append(c.owners, own) |
||||
} |
||||
|
||||
// Load admins from config
|
||||
for _, admin := range c.cfg.Admins { |
||||
var adm []byte |
||||
if a, e := bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(e) { |
||||
continue |
||||
} else { |
||||
adm = a |
||||
} |
||||
c.admins = append(c.admins, adm) |
||||
} |
||||
|
||||
// Refresh caches from database
|
||||
if err = c.RefreshCaches(); err != nil { |
||||
log.W.F("curating ACL: failed to refresh caches: %v", err) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func (c *Curating) GetAccessLevel(pub []byte, address string) (level string) { |
||||
c.mx.RLock() |
||||
defer c.mx.RUnlock() |
||||
|
||||
pubkeyHex := hex.EncodeToString(pub) |
||||
|
||||
// Check owners first
|
||||
for _, v := range c.owners { |
||||
if utils.FastEqual(v, pub) { |
||||
return "owner" |
||||
} |
||||
} |
||||
|
||||
// Check admins
|
||||
for _, v := range c.admins { |
||||
if utils.FastEqual(v, pub) { |
||||
return "admin" |
||||
} |
||||
} |
||||
|
||||
// Check if IP is blocked
|
||||
if address != "" { |
||||
blocked, _, err := c.curatingACL.IsIPBlocked(address) |
||||
if err == nil && blocked { |
||||
return "blocked" |
||||
} |
||||
} |
||||
|
||||
// Check if pubkey is blacklisted (check cache first)
|
||||
c.cacheMx.RLock() |
||||
if c.blacklistedCache[pubkeyHex] { |
||||
c.cacheMx.RUnlock() |
||||
return "banned" |
||||
} |
||||
c.cacheMx.RUnlock() |
||||
|
||||
// Double-check database for blacklisted
|
||||
blacklisted, _ := c.curatingACL.IsPubkeyBlacklisted(pubkeyHex) |
||||
if blacklisted { |
||||
// Update cache
|
||||
c.cacheMx.Lock() |
||||
c.blacklistedCache[pubkeyHex] = true |
||||
c.cacheMx.Unlock() |
||||
return "banned" |
||||
} |
||||
|
||||
// All other users get write access (rate limiting handled in CheckPolicy)
|
||||
return "write" |
||||
} |
||||
|
||||
// CheckPolicy implements the PolicyChecker interface for event-level filtering
func (c *Curating) CheckPolicy(ev *event.E) (allowed bool, err error) {
    pubkeyHex := hex.EncodeToString(ev.Pubkey)

    // Check if configured
    config, err := c.GetConfig()
    if err != nil {
        return false, errorf.E("failed to get config: %v", err)
    }
    if config.ConfigEventID == "" {
        return false, errorf.E("curating mode not configured: please publish a configuration event")
    }

    // Check if event is spam-flagged
    isSpam, _ := c.curatingACL.IsEventSpam(hex.EncodeToString(ev.ID[:]))
    if isSpam {
        return false, errorf.E("blocked: event is flagged as spam")
    }

    // Check if event kind is allowed
    if !c.curatingACL.IsKindAllowed(int(ev.Kind), &config) {
        return false, errorf.E("blocked: event kind %d is not in the allow list", ev.Kind)
    }

    // Check if pubkey is blacklisted
    c.cacheMx.RLock()
    isBlacklisted := c.blacklistedCache[pubkeyHex]
    c.cacheMx.RUnlock()
    if !isBlacklisted {
        isBlacklisted, _ = c.curatingACL.IsPubkeyBlacklisted(pubkeyHex)
    }
    if isBlacklisted {
        return false, errorf.E("blocked: pubkey is blacklisted")
    }

    // Check if pubkey is trusted (bypasses rate limiting)
    c.cacheMx.RLock()
    isTrusted := c.trustedCache[pubkeyHex]
    c.cacheMx.RUnlock()
    if !isTrusted {
        isTrusted, _ = c.curatingACL.IsPubkeyTrusted(pubkeyHex)
        if isTrusted {
            // Update cache
            c.cacheMx.Lock()
            c.trustedCache[pubkeyHex] = true
            c.cacheMx.Unlock()
        }
    }
    if isTrusted {
        return true, nil
    }

    // Check if owner or admin (bypasses rate limiting)
    for _, v := range c.owners {
        if utils.FastEqual(v, ev.Pubkey) {
            return true, nil
        }
    }
    for _, v := range c.admins {
        if utils.FastEqual(v, ev.Pubkey) {
            return true, nil
        }
    }

    // For unclassified users, check the daily rate limit
    today := time.Now().Format("2006-01-02")
    dailyLimit := config.DailyLimit
    if dailyLimit == 0 {
        dailyLimit = DefaultDailyLimit
    }

    count, err := c.curatingACL.GetEventCount(pubkeyHex, today)
    if err != nil {
        log.W.F("curating ACL: failed to get event count: %v", err)
        count = 0
    }

    if count >= dailyLimit {
        return false, errorf.E("rate limit exceeded: maximum %d events per day for unclassified users", dailyLimit)
    }

    // Increment the counter
    _, err = c.curatingACL.IncrementEventCount(pubkeyHex, today)
    if err != nil {
        log.W.F("curating ACL: failed to increment event count: %v", err)
    }

    return true, nil
}

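// Illustrative sketch (not part of the original commit): a write path
// consulting CheckPolicy before persisting. Everything except CheckPolicy
// itself is a hypothetical stand-in for the relay's handler plumbing.
//
//	if ok, err := curatingACL.CheckPolicy(ev); !ok {
//		sendOKFalse(ws, ev.ID, err.Error()) // hypothetical OK-false reply helper
//		return
//	}
//	storeEvent(ev) // hypothetical persistence call
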
// RateLimitCheck checks if an unclassified user can publish and handles IP tracking.
// This is called separately when we have access to the IP address.
func (c *Curating) RateLimitCheck(pubkeyHex, ip string) (allowed bool, message string, err error) {
    config, err := c.GetConfig()
    if err != nil {
        return false, "", errorf.E("failed to get config: %v", err)
    }

    today := time.Now().Format("2006-01-02")

    // Check IP flood limit first (applies to all non-trusted users from this IP)
    if ip != "" {
        ipDailyLimit := config.IPDailyLimit
        if ipDailyLimit == 0 {
            ipDailyLimit = DefaultIPDailyLimit
        }

        ipCount, err := c.curatingACL.GetIPEventCount(ip, today)
        if err != nil {
            ipCount = 0
        }

        if ipCount >= ipDailyLimit {
            // IP has exceeded the flood limit - record the offense and ban
            c.recordIPOffenseAndBan(ip, pubkeyHex, config, "IP flood limit exceeded")
            return false, "rate limit exceeded: too many events from this IP address", nil
        }
    }

    // Check the per-pubkey daily limit
    dailyLimit := config.DailyLimit
    if dailyLimit == 0 {
        dailyLimit = DefaultDailyLimit
    }

    count, err := c.curatingACL.GetEventCount(pubkeyHex, today)
    if err != nil {
        count = 0
    }

    if count >= dailyLimit {
        // Record an IP offense and potentially ban
        if ip != "" {
            c.recordIPOffenseAndBan(ip, pubkeyHex, config, "pubkey rate limit exceeded")
        }
        return false, "rate limit exceeded: maximum events per day for unclassified users", nil
    }

    // Increment the IP event count for flood tracking (only for non-trusted users)
    if ip != "" {
        _, _ = c.curatingACL.IncrementIPEventCount(ip, today)
    }

    return true, "", nil
}

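// Illustrative sketch (not part of the original commit): applying the
// IP-aware check at ingress. remoteAddr is assumed to already be the bare
// client IP; rejectEvent is a hypothetical stand-in.
//
//	if ok, msg, err := curating.RateLimitCheck(pubkeyHex, remoteAddr); err != nil {
//		log.W.F("rate limit check failed: %v", err)
//	} else if !ok {
//		rejectEvent(msg) // msg is the human-readable refusal text
//	}
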
// recordIPOffenseAndBan records an offense for an IP and applies a ban if warranted
func (c *Curating) recordIPOffenseAndBan(ip, pubkeyHex string, config database.CuratingConfig, reason string) {
    offenseCount, _ := c.curatingACL.RecordIPOffense(ip, pubkeyHex)
    if offenseCount > 0 {
        firstBanHours := config.FirstBanHours
        if firstBanHours == 0 {
            firstBanHours = DefaultFirstBanHours
        }
        secondBanHours := config.SecondBanHours
        if secondBanHours == 0 {
            secondBanHours = DefaultSecondBanHours
        }

        banHours := firstBanHours
        if offenseCount >= 2 {
            banHours = secondBanHours
        }
        banDuration := time.Duration(banHours) * time.Hour
        log.W.F("curating ACL: IP %s banned for %d hours (offense #%d, reason: %s)", ip, banHours, offenseCount, reason)
        c.curatingACL.BlockIP(ip, banDuration, reason)
    }
}

func (c *Curating) GetACLInfo() (name, description, documentation string) {
    return "curating", "curated relay with rate-limited unclassified publishers",
        `Curating ACL mode provides three-tier publisher classification:

- Trusted: Unlimited publishing, explicitly marked by admin
- Blacklisted: Cannot publish, events rejected
- Unclassified: Default state, rate-limited (default 50 events/day)

Features:
- Per-pubkey daily rate limiting for unclassified users (default 50/day)
- Per-IP daily rate limiting for flood protection (default 500/day)
- IP-based spam detection (tracks multiple rate-limited pubkeys)
- Automatic IP bans (1-hour first offense, 1-week second offense)
- Event kind allow-listing for content control
- Spam flagging (events hidden from queries without deletion)

Configuration via kind 30078 event with d-tag "curating-config".
The relay will not accept events until configured.

Management through NIP-86 API endpoints:
- trustpubkey, untrustpubkey, listtrustedpubkeys
- blacklistpubkey, unblacklistpubkey, listblacklistedpubkeys
- listunclassifiedusers
- markspam, unmarkspam, listspamevents
- setallowedkindcategories, getallowedkindcategories`
}

func (c *Curating) Type() string { return "curating" }

// IsEventVisible checks if an event should be visible to the given access level.
// Events from blacklisted pubkeys are only visible to admin/owner.
func (c *Curating) IsEventVisible(ev *event.E, accessLevel string) bool {
    // Admin and owner can see all events
    if accessLevel == "admin" || accessLevel == "owner" {
        return true
    }

    // Check if the event author is blacklisted
    pubkeyHex := hex.EncodeToString(ev.Pubkey)

    // Check cache first
    c.cacheMx.RLock()
    isBlacklisted := c.blacklistedCache[pubkeyHex]
    c.cacheMx.RUnlock()

    if isBlacklisted {
        return false
    }

    // Check the database if not in cache
    if blacklisted, _ := c.curatingACL.IsPubkeyBlacklisted(pubkeyHex); blacklisted {
        c.cacheMx.Lock()
        c.blacklistedCache[pubkeyHex] = true
        c.cacheMx.Unlock()
        return false
    }

    return true
}

// FilterVisibleEvents filters a list of events, removing those from blacklisted pubkeys.
// Returns only events visible to the given access level.
func (c *Curating) FilterVisibleEvents(events []*event.E, accessLevel string) []*event.E {
    // Admin and owner can see all events
    if accessLevel == "admin" || accessLevel == "owner" {
        return events
    }

    // Filter out events from blacklisted pubkeys
    visible := make([]*event.E, 0, len(events))
    for _, ev := range events {
        if c.IsEventVisible(ev, accessLevel) {
            visible = append(visible, ev)
        }
    }
    return visible
}

// GetCuratingACL returns the database ACL instance for direct access
func (c *Curating) GetCuratingACL() *database.CuratingACL {
    return c.curatingACL
}

func (c *Curating) Syncer() {
    log.I.F("starting curating ACL syncer")

    // Start the background cleanup goroutine
    go c.backgroundCleanup()
}

// backgroundCleanup periodically cleans up expired data
func (c *Curating) backgroundCleanup() {
    // Run cleanup every hour
    ticker := time.NewTicker(time.Hour)
    defer ticker.Stop()

    for {
        select {
        case <-c.Ctx.Done():
            log.D.F("curating ACL background cleanup stopped")
            return
        case <-ticker.C:
            c.runCleanup()
        }
    }
}

func (c *Curating) runCleanup() {
    log.D.F("curating ACL: running background cleanup")

    // Clean up expired IP blocks
    if err := c.curatingACL.CleanupExpiredIPBlocks(); err != nil {
        log.W.F("curating ACL: failed to cleanup expired IP blocks: %v", err)
    }

    // Clean up old event counts (older than 7 days)
    cutoffDate := time.Now().AddDate(0, 0, -7).Format("2006-01-02")
    if err := c.curatingACL.CleanupOldEventCounts(cutoffDate); err != nil {
        log.W.F("curating ACL: failed to cleanup old event counts: %v", err)
    }

    // Refresh caches
    if err := c.RefreshCaches(); err != nil {
        log.W.F("curating ACL: failed to refresh caches: %v", err)
    }
}

// RefreshCaches refreshes all in-memory caches from the database
func (c *Curating) RefreshCaches() error {
    c.cacheMx.Lock()
    defer c.cacheMx.Unlock()

    // Refresh the trusted pubkeys cache
    trusted, err := c.curatingACL.ListTrustedPubkeys()
    if err != nil {
        return errorf.E("failed to list trusted pubkeys: %v", err)
    }
    c.trustedCache = make(map[string]bool)
    for _, t := range trusted {
        c.trustedCache[t.Pubkey] = true
    }

    // Refresh the blacklisted pubkeys cache
    blacklisted, err := c.curatingACL.ListBlacklistedPubkeys()
    if err != nil {
        return errorf.E("failed to list blacklisted pubkeys: %v", err)
    }
    c.blacklistedCache = make(map[string]bool)
    for _, b := range blacklisted {
        c.blacklistedCache[b.Pubkey] = true
    }

    // Refresh the config cache
    config, err := c.curatingACL.GetConfig()
    if err != nil {
        return errorf.E("failed to get config: %v", err)
    }
    c.configCache = &config

    // Refresh the allowed kinds cache
    c.kindCache = make(map[int]bool)
    for _, k := range config.AllowedKinds {
        c.kindCache[k] = true
    }

    log.D.F("curating ACL: caches refreshed - %d trusted, %d blacklisted, %d allowed kinds",
        len(c.trustedCache), len(c.blacklistedCache), len(c.kindCache))

    return nil
}

// GetConfig returns the current configuration
func (c *Curating) GetConfig() (database.CuratingConfig, error) {
    c.cacheMx.RLock()
    if c.configCache != nil {
        config := *c.configCache
        c.cacheMx.RUnlock()
        return config, nil
    }
    c.cacheMx.RUnlock()

    return c.curatingACL.GetConfig()
}

// IsConfigured returns true if the relay has been configured
func (c *Curating) IsConfigured() (bool, error) {
    return c.curatingACL.IsConfigured()
}

// ProcessConfigEvent processes a kind 30078 event to extract the curating configuration
func (c *Curating) ProcessConfigEvent(ev *event.E) error {
    if ev.Kind != CuratingConfigKind {
        return errorf.E("invalid event kind: expected %d, got %d", CuratingConfigKind, ev.Kind)
    }

    // Check the d-tag
    dTag := ev.Tags.GetFirst([]byte("d"))
    if dTag == nil || string(dTag.Value()) != CuratingConfigDTag {
        return errorf.E("invalid d-tag: expected %s", CuratingConfigDTag)
    }

    // Check that the pubkey is an owner or admin
    pubkeyHex := hex.EncodeToString(ev.Pubkey)
    isOwner := false
    isAdmin := false
    for _, v := range c.owners {
        if utils.FastEqual(v, ev.Pubkey) {
            isOwner = true
            break
        }
    }
    if !isOwner {
        for _, v := range c.admins {
            if utils.FastEqual(v, ev.Pubkey) {
                isAdmin = true
                break
            }
        }
    }
    if !isOwner && !isAdmin {
        return errorf.E("config event must be from owner or admin")
    }

    // Parse the configuration from tags
    config := database.CuratingConfig{
        ConfigEventID:  hex.EncodeToString(ev.ID[:]),
        ConfigPubkey:   pubkeyHex,
        ConfiguredAt:   ev.CreatedAt,
        DailyLimit:     DefaultDailyLimit,
        FirstBanHours:  DefaultFirstBanHours,
        SecondBanHours: DefaultSecondBanHours,
    }

    for _, tag := range *ev.Tags {
        if tag.Len() < 2 {
            continue
        }
        key := string(tag.Key())
        value := string(tag.Value())

        switch key {
        case "daily_limit":
            if v, err := strconv.Atoi(value); err == nil && v > 0 {
                config.DailyLimit = v
            }
        case "ip_daily_limit":
            if v, err := strconv.Atoi(value); err == nil && v > 0 {
                config.IPDailyLimit = v
            }
        case "first_ban_hours":
            if v, err := strconv.Atoi(value); err == nil && v > 0 {
                config.FirstBanHours = v
            }
        case "second_ban_hours":
            if v, err := strconv.Atoi(value); err == nil && v > 0 {
                config.SecondBanHours = v
            }
        case "kind_category":
            config.KindCategories = append(config.KindCategories, value)
        case "kind_range":
            config.AllowedRanges = append(config.AllowedRanges, value)
        case "kind":
            if k, err := strconv.Atoi(value); err == nil {
                config.AllowedKinds = append(config.AllowedKinds, k)
            }
        }
    }

    // Save the configuration
    if err := c.curatingACL.SaveConfig(config); err != nil {
        return errorf.E("failed to save config: %v", err)
    }

    // Refresh the config cache (the other caches are untouched)
    c.cacheMx.Lock()
    c.configCache = &config
    c.cacheMx.Unlock()

    log.I.F("curating ACL: configuration updated from event %s by %s",
        config.ConfigEventID, config.ConfigPubkey)

    return nil
}

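// Illustrative sketch (not part of the original commit): the tag shape
// ProcessConfigEvent expects on a kind 30078 configuration event. The
// numeric values are examples, not defaults.
//
//	["d", "curating-config"]
//	["daily_limit", "50"]
//	["ip_daily_limit", "500"]
//	["first_ban_hours", "1"]
//	["second_ban_hours", "168"]
//	["kind_category", "social"]
//	["kind_range", "1000-1999"]
//	["kind", "30023"]
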
// IsTrusted checks if a pubkey is trusted
func (c *Curating) IsTrusted(pubkeyHex string) bool {
    c.cacheMx.RLock()
    if c.trustedCache[pubkeyHex] {
        c.cacheMx.RUnlock()
        return true
    }
    c.cacheMx.RUnlock()

    trusted, _ := c.curatingACL.IsPubkeyTrusted(pubkeyHex)
    return trusted
}

// IsBlacklisted checks if a pubkey is blacklisted
func (c *Curating) IsBlacklisted(pubkeyHex string) bool {
    c.cacheMx.RLock()
    if c.blacklistedCache[pubkeyHex] {
        c.cacheMx.RUnlock()
        return true
    }
    c.cacheMx.RUnlock()

    blacklisted, _ := c.curatingACL.IsPubkeyBlacklisted(pubkeyHex)
    return blacklisted
}

// TrustPubkey adds a pubkey to the trusted list
func (c *Curating) TrustPubkey(pubkeyHex, note string) error {
    pubkeyHex = strings.ToLower(pubkeyHex)
    if err := c.curatingACL.SaveTrustedPubkey(pubkeyHex, note); err != nil {
        return err
    }
    // Update cache
    c.cacheMx.Lock()
    c.trustedCache[pubkeyHex] = true
    delete(c.blacklistedCache, pubkeyHex) // Remove from blacklist cache if present
    c.cacheMx.Unlock()
    // Also remove from the blacklist in the DB
    c.curatingACL.RemoveBlacklistedPubkey(pubkeyHex)
    return nil
}

// UntrustPubkey removes a pubkey from the trusted list
func (c *Curating) UntrustPubkey(pubkeyHex string) error {
    pubkeyHex = strings.ToLower(pubkeyHex)
    if err := c.curatingACL.RemoveTrustedPubkey(pubkeyHex); err != nil {
        return err
    }
    // Update cache
    c.cacheMx.Lock()
    delete(c.trustedCache, pubkeyHex)
    c.cacheMx.Unlock()
    return nil
}

// BlacklistPubkey adds a pubkey to the blacklist
func (c *Curating) BlacklistPubkey(pubkeyHex, reason string) error {
    pubkeyHex = strings.ToLower(pubkeyHex)
    if err := c.curatingACL.SaveBlacklistedPubkey(pubkeyHex, reason); err != nil {
        return err
    }
    // Update cache
    c.cacheMx.Lock()
    c.blacklistedCache[pubkeyHex] = true
    delete(c.trustedCache, pubkeyHex) // Remove from trusted cache if present
    c.cacheMx.Unlock()
    // Also remove from the trusted list in the DB
    c.curatingACL.RemoveTrustedPubkey(pubkeyHex)
    return nil
}

// UnblacklistPubkey removes a pubkey from the blacklist
func (c *Curating) UnblacklistPubkey(pubkeyHex string) error {
    pubkeyHex = strings.ToLower(pubkeyHex)
    if err := c.curatingACL.RemoveBlacklistedPubkey(pubkeyHex); err != nil {
        return err
    }
    // Update cache
    c.cacheMx.Lock()
    delete(c.blacklistedCache, pubkeyHex)
    c.cacheMx.Unlock()
    return nil
}

func init() {
    Registry.Register(new(Curating))
}

@@ -0,0 +1,989 @@

//go:build !(js && wasm)

package database

import (
    "bytes"
    "encoding/json"
    "fmt"
    "sort"
    "time"

    "github.com/dgraph-io/badger/v4"
)

// CuratingConfig represents the configuration for curating ACL mode.
// It is parsed from a kind 30078 event with d-tag "curating-config".
type CuratingConfig struct {
    DailyLimit     int      `json:"daily_limit"`      // Max events per day for unclassified users
    IPDailyLimit   int      `json:"ip_daily_limit"`   // Max events per day from a single IP (flood protection)
    FirstBanHours  int      `json:"first_ban_hours"`  // IP ban duration for first offense
    SecondBanHours int      `json:"second_ban_hours"` // IP ban duration for second+ offense
    AllowedKinds   []int    `json:"allowed_kinds"`    // Explicit kind numbers
    AllowedRanges  []string `json:"allowed_ranges"`   // Kind ranges like "1000-1999"
    KindCategories []string `json:"kind_categories"`  // Category IDs like "social", "dm"
    ConfigEventID  string   `json:"config_event_id"`  // ID of the config event
    ConfigPubkey   string   `json:"config_pubkey"`    // Pubkey that published the config
    ConfiguredAt   int64    `json:"configured_at"`    // Timestamp of the config event
}

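// Illustrative sketch (not part of the original commit): a CuratingConfig as
// it would serialize into Badger. The event ID and pubkey are placeholders.
//
//	{
//	  "daily_limit": 50,
//	  "ip_daily_limit": 500,
//	  "first_ban_hours": 1,
//	  "second_ban_hours": 168,
//	  "allowed_kinds": [30023],
//	  "allowed_ranges": ["1000-1999"],
//	  "kind_categories": ["social", "dm"],
//	  "config_event_id": "<64-char hex event id>",
//	  "config_pubkey": "<64-char hex pubkey>",
//	  "configured_at": 1730000000
//	}
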
// TrustedPubkey represents an explicitly trusted publisher
type TrustedPubkey struct {
    Pubkey string    `json:"pubkey"`
    Note   string    `json:"note,omitempty"`
    Added  time.Time `json:"added"`
}

// BlacklistedPubkey represents a blacklisted publisher
type BlacklistedPubkey struct {
    Pubkey string    `json:"pubkey"`
    Reason string    `json:"reason,omitempty"`
    Added  time.Time `json:"added"`
}

// PubkeyEventCount tracks daily event counts for rate limiting
type PubkeyEventCount struct {
    Pubkey    string    `json:"pubkey"`
    Date      string    `json:"date"` // YYYY-MM-DD format
    Count     int       `json:"count"`
    LastEvent time.Time `json:"last_event"`
}

// IPOffense tracks rate limit violations from IPs
type IPOffense struct {
    IP           string    `json:"ip"`
    OffenseCount int       `json:"offense_count"`
    PubkeysHit   []string  `json:"pubkeys_hit"` // Pubkeys that hit the rate limit from this IP
    LastOffense  time.Time `json:"last_offense"`
}

// CuratingBlockedIP represents a temporarily blocked IP with expiration
type CuratingBlockedIP struct {
    IP        string    `json:"ip"`
    Reason    string    `json:"reason"`
    ExpiresAt time.Time `json:"expires_at"`
    Added     time.Time `json:"added"`
}

// SpamEvent represents an event flagged as spam
type SpamEvent struct {
    EventID string    `json:"event_id"`
    Pubkey  string    `json:"pubkey"`
    Reason  string    `json:"reason,omitempty"`
    Added   time.Time `json:"added"`
}

// UnclassifiedUser represents a user who has been neither trusted nor blacklisted
type UnclassifiedUser struct {
    Pubkey     string    `json:"pubkey"`
    EventCount int       `json:"event_count"`
    LastEvent  time.Time `json:"last_event"`
}

// CuratingACL provides the database operations for curation state
type CuratingACL struct {
    *D
}

// NewCuratingACL creates a new CuratingACL instance
func NewCuratingACL(db *D) *CuratingACL {
    return &CuratingACL{D: db}
}

// ==================== Configuration ====================

// SaveConfig saves the curating configuration
func (c *CuratingACL) SaveConfig(config CuratingConfig) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getConfigKey()
        data, err := json.Marshal(config)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
}

// GetConfig returns the curating configuration
func (c *CuratingACL) GetConfig() (CuratingConfig, error) {
    var config CuratingConfig
    err := c.View(func(txn *badger.Txn) error {
        key := c.getConfigKey()
        item, err := txn.Get(key)
        if err != nil {
            if err == badger.ErrKeyNotFound {
                return nil // Return an empty config
            }
            return err
        }
        val, err := item.ValueCopy(nil)
        if err != nil {
            return err
        }
        return json.Unmarshal(val, &config)
    })
    return config, err
}

// IsConfigured returns true if a configuration event has been set
func (c *CuratingACL) IsConfigured() (bool, error) {
    config, err := c.GetConfig()
    if err != nil {
        return false, err
    }
    return config.ConfigEventID != "", nil
}

// ==================== Trusted Pubkeys ====================

// SaveTrustedPubkey saves a trusted pubkey to the database
func (c *CuratingACL) SaveTrustedPubkey(pubkey string, note string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getTrustedPubkeyKey(pubkey)
        trusted := TrustedPubkey{
            Pubkey: pubkey,
            Note:   note,
            Added:  time.Now(),
        }
        data, err := json.Marshal(trusted)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
}

// RemoveTrustedPubkey removes a trusted pubkey from the database
func (c *CuratingACL) RemoveTrustedPubkey(pubkey string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getTrustedPubkeyKey(pubkey)
        return txn.Delete(key)
    })
}

// ListTrustedPubkeys returns all trusted pubkeys
func (c *CuratingACL) ListTrustedPubkeys() ([]TrustedPubkey, error) {
    var trusted []TrustedPubkey
    err := c.View(func(txn *badger.Txn) error {
        prefix := c.getTrustedPubkeyPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var t TrustedPubkey
            if err := json.Unmarshal(val, &t); err != nil {
                continue
            }
            trusted = append(trusted, t)
        }
        return nil
    })
    return trusted, err
}

// IsPubkeyTrusted checks if a pubkey is trusted
func (c *CuratingACL) IsPubkeyTrusted(pubkey string) (bool, error) {
    var trusted bool
    err := c.View(func(txn *badger.Txn) error {
        key := c.getTrustedPubkeyKey(pubkey)
        _, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            trusted = false
            return nil
        }
        if err != nil {
            return err
        }
        trusted = true
        return nil
    })
    return trusted, err
}

// ==================== Blacklisted Pubkeys ====================

// SaveBlacklistedPubkey saves a blacklisted pubkey to the database
func (c *CuratingACL) SaveBlacklistedPubkey(pubkey string, reason string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getBlacklistedPubkeyKey(pubkey)
        blacklisted := BlacklistedPubkey{
            Pubkey: pubkey,
            Reason: reason,
            Added:  time.Now(),
        }
        data, err := json.Marshal(blacklisted)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
}

// RemoveBlacklistedPubkey removes a blacklisted pubkey from the database
func (c *CuratingACL) RemoveBlacklistedPubkey(pubkey string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getBlacklistedPubkeyKey(pubkey)
        return txn.Delete(key)
    })
}

// ListBlacklistedPubkeys returns all blacklisted pubkeys
func (c *CuratingACL) ListBlacklistedPubkeys() ([]BlacklistedPubkey, error) {
    var blacklisted []BlacklistedPubkey
    err := c.View(func(txn *badger.Txn) error {
        prefix := c.getBlacklistedPubkeyPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var b BlacklistedPubkey
            if err := json.Unmarshal(val, &b); err != nil {
                continue
            }
            blacklisted = append(blacklisted, b)
        }
        return nil
    })
    return blacklisted, err
}

// IsPubkeyBlacklisted checks if a pubkey is blacklisted
func (c *CuratingACL) IsPubkeyBlacklisted(pubkey string) (bool, error) {
    var blacklisted bool
    err := c.View(func(txn *badger.Txn) error {
        key := c.getBlacklistedPubkeyKey(pubkey)
        _, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            blacklisted = false
            return nil
        }
        if err != nil {
            return err
        }
        blacklisted = true
        return nil
    })
    return blacklisted, err
}

// ==================== Event Counting ====================

// GetEventCount returns the event count for a pubkey on a specific date
func (c *CuratingACL) GetEventCount(pubkey, date string) (int, error) {
    var count int
    err := c.View(func(txn *badger.Txn) error {
        key := c.getEventCountKey(pubkey, date)
        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            count = 0
            return nil
        }
        if err != nil {
            return err
        }
        val, err := item.ValueCopy(nil)
        if err != nil {
            return err
        }
        var ec PubkeyEventCount
        if err := json.Unmarshal(val, &ec); err != nil {
            return err
        }
        count = ec.Count
        return nil
    })
    return count, err
}

// IncrementEventCount increments and returns the new event count for a pubkey
func (c *CuratingACL) IncrementEventCount(pubkey, date string) (int, error) {
    var newCount int
    err := c.Update(func(txn *badger.Txn) error {
        key := c.getEventCountKey(pubkey, date)
        var ec PubkeyEventCount

        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            ec = PubkeyEventCount{
                Pubkey:    pubkey,
                Date:      date,
                Count:     0,
                LastEvent: time.Now(),
            }
        } else if err != nil {
            return err
        } else {
            val, err := item.ValueCopy(nil)
            if err != nil {
                return err
            }
            if err := json.Unmarshal(val, &ec); err != nil {
                return err
            }
        }

        ec.Count++
        ec.LastEvent = time.Now()
        newCount = ec.Count

        data, err := json.Marshal(ec)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
    return newCount, err
}

// CleanupOldEventCounts removes event counts older than the specified date
func (c *CuratingACL) CleanupOldEventCounts(beforeDate string) error {
    return c.Update(func(txn *badger.Txn) error {
        prefix := c.getEventCountPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        var keysToDelete [][]byte
        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var ec PubkeyEventCount
            if err := json.Unmarshal(val, &ec); err != nil {
                continue
            }
            if ec.Date < beforeDate {
                keysToDelete = append(keysToDelete, item.KeyCopy(nil))
            }
        }

        for _, key := range keysToDelete {
            if err := txn.Delete(key); err != nil {
                return err
            }
        }
        return nil
    })
}

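// Illustrative note (not part of the original commit): counts are bucketed
// by a date string taken from time.Now(), so per-day limits reset at the
// server's local midnight. cacl and dailyLimit are hypothetical stand-ins.
//
//	today := time.Now().Format("2006-01-02") // e.g. "2025-01-31"
//	n, _ := cacl.IncrementEventCount(pubkeyHex, today)
//	if n > dailyLimit {
//		// over the limit for this pubkey today
//	}
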
// ==================== IP Event Counting ====================

// IPEventCount tracks events from an IP address per day (flood protection)
type IPEventCount struct {
    IP        string    `json:"ip"`
    Date      string    `json:"date"`
    Count     int       `json:"count"`
    LastEvent time.Time `json:"last_event"`
}

// GetIPEventCount returns the total event count for an IP on a specific date
func (c *CuratingACL) GetIPEventCount(ip, date string) (int, error) {
    var count int
    err := c.View(func(txn *badger.Txn) error {
        key := c.getIPEventCountKey(ip, date)
        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            count = 0
            return nil
        }
        if err != nil {
            return err
        }
        val, err := item.ValueCopy(nil)
        if err != nil {
            return err
        }
        var ec IPEventCount
        if err := json.Unmarshal(val, &ec); err != nil {
            return err
        }
        count = ec.Count
        return nil
    })
    return count, err
}

// IncrementIPEventCount increments and returns the new event count for an IP
func (c *CuratingACL) IncrementIPEventCount(ip, date string) (int, error) {
    var newCount int
    err := c.Update(func(txn *badger.Txn) error {
        key := c.getIPEventCountKey(ip, date)
        var ec IPEventCount

        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            ec = IPEventCount{
                IP:        ip,
                Date:      date,
                Count:     0,
                LastEvent: time.Now(),
            }
        } else if err != nil {
            return err
        } else {
            val, err := item.ValueCopy(nil)
            if err != nil {
                return err
            }
            if err := json.Unmarshal(val, &ec); err != nil {
                return err
            }
        }

        ec.Count++
        ec.LastEvent = time.Now()
        newCount = ec.Count

        data, err := json.Marshal(ec)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
    return newCount, err
}

// CleanupOldIPEventCounts removes IP event counts older than the specified date
func (c *CuratingACL) CleanupOldIPEventCounts(beforeDate string) error {
    return c.Update(func(txn *badger.Txn) error {
        prefix := c.getIPEventCountPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        var keysToDelete [][]byte
        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var ec IPEventCount
            if err := json.Unmarshal(val, &ec); err != nil {
                continue
            }
            if ec.Date < beforeDate {
                keysToDelete = append(keysToDelete, item.KeyCopy(nil))
            }
        }

        for _, key := range keysToDelete {
            if err := txn.Delete(key); err != nil {
                return err
            }
        }
        return nil
    })
}

func (c *CuratingACL) getIPEventCountKey(ip, date string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_IP_EVENT_COUNT_")
    buf.WriteString(ip)
    buf.WriteString("_")
    buf.WriteString(date)
    return buf.Bytes()
}

func (c *CuratingACL) getIPEventCountPrefix() []byte {
    return []byte("CURATING_ACL_IP_EVENT_COUNT_")
}

// ==================== IP Offense Tracking ====================

// GetIPOffense returns the offense record for an IP
func (c *CuratingACL) GetIPOffense(ip string) (*IPOffense, error) {
    var offense *IPOffense
    err := c.View(func(txn *badger.Txn) error {
        key := c.getIPOffenseKey(ip)
        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            return nil
        }
        if err != nil {
            return err
        }
        val, err := item.ValueCopy(nil)
        if err != nil {
            return err
        }
        offense = new(IPOffense)
        return json.Unmarshal(val, offense)
    })
    return offense, err
}

// RecordIPOffense records a rate limit violation from an IP for a pubkey and
// returns the new offense count. The count only grows when a pubkey not
// previously seen from this IP trips a limit, so one key rate-limiting
// repeatedly stays at one offense while key rotation raises the count.
func (c *CuratingACL) RecordIPOffense(ip, pubkey string) (int, error) {
    var newCount int
    err := c.Update(func(txn *badger.Txn) error {
        key := c.getIPOffenseKey(ip)
        var offense IPOffense

        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            offense = IPOffense{
                IP:           ip,
                OffenseCount: 0,
                PubkeysHit:   []string{},
                LastOffense:  time.Now(),
            }
        } else if err != nil {
            return err
        } else {
            val, err := item.ValueCopy(nil)
            if err != nil {
                return err
            }
            if err := json.Unmarshal(val, &offense); err != nil {
                return err
            }
        }

        // Add the pubkey if it is not already in the list
        found := false
        for _, p := range offense.PubkeysHit {
            if p == pubkey {
                found = true
                break
            }
        }
        if !found {
            offense.PubkeysHit = append(offense.PubkeysHit, pubkey)
            offense.OffenseCount++
        }
        offense.LastOffense = time.Now()
        newCount = offense.OffenseCount

        data, err := json.Marshal(offense)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
    return newCount, err
}

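// Illustrative example (not part of the original commit) of the semantics
// documented on RecordIPOffense; cacl, pkA, and pkB are stand-ins.
//
//	n1, _ := cacl.RecordIPOffense("203.0.113.7", pkA) // n1 == 1
//	n2, _ := cacl.RecordIPOffense("203.0.113.7", pkA) // n2 == 1, same pubkey
//	n3, _ := cacl.RecordIPOffense("203.0.113.7", pkB) // n3 == 2, new pubkey
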
// ==================== IP Blocking ====================

// BlockIP blocks an IP for the specified duration
func (c *CuratingACL) BlockIP(ip string, duration time.Duration, reason string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getBlockedIPKey(ip)
        blocked := CuratingBlockedIP{
            IP:        ip,
            Reason:    reason,
            ExpiresAt: time.Now().Add(duration),
            Added:     time.Now(),
        }
        data, err := json.Marshal(blocked)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
}

// UnblockIP removes an IP from the blocked list
func (c *CuratingACL) UnblockIP(ip string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getBlockedIPKey(ip)
        return txn.Delete(key)
    })
}

// IsIPBlocked checks if an IP is blocked and returns the expiration time
func (c *CuratingACL) IsIPBlocked(ip string) (bool, time.Time, error) {
    var blocked bool
    var expiresAt time.Time
    err := c.View(func(txn *badger.Txn) error {
        key := c.getBlockedIPKey(ip)
        item, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            blocked = false
            return nil
        }
        if err != nil {
            return err
        }
        val, err := item.ValueCopy(nil)
        if err != nil {
            return err
        }
        var b CuratingBlockedIP
        if err := json.Unmarshal(val, &b); err != nil {
            return err
        }
        if time.Now().After(b.ExpiresAt) {
            // The block has expired
            blocked = false
            return nil
        }
        blocked = true
        expiresAt = b.ExpiresAt
        return nil
    })
    return blocked, expiresAt, err
}

// ListBlockedIPs returns all blocked IPs (including expired ones)
func (c *CuratingACL) ListBlockedIPs() ([]CuratingBlockedIP, error) {
    var blocked []CuratingBlockedIP
    err := c.View(func(txn *badger.Txn) error {
        prefix := c.getBlockedIPPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var b CuratingBlockedIP
            if err := json.Unmarshal(val, &b); err != nil {
                continue
            }
            blocked = append(blocked, b)
        }
        return nil
    })
    return blocked, err
}

// CleanupExpiredIPBlocks removes expired IP blocks
func (c *CuratingACL) CleanupExpiredIPBlocks() error {
    return c.Update(func(txn *badger.Txn) error {
        prefix := c.getBlockedIPPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        now := time.Now()
        var keysToDelete [][]byte
        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var b CuratingBlockedIP
            if err := json.Unmarshal(val, &b); err != nil {
                continue
            }
            if now.After(b.ExpiresAt) {
                keysToDelete = append(keysToDelete, item.KeyCopy(nil))
            }
        }

        for _, key := range keysToDelete {
            if err := txn.Delete(key); err != nil {
                return err
            }
        }
        return nil
    })
}

// ==================== Spam Events ====================

// MarkEventAsSpam marks an event as spam
func (c *CuratingACL) MarkEventAsSpam(eventID, pubkey, reason string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getSpamEventKey(eventID)
        spam := SpamEvent{
            EventID: eventID,
            Pubkey:  pubkey,
            Reason:  reason,
            Added:   time.Now(),
        }
        data, err := json.Marshal(spam)
        if err != nil {
            return err
        }
        return txn.Set(key, data)
    })
}

// UnmarkEventAsSpam removes the spam flag from an event
func (c *CuratingACL) UnmarkEventAsSpam(eventID string) error {
    return c.Update(func(txn *badger.Txn) error {
        key := c.getSpamEventKey(eventID)
        return txn.Delete(key)
    })
}

// IsEventSpam checks if an event is marked as spam
func (c *CuratingACL) IsEventSpam(eventID string) (bool, error) {
    var spam bool
    err := c.View(func(txn *badger.Txn) error {
        key := c.getSpamEventKey(eventID)
        _, err := txn.Get(key)
        if err == badger.ErrKeyNotFound {
            spam = false
            return nil
        }
        if err != nil {
            return err
        }
        spam = true
        return nil
    })
    return spam, err
}

// ListSpamEvents returns all spam events
func (c *CuratingACL) ListSpamEvents() ([]SpamEvent, error) {
    var spam []SpamEvent
    err := c.View(func(txn *badger.Txn) error {
        prefix := c.getSpamEventPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var s SpamEvent
            if err := json.Unmarshal(val, &s); err != nil {
                continue
            }
            spam = append(spam, s)
        }
        return nil
    })
    return spam, err
}

// ==================== Unclassified Users ====================

// ListUnclassifiedUsers returns users who are neither trusted nor blacklisted,
// sorted by event count descending
func (c *CuratingACL) ListUnclassifiedUsers(limit int) ([]UnclassifiedUser, error) {
    // First, get all trusted and blacklisted pubkeys to exclude
    trusted, err := c.ListTrustedPubkeys()
    if err != nil {
        return nil, err
    }
    blacklisted, err := c.ListBlacklistedPubkeys()
    if err != nil {
        return nil, err
    }

    excludeSet := make(map[string]struct{})
    for _, t := range trusted {
        excludeSet[t.Pubkey] = struct{}{}
    }
    for _, b := range blacklisted {
        excludeSet[b.Pubkey] = struct{}{}
    }

    // Now iterate through the event counts and aggregate by pubkey
    pubkeyCounts := make(map[string]*UnclassifiedUser)

    err = c.View(func(txn *badger.Txn) error {
        prefix := c.getEventCountPrefix()
        it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
        defer it.Close()

        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            val, err := item.ValueCopy(nil)
            if err != nil {
                continue
            }
            var ec PubkeyEventCount
            if err := json.Unmarshal(val, &ec); err != nil {
                continue
            }

            // Skip if trusted or blacklisted
            if _, excluded := excludeSet[ec.Pubkey]; excluded {
                continue
            }

            if existing, ok := pubkeyCounts[ec.Pubkey]; ok {
                existing.EventCount += ec.Count
                if ec.LastEvent.After(existing.LastEvent) {
                    existing.LastEvent = ec.LastEvent
                }
            } else {
                pubkeyCounts[ec.Pubkey] = &UnclassifiedUser{
                    Pubkey:     ec.Pubkey,
                    EventCount: ec.Count,
                    LastEvent:  ec.LastEvent,
                }
            }
        }
        return nil
    })
    if err != nil {
        return nil, err
    }

    // Convert to a slice and sort by event count descending
    var users []UnclassifiedUser
    for _, u := range pubkeyCounts {
        users = append(users, *u)
    }
    sort.Slice(users, func(i, j int) bool {
        return users[i].EventCount > users[j].EventCount
    })

    // Apply the limit
    if limit > 0 && len(users) > limit {
        users = users[:limit]
    }

    return users, nil
}

// ==================== Key Generation ====================

func (c *CuratingACL) getConfigKey() []byte {
    return []byte("CURATING_ACL_CONFIG")
}

func (c *CuratingACL) getTrustedPubkeyKey(pubkey string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_TRUSTED_PUBKEY_")
    buf.WriteString(pubkey)
    return buf.Bytes()
}

func (c *CuratingACL) getTrustedPubkeyPrefix() []byte {
    return []byte("CURATING_ACL_TRUSTED_PUBKEY_")
}

func (c *CuratingACL) getBlacklistedPubkeyKey(pubkey string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_BLACKLISTED_PUBKEY_")
    buf.WriteString(pubkey)
    return buf.Bytes()
}

func (c *CuratingACL) getBlacklistedPubkeyPrefix() []byte {
    return []byte("CURATING_ACL_BLACKLISTED_PUBKEY_")
}

func (c *CuratingACL) getEventCountKey(pubkey, date string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_EVENT_COUNT_")
    buf.WriteString(pubkey)
    buf.WriteString("_")
    buf.WriteString(date)
    return buf.Bytes()
}

func (c *CuratingACL) getEventCountPrefix() []byte {
    return []byte("CURATING_ACL_EVENT_COUNT_")
}

func (c *CuratingACL) getIPOffenseKey(ip string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_IP_OFFENSE_")
    buf.WriteString(ip)
    return buf.Bytes()
}

func (c *CuratingACL) getBlockedIPKey(ip string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_BLOCKED_IP_")
    buf.WriteString(ip)
    return buf.Bytes()
}

func (c *CuratingACL) getBlockedIPPrefix() []byte {
    return []byte("CURATING_ACL_BLOCKED_IP_")
}

func (c *CuratingACL) getSpamEventKey(eventID string) []byte {
    buf := new(bytes.Buffer)
    buf.WriteString("CURATING_ACL_SPAM_EVENT_")
    buf.WriteString(eventID)
    return buf.Bytes()
}

func (c *CuratingACL) getSpamEventPrefix() []byte {
    return []byte("CURATING_ACL_SPAM_EVENT_")
}

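// Illustrative summary (not part of the original commit): all curation state
// shares one flat key namespace in Badger, composed by the helpers above.
//
//	CURATING_ACL_CONFIG                      -> CuratingConfig
//	CURATING_ACL_TRUSTED_PUBKEY_<hex>        -> TrustedPubkey
//	CURATING_ACL_BLACKLISTED_PUBKEY_<hex>    -> BlacklistedPubkey
//	CURATING_ACL_EVENT_COUNT_<hex>_<date>    -> PubkeyEventCount
//	CURATING_ACL_IP_EVENT_COUNT_<ip>_<date>  -> IPEventCount
//	CURATING_ACL_IP_OFFENSE_<ip>             -> IPOffense
//	CURATING_ACL_BLOCKED_IP_<ip>             -> CuratingBlockedIP
//	CURATING_ACL_SPAM_EVENT_<id>             -> SpamEvent
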
// ==================== Kind Checking Helpers ====================

// IsKindAllowed checks if an event kind is allowed based on the config
func (c *CuratingACL) IsKindAllowed(kind int, config *CuratingConfig) bool {
    if config == nil {
        return false
    }

    // Check explicit kinds
    for _, k := range config.AllowedKinds {
        if k == kind {
            return true
        }
    }

    // Check ranges
    for _, rangeStr := range config.AllowedRanges {
        if kindInRange(kind, rangeStr) {
            return true
        }
    }

    // Check categories
    for _, cat := range config.KindCategories {
        if kindInCategory(kind, cat) {
            return true
        }
    }

    return false
}

// kindInRange checks if a kind is within a range string like "1000-1999"
func kindInRange(kind int, rangeStr string) bool {
    var start, end int
    n, err := fmt.Sscanf(rangeStr, "%d-%d", &start, &end)
    if err != nil || n != 2 {
        return false
    }
    return kind >= start && kind <= end
}

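// Illustrative sketch (not part of the original commit): how the three allow
// mechanisms combine. A kind passes if any one of them matches; cacl is a
// stand-in for a CuratingACL instance.
//
//	cfg := &CuratingConfig{
//		AllowedKinds:   []int{30023},
//		AllowedRanges:  []string{"1000-1999"},
//		KindCategories: []string{"social"},
//	}
//	cacl.IsKindAllowed(1, cfg)     // true  (kind 1 is in the "social" category)
//	cacl.IsKindAllowed(1500, cfg)  // true  (inside the "1000-1999" range)
//	cacl.IsKindAllowed(30023, cfg) // true  (explicitly listed)
//	cacl.IsKindAllowed(4, cfg)     // false (the "dm" category is not enabled)
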
// kindInCategory checks if a kind belongs to a predefined category
func kindInCategory(kind int, category string) bool {
    categories := map[string][]int{
        "social":       {0, 1, 3, 6, 7, 10002},
        "dm":           {4, 14, 1059},
        "longform":     {30023, 30024},
        "media":        {1063, 20, 21, 22},
        "marketplace":  {30017, 30018, 30019, 30020, 1021, 1022},
        "groups_nip29": {9, 10, 11, 12, 9000, 9001, 9002, 39000, 39001, 39002},
        "groups_nip72": {34550, 1111, 4550},
        "lists":        {10000, 10001, 10003, 30000, 30001, 30003},
    }

    kinds, ok := categories[category]
    if !ok {
        return false
    }

    for _, k := range kinds {
        if k == kind {
            return true
        }
    }
    return false
}

@@ -0,0 +1,163 @@

package neo4j

import (
    "context"
    "fmt"
    "strings"
)

// AddInboundRefsToResult collects inbound references (events that reference discovered items)
// for events at a specific depth in the result.
//
// For example, if you have a follows graph result and want to find all kind-7 reactions
// to posts by users at depth 1, this collects those reactions and adds them to result.InboundRefs.
//
// Parameters:
//   - result: The graph result to augment with ref data
//   - depth: The depth at which to collect refs (0 = all depths)
//   - kinds: Event kinds to collect (e.g., [7] for reactions, [6] for reposts)
func (n *N) AddInboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
    ctx := context.Background()

    // Get the pubkeys to find refs for
    var pubkeys []string
    if depth == 0 {
        pubkeys = result.GetAllPubkeys()
    } else {
        pubkeys = result.GetPubkeysAtDepth(depth)
    }

    if len(pubkeys) == 0 {
        n.Logger.Debugf("AddInboundRefsToResult: no pubkeys at depth %d", depth)
        return nil
    }

    // Convert kinds to int64 for Neo4j
    kindsInt := make([]int64, len(kinds))
    for i, k := range kinds {
        kindsInt[i] = int64(k)
    }

    // Query for events by these pubkeys and their inbound references.
    // This finds: (ref:Event)-[:REFERENCES]->(authored:Event)<-[:AUTHORED_BY]-(u:NostrUser)
    // where the referencing event has the specified kinds. Note that the
    // referenced (authored) events are fixed to kinds 1 and 30023 here:
    // inbound refs are only collected against notes and long-form articles.
    cypher := `
        UNWIND $pubkeys AS pk
        MATCH (u:NostrUser {pubkey: pk})<-[:AUTHORED_BY]-(authored:Event)
        WHERE authored.kind IN [1, 30023]
        MATCH (ref:Event)-[:REFERENCES]->(authored)
        WHERE ref.kind IN $kinds
        RETURN authored.id AS target_id, ref.id AS ref_id, ref.kind AS ref_kind
    `

    params := map[string]any{
        "pubkeys": pubkeys,
        "kinds":   kindsInt,
    }

    queryResult, err := n.ExecuteRead(ctx, cypher, params)
    if err != nil {
        return fmt.Errorf("failed to query inbound refs: %w", err)
    }

    refCount := 0
    for queryResult.Next(ctx) {
        record := queryResult.Record()

        targetID, ok := record.Values[0].(string)
        if !ok || targetID == "" {
            continue
        }

        refID, ok := record.Values[1].(string)
        if !ok || refID == "" {
            continue
        }

        refKind, ok := record.Values[2].(int64)
        if !ok {
            continue
        }

        result.AddInboundRef(uint16(refKind), strings.ToLower(targetID), strings.ToLower(refID))
        refCount++
    }

    n.Logger.Debugf("AddInboundRefsToResult: collected %d refs for %d pubkeys", refCount, len(pubkeys))

    return nil
}

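// Illustrative sketch (not part of the original commit): collecting kind-7
// reactions to posts authored at depth 1 of a follows-graph result.
//
//	if err := n.AddInboundRefsToResult(result, 1, []uint16{7}); err != nil {
//		// handle the error; the result is left unaugmented
//	}
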
// AddOutboundRefsToResult collects outbound references (events referenced by discovered items).
//
// For example, find all events that posts by users at depth 1 reference (quoted posts, replied-to posts).
func (n *N) AddOutboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
    ctx := context.Background()

    // Get the pubkeys to find refs for
    var pubkeys []string
    if depth == 0 {
        pubkeys = result.GetAllPubkeys()
    } else {
        pubkeys = result.GetPubkeysAtDepth(depth)
    }

    if len(pubkeys) == 0 {
        n.Logger.Debugf("AddOutboundRefsToResult: no pubkeys at depth %d", depth)
        return nil
    }

    // Convert kinds to int64 for Neo4j
    kindsInt := make([]int64, len(kinds))
    for i, k := range kinds {
        kindsInt[i] = int64(k)
    }

    // Query for events by these pubkeys and their outbound references.
    // This finds: (authored:Event)-[:REFERENCES]->(ref:Event)
    // where the authored event has the specified kinds
    cypher := `
        UNWIND $pubkeys AS pk
        MATCH (u:NostrUser {pubkey: pk})<-[:AUTHORED_BY]-(authored:Event)
        WHERE authored.kind IN $kinds
        MATCH (authored)-[:REFERENCES]->(ref:Event)
        RETURN authored.id AS source_id, ref.id AS ref_id, authored.kind AS source_kind
    `

    params := map[string]any{
        "pubkeys": pubkeys,
        "kinds":   kindsInt,
    }

    queryResult, err := n.ExecuteRead(ctx, cypher, params)
    if err != nil {
        return fmt.Errorf("failed to query outbound refs: %w", err)
    }

    refCount := 0
    for queryResult.Next(ctx) {
        record := queryResult.Record()

        sourceID, ok := record.Values[0].(string)
        if !ok || sourceID == "" {
            continue
        }

        refID, ok := record.Values[1].(string)
        if !ok || refID == "" {
            continue
        }

        sourceKind, ok := record.Values[2].(int64)
        if !ok {
            continue
        }

        result.AddOutboundRef(uint16(sourceKind), strings.ToLower(sourceID), strings.ToLower(refID))
        refCount++
    }

    n.Logger.Debugf("AddOutboundRefsToResult: collected %d refs from %d pubkeys", refCount, len(pubkeys))

    return nil
}

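// Illustrative sketch (not part of the original commit): the outbound
// companion. Here kinds filters the authored events, so this collects
// whatever the depth-1 authors' kind-1 notes reference (replies, quotes).
//
//	if err := n.AddOutboundRefsToResult(result, 1, []uint16{1}); err != nil {
//		// handle the error; the result is left unaugmented
//	}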