Browse Source

implement wasm/js specific database engine

main
mleku 1 month ago
parent
commit
0a61f274d5
No known key found for this signature in database
  1. 5
      .claude/settings.local.json
  2. 7
      .gitignore
  3. 2
      go.mod
  4. 4
      go.sum
  5. 2
      pkg/database/cleanup-kind3.go
  6. 2
      pkg/database/count.go
  7. 2
      pkg/database/database.go
  8. 2
      pkg/database/delete-event.go
  9. 2
      pkg/database/delete-expired.go
  10. 2
      pkg/database/export.go
  11. 20
      pkg/database/factory.go
  12. 2
      pkg/database/fetch-event-by-serial.go
  13. 2
      pkg/database/fetch-events-by-serials.go
  14. 2
      pkg/database/get-fullidpubkey-by-serial.go
  15. 2
      pkg/database/get-fullidpubkey-by-serials.go
  16. 2
      pkg/database/get-serial-by-id.go
  17. 2
      pkg/database/get-serials-by-range.go
  18. 2
      pkg/database/identity.go
  19. 2
      pkg/database/import.go
  20. 2
      pkg/database/import_utils.go
  21. 2
      pkg/database/logger.go
  22. 2
      pkg/database/managed-acl.go
  23. 2
      pkg/database/markers.go
  24. 2
      pkg/database/migrations.go
  25. 9
      pkg/database/nip43.go
  26. 2
      pkg/database/process-delete.go
  27. 2
      pkg/database/pubkey-serial.go
  28. 2
      pkg/database/query-events.go
  29. 2
      pkg/database/query-for-deleted.go
  30. 2
      pkg/database/query-for-ids.go
  31. 2
      pkg/database/query-for-ptag-graph.go
  32. 2
      pkg/database/query-for-serials.go
  33. 2
      pkg/database/save-event.go
  34. 16
      pkg/database/subscriptions.go
  35. 59
      pkg/neo4j/delete.go
  36. 555
      pkg/neo4j/delete_test.go
  37. 570
      pkg/neo4j/expiration_test.go
  38. 502
      pkg/neo4j/fetch-event_test.go
  39. 117
      pkg/neo4j/import-export.go
  40. 342
      pkg/neo4j/nip43_test.go
  41. 452
      pkg/neo4j/query-events_test.go
  42. 24
      pkg/neo4j/save-event.go
  43. 5
      pkg/neo4j/schema.go
  44. 436
      pkg/neo4j/subscriptions_test.go
  45. 2
      pkg/version/version
  46. 614
      pkg/wasmdb/delete-event.go
  47. 256
      pkg/wasmdb/fetch-event.go
  48. 162
      pkg/wasmdb/helpers.go
  49. 293
      pkg/wasmdb/import-export.go
  50. 75
      pkg/wasmdb/logger.go
  51. 213
      pkg/wasmdb/nip43.go
  52. 767
      pkg/wasmdb/query-events.go
  53. 26
      pkg/wasmdb/run-tests.sh
  54. 423
      pkg/wasmdb/save-event.go
  55. 332
      pkg/wasmdb/subscriptions.go
  56. 24
      pkg/wasmdb/testdata/package-lock.json
  57. 12
      pkg/wasmdb/testdata/package.json
  58. 572
      pkg/wasmdb/wasmdb.go
  59. 1739
      pkg/wasmdb/wasmdb_test.go

5
.claude/settings.local.json

@ -168,7 +168,10 @@ @@ -168,7 +168,10 @@
"Bash(./orly:*)",
"Bash(./orly -version:*)",
"Bash(./orly --version:*)",
"Bash(GOOS=js GOARCH=wasm go test:*)"
"Bash(GOOS=js GOARCH=wasm go test:*)",
"Bash(ls:*)",
"Bash(GOROOT=/home/mleku/go node:*)",
"Bash(GOOS=js GOARCH=wasm go build:*)"
],
"deny": [],
"ask": []

7
.gitignore vendored

@ -10,8 +10,6 @@ @@ -10,8 +10,6 @@
# Especially these
.vscode/
**/.vscode/
node_modules/
**/node_modules/
/test*
.idea/
# and others
@ -98,6 +96,10 @@ cmd/benchmark/data @@ -98,6 +96,10 @@ cmd/benchmark/data
# Re-ignore IDE directories (must come after !*/)
.idea/
**/.idea/
# Re-ignore node_modules everywhere (must come after !*/)
node_modules/
**/node_modules/
/blocklist.json
/gui/gui/main.wasm
/gui/gui/index.html
@ -105,7 +107,6 @@ pkg/database/testrealy @@ -105,7 +107,6 @@ pkg/database/testrealy
/ctxproxy.config.yml
cmd/benchmark/external/**
private*
pkg/protocol/directory-client/node_modules
# Build outputs
build/orly-*

2
go.mod

@ -30,6 +30,7 @@ require ( @@ -30,6 +30,7 @@ require (
require (
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
github.com/aperturerobotics/go-indexeddb v0.2.3 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
github.com/bytedance/sonic v1.13.1 // indirect
@ -50,6 +51,7 @@ require ( @@ -50,6 +51,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/flatbuffers v25.9.23+incompatible // indirect
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
github.com/hack-pad/safejs v0.1.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect

4
go.sum

@ -8,6 +8,8 @@ github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNN @@ -8,6 +8,8 @@ github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNN
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/aperturerobotics/go-indexeddb v0.2.3 h1:DfquIk9YEZjWD/lJyBWZWGCtRga43/a96bx0Ulv9VhQ=
github.com/aperturerobotics/go-indexeddb v0.2.3/go.mod h1:JV1XngOCCui7zrMSyRz+Wvz00nUSfotRKZqJzWpl5fQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@ -99,6 +101,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -99,6 +101,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=
github.com/hack-pad/safejs v0.1.1/go.mod h1:HdS+bKF1NrE72VoXZeWzxFOVQVUSqZJAG0xNCnb+Tio=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=

2
pkg/database/cleanup-kind3.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/count.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/database.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/delete-event.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/delete-expired.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/export.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

20
pkg/database/factory.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (
@ -72,8 +74,14 @@ func NewDatabaseWithConfig( @@ -72,8 +74,14 @@ func NewDatabaseWithConfig(
return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
}
return newNeo4jDatabase(ctx, cancel, cfg)
case "wasmdb", "indexeddb", "wasm":
// Use the wasmdb implementation (IndexedDB backend for WebAssembly)
if newWasmDBDatabase == nil {
return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
}
return newWasmDBDatabase(ctx, cancel, cfg)
default:
return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j, wasmdb)", dbType)
}
}
@ -96,3 +104,13 @@ var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) @@ -96,3 +104,13 @@ var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig)
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
newNeo4jDatabase = factory
}
// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
// This is defined here to avoid import cycles
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
// RegisterWasmDBFactory registers the wasmdb database factory
// This is called from the wasmdb package's init() function
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
newWasmDBDatabase = factory
}

2
pkg/database/fetch-event-by-serial.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/fetch-events-by-serials.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/get-fullidpubkey-by-serial.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/get-fullidpubkey-by-serials.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/get-serial-by-id.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/get-serials-by-range.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/identity.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/import.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/import_utils.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
// Package database provides shared import utilities for events
package database

2
pkg/database/logger.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/managed-acl.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/markers.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/migrations.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

9
pkg/database/nip43.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (
@ -11,13 +13,6 @@ import ( @@ -11,13 +13,6 @@ import (
"git.mleku.dev/mleku/nostr/encoders/hex"
)
// NIP43Membership represents membership metadata for NIP-43
type NIP43Membership struct {
Pubkey []byte
AddedAt time.Time
InviteCode string
}
// Database key prefixes for NIP-43
const (
nip43MemberPrefix = "nip43:member:"

2
pkg/database/process-delete.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/pubkey-serial.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/query-events.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/query-for-deleted.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/query-for-ids.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/query-for-ptag-graph.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/query-for-serials.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

2
pkg/database/save-event.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (

16
pkg/database/subscriptions.go

@ -1,3 +1,5 @@ @@ -1,3 +1,5 @@
//go:build !(js && wasm)
package database
import (
@ -12,13 +14,6 @@ import ( @@ -12,13 +14,6 @@ import (
"github.com/dgraph-io/badger/v4"
)
type Subscription struct {
TrialEnd time.Time `json:"trial_end"`
PaidUntil time.Time `json:"paid_until"`
BlossomLevel string `json:"blossom_level,omitempty"` // Service level name (e.g., "basic", "premium")
BlossomStorage int64 `json:"blossom_storage,omitempty"` // Storage quota in MB
}
func (d *D) GetSubscription(pubkey []byte) (*Subscription, error) {
key := fmt.Sprintf("sub:%s", hex.EncodeToString(pubkey))
var sub *Subscription
@ -122,13 +117,6 @@ func (d *D) ExtendSubscription(pubkey []byte, days int) error { @@ -122,13 +117,6 @@ func (d *D) ExtendSubscription(pubkey []byte, days int) error {
)
}
type Payment struct {
Amount int64 `json:"amount"`
Timestamp time.Time `json:"timestamp"`
Invoice string `json:"invoice"`
Preimage string `json:"preimage"`
}
func (d *D) RecordPayment(
pubkey []byte, amount int64, invoice, preimage string,
) error {

59
pkg/neo4j/delete.go

@ -3,10 +3,11 @@ package neo4j @@ -3,10 +3,11 @@ package neo4j
import (
"context"
"fmt"
"time"
"next.orly.dev/pkg/database/indexes/types"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"next.orly.dev/pkg/database/indexes/types"
)
// DeleteEvent deletes an event by its ID
@ -39,10 +40,60 @@ func (n *N) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event. @@ -39,10 +40,60 @@ func (n *N) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.
return nil
}
// DeleteExpired deletes expired events (stub implementation)
// DeleteExpired deletes expired events based on NIP-40 expiration tags
// Events with an expiration property > 0 and <= current time are deleted
func (n *N) DeleteExpired() {
// This would need to implement expiration logic based on event.expiration tag (NIP-40)
// For now, this is a no-op
ctx := context.Background()
now := time.Now().Unix()
// Query for expired events (expiration > 0 means it has an expiration, and <= now means it's expired)
cypher := `
MATCH (e:Event)
WHERE e.expiration > 0 AND e.expiration <= $now
RETURN e.serial AS serial, e.id AS id
LIMIT 1000`
params := map[string]any{"now": now}
result, err := n.ExecuteRead(ctx, cypher, params)
if err != nil {
n.Logger.Warningf("failed to query expired events: %v", err)
return
}
// Collect serials to delete
var deleteCount int
for result.Next(ctx) {
record := result.Record()
if record == nil {
continue
}
idRaw, found := record.Get("id")
if !found {
continue
}
idStr, ok := idRaw.(string)
if !ok {
continue
}
// Delete the expired event
deleteCypher := "MATCH (e:Event {id: $id}) DETACH DELETE e"
deleteParams := map[string]any{"id": idStr}
if _, err := n.ExecuteWrite(ctx, deleteCypher, deleteParams); err != nil {
n.Logger.Warningf("failed to delete expired event %s: %v", idStr[:16], err)
continue
}
deleteCount++
}
if deleteCount > 0 {
n.Logger.Infof("deleted %d expired events", deleteCount)
}
}
// ProcessDelete processes a kind 5 deletion event

555
pkg/neo4j/delete_test.go

@ -0,0 +1,555 @@ @@ -0,0 +1,555 @@
package neo4j
import (
"context"
"os"
"testing"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)
func TestDeleteEvent(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create and save event
ev := event.New()
ev.Pubkey = signer.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = 1
ev.Content = []byte("Event to be deleted")
if err := ev.Sign(signer); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, ev); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Verify event exists
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(ev.ID),
})
if err != nil {
t.Fatalf("Failed to query event: %v", err)
}
if len(evs) != 1 {
t.Fatalf("Expected 1 event before deletion, got %d", len(evs))
}
// Delete the event
if err := db.DeleteEvent(ctx, ev.ID[:]); err != nil {
t.Fatalf("Failed to delete event: %v", err)
}
// Verify event is deleted
evs, err = db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(ev.ID),
})
if err != nil {
t.Fatalf("Failed to query after deletion: %v", err)
}
if len(evs) != 0 {
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
}
t.Logf("✓ DeleteEvent successfully removed event")
}
func TestDeleteEventBySerial(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create and save event
ev := event.New()
ev.Pubkey = signer.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = 1
ev.Content = []byte("Event to be deleted by serial")
if err := ev.Sign(signer); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, ev); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Get serial
serial, err := db.GetSerialById(ev.ID[:])
if err != nil {
t.Fatalf("Failed to get serial: %v", err)
}
// Delete by serial
if err := db.DeleteEventBySerial(ctx, serial, ev); err != nil {
t.Fatalf("Failed to delete event by serial: %v", err)
}
// Verify event is deleted
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(ev.ID),
})
if err != nil {
t.Fatalf("Failed to query after deletion: %v", err)
}
if len(evs) != 0 {
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
}
t.Logf("✓ DeleteEventBySerial successfully removed event")
}
func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create and save original event
originalEvent := event.New()
originalEvent.Pubkey = signer.Pub()
originalEvent.CreatedAt = timestamp.Now().V
originalEvent.Kind = 1
originalEvent.Content = []byte("This event will be deleted via kind 5")
if err := originalEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, originalEvent); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Create kind 5 deletion event
deleteEvent := event.New()
deleteEvent.Pubkey = signer.Pub() // Same author
deleteEvent.CreatedAt = timestamp.Now().V + 1
deleteEvent.Kind = kind.Deletion.K
deleteEvent.Content = []byte("Deleting my event")
deleteEvent.Tags = tag.NewS(
tag.NewFromAny("e", hex.Enc(originalEvent.ID[:])),
)
if err := deleteEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign delete event: %v", err)
}
// Process deletion (no admins)
if err := db.ProcessDelete(deleteEvent, nil); err != nil {
t.Fatalf("Failed to process delete: %v", err)
}
// Verify original event is deleted
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(originalEvent.ID),
})
if err != nil {
t.Fatalf("Failed to query after deletion: %v", err)
}
if len(evs) != 0 {
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
}
t.Logf("✓ ProcessDelete allowed author to delete own event")
}
func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
alice, _ := p8k.New()
alice.Generate()
bob, _ := p8k.New()
bob.Generate()
// Alice creates an event
aliceEvent := event.New()
aliceEvent.Pubkey = alice.Pub()
aliceEvent.CreatedAt = timestamp.Now().V
aliceEvent.Kind = 1
aliceEvent.Content = []byte("Alice's event")
if err := aliceEvent.Sign(alice); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Bob tries to delete Alice's event
deleteEvent := event.New()
deleteEvent.Pubkey = bob.Pub() // Different author
deleteEvent.CreatedAt = timestamp.Now().V + 1
deleteEvent.Kind = kind.Deletion.K
deleteEvent.Tags = tag.NewS(
tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
)
if err := deleteEvent.Sign(bob); err != nil {
t.Fatalf("Failed to sign delete event: %v", err)
}
// Process deletion (Bob is not an admin)
_ = db.ProcessDelete(deleteEvent, nil)
// Verify Alice's event still exists
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
})
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if len(evs) != 1 {
t.Fatalf("Expected Alice's event to still exist, got %d events", len(evs))
}
t.Logf("✓ ProcessDelete correctly prevented unauthorized deletion")
}
func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
alice, _ := p8k.New()
alice.Generate()
admin, _ := p8k.New()
admin.Generate()
// Alice creates an event
aliceEvent := event.New()
aliceEvent.Pubkey = alice.Pub()
aliceEvent.CreatedAt = timestamp.Now().V
aliceEvent.Kind = 1
aliceEvent.Content = []byte("Alice's event to be deleted by admin")
if err := aliceEvent.Sign(alice); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Admin creates deletion event
deleteEvent := event.New()
deleteEvent.Pubkey = admin.Pub()
deleteEvent.CreatedAt = timestamp.Now().V + 1
deleteEvent.Kind = kind.Deletion.K
deleteEvent.Tags = tag.NewS(
tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
)
if err := deleteEvent.Sign(admin); err != nil {
t.Fatalf("Failed to sign delete event: %v", err)
}
// Process deletion with admin pubkey
adminPubkeys := [][]byte{admin.Pub()}
if err := db.ProcessDelete(deleteEvent, adminPubkeys); err != nil {
t.Fatalf("Failed to process delete: %v", err)
}
// Verify Alice's event is deleted
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
})
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if len(evs) != 0 {
t.Fatalf("Expected Alice's event to be deleted, got %d events", len(evs))
}
t.Logf("✓ ProcessDelete allowed admin to delete event")
}
func TestCheckForDeleted(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create target event
targetEvent := event.New()
targetEvent.Pubkey = signer.Pub()
targetEvent.CreatedAt = timestamp.Now().V
targetEvent.Kind = 1
targetEvent.Content = []byte("Target event")
if err := targetEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign target event: %v", err)
}
if _, err := db.SaveEvent(ctx, targetEvent); err != nil {
t.Fatalf("Failed to save target event: %v", err)
}
// Check that event is not deleted (no deletion event exists)
err = db.CheckForDeleted(targetEvent, nil)
if err != nil {
t.Fatalf("Expected no error for non-deleted event, got: %v", err)
}
// Create deletion event that references target
deleteEvent := event.New()
deleteEvent.Pubkey = signer.Pub()
deleteEvent.CreatedAt = timestamp.Now().V + 1
deleteEvent.Kind = kind.Deletion.K
deleteEvent.Tags = tag.NewS(
tag.NewFromAny("e", hex.Enc(targetEvent.ID[:])),
)
if err := deleteEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign delete event: %v", err)
}
if _, err := db.SaveEvent(ctx, deleteEvent); err != nil {
t.Fatalf("Failed to save delete event: %v", err)
}
// Now check should return error (event has been deleted)
err = db.CheckForDeleted(targetEvent, nil)
if err == nil {
t.Fatal("Expected error for deleted event")
}
t.Logf("✓ CheckForDeleted correctly detected deletion event")
}
func TestReplaceableEventDeletion(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create replaceable event (kind 0 - profile)
profileEvent := event.New()
profileEvent.Pubkey = signer.Pub()
profileEvent.CreatedAt = timestamp.Now().V
profileEvent.Kind = 0
profileEvent.Content = []byte(`{"name":"Test User"}`)
if err := profileEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, profileEvent); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Verify event exists
evs, err := db.QueryEvents(ctx, &filter.F{
Kinds: kind.NewS(kind.New(0)),
Authors: tag.NewFromBytesSlice(signer.Pub()),
})
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if len(evs) != 1 {
t.Fatalf("Expected 1 profile event, got %d", len(evs))
}
// Create a newer replaceable event (replaces the old one)
newerProfileEvent := event.New()
newerProfileEvent.Pubkey = signer.Pub()
newerProfileEvent.CreatedAt = timestamp.Now().V + 100
newerProfileEvent.Kind = 0
newerProfileEvent.Content = []byte(`{"name":"Updated User"}`)
if err := newerProfileEvent.Sign(signer); err != nil {
t.Fatalf("Failed to sign newer event: %v", err)
}
if _, err := db.SaveEvent(ctx, newerProfileEvent); err != nil {
t.Fatalf("Failed to save newer event: %v", err)
}
// Query should return only the newer event
evs, err = db.QueryEvents(ctx, &filter.F{
Kinds: kind.NewS(kind.New(0)),
Authors: tag.NewFromBytesSlice(signer.Pub()),
})
if err != nil {
t.Fatalf("Failed to query: %v", err)
}
if len(evs) != 1 {
t.Fatalf("Expected 1 profile event after replacement, got %d", len(evs))
}
if hex.Enc(evs[0].ID[:]) != hex.Enc(newerProfileEvent.ID[:]) {
t.Fatal("Expected newer profile event to be returned")
}
t.Logf("✓ Replaceable event correctly replaced by newer version")
}

570
pkg/neo4j/expiration_test.go

@ -0,0 +1,570 @@ @@ -0,0 +1,570 @@
package neo4j
import (
"bytes"
"context"
"encoding/json"
"os"
"testing"
"time"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)
func TestExpiration_SaveEventWithExpiration(t *testing.T) {
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
if neo4jURI == "" {
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempDir := t.TempDir()
db, err := New(ctx, cancel, tempDir, "debug")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
<-db.Ready()
if err := db.Wipe(); err != nil {
t.Fatalf("Failed to wipe database: %v", err)
}
signer, err := p8k.New()
if err != nil {
t.Fatalf("Failed to create signer: %v", err)
}
if err := signer.Generate(); err != nil {
t.Fatalf("Failed to generate keypair: %v", err)
}
// Create event with expiration tag (expires in 1 hour)
futureExpiration := time.Now().Add(1 * time.Hour).Unix()
ev := event.New()
ev.Pubkey = signer.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = 1
ev.Content = []byte("Event with expiration")
ev.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))
if err := ev.Sign(signer); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
if _, err := db.SaveEvent(ctx, ev); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Query the event to verify it was saved
evs, err := db.QueryEvents(ctx, &filter.F{
Ids: tag.NewFromBytesSlice(ev.ID),
})
if err != nil {
t.Fatalf("Failed to query event: %v", err)
}
if len(evs) != 1 {
t.Fatalf("Expected 1 event, got %d", len(evs))
}
t.Logf("✓ Event with expiration tag saved successfully")
}
// TestExpiration_DeleteExpiredEvents verifies that DeleteExpired removes only
// events whose NIP-40 "expiration" tag lies in the past: an already-expired
// event is deleted while an event with a future expiration and an event with
// no expiration tag both survive. Skipped unless ORLY_NEO4J_URI points at a
// live Neo4j instance.
func TestExpiration_DeleteExpiredEvents(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create an expired event (expired 1 hour ago)
	pastExpiration := time.Now().Add(-1 * time.Hour).Unix()
	expiredEv := event.New()
	expiredEv.Pubkey = signer.Pub()
	expiredEv.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
	expiredEv.Kind = 1
	expiredEv.Content = []byte("Expired event")
	expiredEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(pastExpiration).String()))
	if err := expiredEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign expired event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, expiredEv); err != nil {
		t.Fatalf("Failed to save expired event: %v", err)
	}
	// Create a non-expired event (expires in 1 hour)
	futureExpiration := time.Now().Add(1 * time.Hour).Unix()
	validEv := event.New()
	validEv.Pubkey = signer.Pub()
	validEv.CreatedAt = timestamp.Now().V
	validEv.Kind = 1
	validEv.Content = []byte("Valid event")
	validEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))
	if err := validEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign valid event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, validEv); err != nil {
		t.Fatalf("Failed to save valid event: %v", err)
	}
	// Create an event without expiration
	// (CreatedAt is offset by 1 second to keep the three events distinct.)
	permanentEv := event.New()
	permanentEv.Pubkey = signer.Pub()
	permanentEv.CreatedAt = timestamp.Now().V + 1
	permanentEv.Kind = 1
	permanentEv.Content = []byte("Permanent event (no expiration)")
	if err := permanentEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign permanent event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, permanentEv); err != nil {
		t.Fatalf("Failed to save permanent event: %v", err)
	}
	// Verify all 3 events exist before running the sweep.
	evs, err := db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(evs) != 3 {
		t.Fatalf("Expected 3 events before deletion, got %d", len(evs))
	}
	// Run DeleteExpired
	db.DeleteExpired()
	// Verify only expired event was deleted
	evs, err = db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events after deletion: %v", err)
	}
	if len(evs) != 2 {
		t.Fatalf("Expected 2 events after deletion (expired removed), got %d", len(evs))
	}
	// Verify the correct events remain by comparing event IDs.
	foundValid := false
	foundPermanent := false
	for _, ev := range evs {
		if hex.Enc(ev.ID[:]) == hex.Enc(validEv.ID[:]) {
			foundValid = true
		}
		if hex.Enc(ev.ID[:]) == hex.Enc(permanentEv.ID[:]) {
			foundPermanent = true
		}
	}
	if !foundValid {
		t.Fatal("Valid event (with future expiration) was incorrectly deleted")
	}
	if !foundPermanent {
		t.Fatal("Permanent event (no expiration) was incorrectly deleted")
	}
	t.Logf("✓ DeleteExpired correctly removed only expired events")
}
// TestExpiration_NoExpirationTag checks that DeleteExpired leaves events
// without an "expiration" tag untouched. Skipped unless ORLY_NEO4J_URI points
// at a live Neo4j instance.
func TestExpiration_NoExpirationTag(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	c, stop := context.WithCancel(context.Background())
	defer stop()
	store, err := New(c, stop, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer store.Close()
	<-store.Ready()
	if err = store.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	sk, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err = sk.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Save a single event that carries no expiration tag.
	ev := event.New()
	ev.Kind = 1
	ev.Pubkey = sk.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte("Event without expiration")
	if err = ev.Sign(sk); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err = store.SaveEvent(c, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	// The sweep must not touch an event lacking an expiration tag.
	store.DeleteExpired()
	got, err := store.QueryEvents(c, &filter.F{
		Ids: tag.NewFromBytesSlice(ev.ID),
	})
	if err != nil {
		t.Fatalf("Failed to query event: %v", err)
	}
	if len(got) != 1 {
		t.Fatalf("Expected 1 event (no expiration should not be deleted), got %d", len(got))
	}
	t.Logf("✓ Events without expiration tag are not deleted")
}
// TestExport_AllEvents verifies that Export with no pubkey filter writes every
// stored event as one JSON object per line (JSONL). Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestExport_AllEvents(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save some events
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event for export")
		ev.Tags = tag.NewS(tag.NewFromAny("t", "test"))
		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}
	// Export all events
	var buf bytes.Buffer
	db.Export(ctx, &buf)
	// Parse the exported JSONL: every non-empty line must unmarshal as an event.
	lines := bytes.Split(buf.Bytes(), []byte("\n"))
	validLines := 0
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		var ev event.E
		if err := json.Unmarshal(line, &ev); err != nil {
			t.Fatalf("Failed to parse exported event: %v", err)
		}
		validLines++
	}
	if validLines != 5 {
		t.Fatalf("Expected 5 exported events, got %d", validLines)
	}
	t.Logf("✓ Export all events returned %d events in JSONL format", validLines)
}
// TestExport_FilterByPubkey verifies that Export with a pubkey argument emits
// only that author's events, in valid JSONL. Skipped unless ORLY_NEO4J_URI
// points at a live Neo4j instance.
func TestExport_FilterByPubkey(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Create two signers, failing fast on key-generation errors instead of
	// silently proceeding with unusable signers.
	alice, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := alice.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	bob, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := bob.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	baseTs := timestamp.Now().V
	// Create events from Alice
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = alice.Pub()
		ev.CreatedAt = baseTs + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Alice's event")
		if err := ev.Sign(alice); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}
	// Create events from Bob
	for i := 0; i < 2; i++ {
		ev := event.New()
		ev.Pubkey = bob.Pub()
		ev.CreatedAt = baseTs + int64(i) + 10
		ev.Kind = 1
		ev.Content = []byte("Bob's event")
		if err := ev.Sign(bob); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}
	// Export only Alice's events
	var buf bytes.Buffer
	db.Export(ctx, &buf, alice.Pub())
	// Parse the exported JSONL and verify every line belongs to Alice.
	lines := bytes.Split(buf.Bytes(), []byte("\n"))
	validLines := 0
	alicePubkey := hex.Enc(alice.Pub())
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		var ev event.E
		if err := json.Unmarshal(line, &ev); err != nil {
			t.Fatalf("Failed to parse exported event: %v", err)
		}
		if hex.Enc(ev.Pubkey[:]) != alicePubkey {
			t.Fatalf("Exported event has wrong pubkey (expected Alice)")
		}
		validLines++
	}
	if validLines != 3 {
		t.Fatalf("Expected 3 events from Alice, got %d", validLines)
	}
	t.Logf("✓ Export with pubkey filter returned %d events from Alice only", validLines)
}
// TestExport_Empty confirms that exporting from a freshly wiped database
// produces no output (at most trailing whitespace). Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestExport_Empty(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	c, stop := context.WithCancel(context.Background())
	defer stop()
	store, err := New(c, stop, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer store.Close()
	<-store.Ready()
	if err = store.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Export the (empty) database and require a blank result.
	var out bytes.Buffer
	store.Export(c, &out)
	if trimmed := bytes.TrimSpace(out.Bytes()); len(trimmed) != 0 {
		t.Fatalf("Expected empty export, got: %s", string(trimmed))
	}
	t.Logf("✓ Export from empty database returns empty result")
}
// TestImportExport_RoundTrip saves events, exports them as JSONL, wipes the
// database, re-imports the export, and verifies the original events (by ID)
// are restored. Skipped unless ORLY_NEO4J_URI points at a live Neo4j instance.
func TestImportExport_RoundTrip(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Fail fast if key generation fails instead of ignoring the errors.
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create original events
	originalEvents := make([]*event.E, 3)
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Round trip test event")
		ev.Tags = tag.NewS(tag.NewFromAny("t", "roundtrip"))
		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
		originalEvents[i] = ev
	}
	// Export events
	var buf bytes.Buffer
	db.Export(ctx, &buf)
	// Wipe database
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Verify database is empty
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(evs) != 0 {
		t.Fatalf("Expected 0 events after wipe, got %d", len(evs))
	}
	// Import events
	db.Import(bytes.NewReader(buf.Bytes()))
	// Verify events were restored
	evs, err = db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query imported events: %v", err)
	}
	if len(evs) != 3 {
		t.Fatalf("Expected 3 imported events, got %d", len(evs))
	}
	// Verify event IDs match
	importedIDs := make(map[string]bool)
	for _, ev := range evs {
		importedIDs[hex.Enc(ev.ID[:])] = true
	}
	for _, orig := range originalEvents {
		if !importedIDs[hex.Enc(orig.ID[:])] {
			t.Fatalf("Original event %s not found after import", hex.Enc(orig.ID[:]))
		}
	}
	t.Logf("✓ Export/Import round trip preserved %d events correctly", len(evs))
}

502
pkg/neo4j/fetch-event_test.go

@ -0,0 +1,502 @@ @@ -0,0 +1,502 @@
package neo4j
import (
"context"
"os"
"testing"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
"next.orly.dev/pkg/database/indexes/types"
)
// TestFetchEventBySerial saves an event, resolves its serial via
// GetSerialById, fetches it back with FetchEventBySerial, and compares ID,
// kind, pubkey and created_at. Skipped unless ORLY_NEO4J_URI points at a live
// Neo4j instance.
func TestFetchEventBySerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save a test event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event for fetch by serial")
	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	// Get the serial for this event
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}
	// Fetch event by serial
	fetchedEvent, err := db.FetchEventBySerial(serial)
	if err != nil {
		t.Fatalf("Failed to fetch event by serial: %v", err)
	}
	if fetchedEvent == nil {
		t.Fatal("Expected fetched event to be non-nil")
	}
	// Verify event properties round-tripped intact.
	if hex.Enc(fetchedEvent.ID[:]) != hex.Enc(ev.ID[:]) {
		t.Fatalf("Event ID mismatch: got %s, expected %s",
			hex.Enc(fetchedEvent.ID[:]), hex.Enc(ev.ID[:]))
	}
	if fetchedEvent.Kind != ev.Kind {
		t.Fatalf("Kind mismatch: got %d, expected %d", fetchedEvent.Kind, ev.Kind)
	}
	if hex.Enc(fetchedEvent.Pubkey[:]) != hex.Enc(ev.Pubkey[:]) {
		t.Fatalf("Pubkey mismatch")
	}
	if fetchedEvent.CreatedAt != ev.CreatedAt {
		t.Fatalf("CreatedAt mismatch: got %d, expected %d",
			fetchedEvent.CreatedAt, ev.CreatedAt)
	}
	t.Logf("✓ FetchEventBySerial returned correct event")
}
// TestFetchEventBySerial_NonExistent confirms FetchEventBySerial reports an
// error when handed a serial that maps to no stored event. Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestFetchEventBySerial_NonExistent(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	c, stop := context.WithCancel(context.Background())
	defer stop()
	store, err := New(c, stop, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer store.Close()
	<-store.Ready()
	// Probe with the maximum representable 40-bit serial, which no event holds.
	missing := &types.Uint40{}
	missing.Set(0xFFFFFFFFFF) // Max value
	if _, err = store.FetchEventBySerial(missing); err == nil {
		t.Fatal("Expected error for non-existent serial")
	}
	t.Logf("✓ FetchEventBySerial correctly returned error for non-existent serial")
}
// TestFetchEventsBySerials saves several events, collects their serials, and
// verifies FetchEventsBySerials returns all of them keyed by serial with the
// correct IDs. Skipped unless ORLY_NEO4J_URI points at a live Neo4j instance.
func TestFetchEventsBySerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save multiple events, remembering serial -> hex ID pairs.
	var serials []*types.Uint40
	eventIDs := make(map[uint64]string)
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")
		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
		serial, err := db.GetSerialById(ev.ID[:])
		if err != nil {
			t.Fatalf("Failed to get serial: %v", err)
		}
		serials = append(serials, serial)
		eventIDs[serial.Get()] = hex.Enc(ev.ID[:])
	}
	// Fetch all events by serials
	events, err := db.FetchEventsBySerials(serials)
	if err != nil {
		t.Fatalf("Failed to fetch events by serials: %v", err)
	}
	if len(events) != 5 {
		t.Fatalf("Expected 5 events, got %d", len(events))
	}
	// Verify each event came back under its own serial with the right ID.
	for serial, expectedID := range eventIDs {
		ev, exists := events[serial]
		if !exists {
			t.Fatalf("Event with serial %d not found", serial)
		}
		if hex.Enc(ev.ID[:]) != expectedID {
			t.Fatalf("Event ID mismatch for serial %d", serial)
		}
	}
	t.Logf("✓ FetchEventsBySerials returned %d correct events", len(events))
}
// TestGetSerialById saves one event and checks GetSerialById returns a
// non-nil, non-zero serial for its ID. Skipped unless ORLY_NEO4J_URI points
// at a live Neo4j instance.
func TestGetSerialById(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")
	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	// Get serial by ID
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}
	if serial == nil {
		t.Fatal("Expected serial to be non-nil")
	}
	// NOTE(review): this assumes serials start at 1 after a wipe — confirm
	// against the serial-allocation scheme.
	if serial.Get() == 0 {
		t.Fatal("Expected non-zero serial")
	}
	t.Logf("✓ GetSerialById returned serial: %d", serial.Get())
}
// TestGetSerialById_NonExistent verifies GetSerialById errors for an event ID
// that was never stored. Skipped unless ORLY_NEO4J_URI points at a live Neo4j
// instance.
func TestGetSerialById_NonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Build a syntactically valid 32-byte ID that cannot exist; the decode
	// error was previously discarded, which would have masked a bad fixture.
	fakeID, err := hex.Dec("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	if err != nil {
		t.Fatalf("Failed to decode fake ID: %v", err)
	}
	_, err = db.GetSerialById(fakeID)
	if err == nil {
		t.Fatal("Expected error for non-existent event ID")
	}
	t.Logf("✓ GetSerialById correctly returned error for non-existent ID")
}
// TestGetSerialsByIds saves three events and verifies GetSerialsByIds resolves
// all of their hex-encoded IDs to serials in one call. Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestGetSerialsByIds(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save multiple events, accumulating their IDs as tags.
	ids := tag.NewS()
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")
		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
		ids.Append(tag.NewFromAny("", hex.Enc(ev.ID[:])))
	}
	// Get serials by IDs
	serials, err := db.GetSerialsByIds(ids)
	if err != nil {
		t.Fatalf("Failed to get serials by IDs: %v", err)
	}
	if len(serials) != 3 {
		t.Fatalf("Expected 3 serials, got %d", len(serials))
	}
	t.Logf("✓ GetSerialsByIds returned %d serials", len(serials))
}
// TestGetFullIdPubkeyBySerial saves an event and checks that
// GetFullIdPubkeyBySerial returns the matching full ID, pubkey and timestamp
// for its serial. Skipped unless ORLY_NEO4J_URI points at a live Neo4j
// instance.
func TestGetFullIdPubkeyBySerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")
	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	// Get serial
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial: %v", err)
	}
	// Get full ID and pubkey
	idPkTs, err := db.GetFullIdPubkeyBySerial(serial)
	if err != nil {
		t.Fatalf("Failed to get full ID and pubkey: %v", err)
	}
	if idPkTs == nil {
		t.Fatal("Expected non-nil result")
	}
	// All three fields must round-trip exactly.
	if hex.Enc(idPkTs.Id) != hex.Enc(ev.ID[:]) {
		t.Fatalf("ID mismatch")
	}
	if hex.Enc(idPkTs.Pub) != hex.Enc(ev.Pubkey[:]) {
		t.Fatalf("Pubkey mismatch")
	}
	if idPkTs.Ts != ev.CreatedAt {
		t.Fatalf("Timestamp mismatch")
	}
	t.Logf("✓ GetFullIdPubkeyBySerial returned correct data")
}
// TestQueryForSerials saves five events by one author and verifies
// QueryForSerials with an author filter returns five serials. Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestQueryForSerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Create and save events
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")
		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}
		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}
	// Query for serials by author
	serials, err := db.QueryForSerials(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query for serials: %v", err)
	}
	if len(serials) != 5 {
		t.Fatalf("Expected 5 serials, got %d", len(serials))
	}
	t.Logf("✓ QueryForSerials returned %d serials", len(serials))
}

117
pkg/neo4j/import-export.go

@ -8,6 +8,8 @@ import ( @@ -8,6 +8,8 @@ import (
"io"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/tag"
)
// Import imports events from a reader (JSONL format)
@ -16,12 +18,119 @@ func (n *N) Import(rr io.Reader) { @@ -16,12 +18,119 @@ func (n *N) Import(rr io.Reader) {
}
// Export exports events to a writer (JSONL format).
// If pubkeys are provided, only events from those authors are exported;
// otherwise all events are exported, oldest first. Only event JSON is ever
// written to w, so the output is always valid JSONL (an empty database yields
// empty output); failures are reported via the logger instead.
func (n *N) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
	// Build the Cypher query: optionally restricted by author pubkeys.
	var cypher string
	params := make(map[string]any)
	if len(pubkeys) > 0 {
		// Export events for specific pubkeys
		pubkeyStrings := make([]string, len(pubkeys))
		for i, pk := range pubkeys {
			pubkeyStrings[i] = hex.Enc(pk)
		}
		params["pubkeys"] = pubkeyStrings
		cypher = `
			MATCH (e:Event)
			WHERE e.pubkey IN $pubkeys
			RETURN e.id AS id, e.kind AS kind, e.pubkey AS pubkey,
			       e.created_at AS created_at, e.content AS content,
			       e.sig AS sig, e.tags AS tags
			ORDER BY e.created_at ASC`
	} else {
		// Export all events
		cypher = `
			MATCH (e:Event)
			RETURN e.id AS id, e.kind AS kind, e.pubkey AS pubkey,
			       e.created_at AS created_at, e.content AS content,
			       e.sig AS sig, e.tags AS tags
			ORDER BY e.created_at ASC`
	}
	result, err := n.ExecuteRead(c, cypher, params)
	if err != nil {
		// Log only: writing an error marker into w would corrupt the JSONL
		// stream consumers expect (e.g. Import round-trips).
		n.Logger.Warningf("failed to query events for export: %v", err)
		return
	}
	count := 0
	for result.Next(c) {
		record := result.Record()
		if record == nil {
			continue
		}
		// Rebuild the event from the returned columns; malformed columns are
		// skipped field-by-field rather than aborting the whole export.
		ev := &event.E{}
		// Parse ID (32-byte hex)
		if idRaw, found := record.Get("id"); found {
			if idStr, ok := idRaw.(string); ok {
				if idBytes, err := hex.Dec(idStr); err == nil && len(idBytes) == 32 {
					copy(ev.ID[:], idBytes)
				}
			}
		}
		// Parse kind
		if kindRaw, found := record.Get("kind"); found {
			if kindVal, ok := kindRaw.(int64); ok {
				ev.Kind = uint16(kindVal)
			}
		}
		// Parse pubkey (32-byte hex)
		if pkRaw, found := record.Get("pubkey"); found {
			if pkStr, ok := pkRaw.(string); ok {
				if pkBytes, err := hex.Dec(pkStr); err == nil && len(pkBytes) == 32 {
					copy(ev.Pubkey[:], pkBytes)
				}
			}
		}
		// Parse created_at
		if tsRaw, found := record.Get("created_at"); found {
			if tsVal, ok := tsRaw.(int64); ok {
				ev.CreatedAt = tsVal
			}
		}
		// Parse content
		if contentRaw, found := record.Get("content"); found {
			if contentStr, ok := contentRaw.(string); ok {
				ev.Content = []byte(contentStr)
			}
		}
		// Parse sig (64-byte hex)
		if sigRaw, found := record.Get("sig"); found {
			if sigStr, ok := sigRaw.(string); ok {
				if sigBytes, err := hex.Dec(sigStr); err == nil && len(sigBytes) == 64 {
					copy(ev.Sig[:], sigBytes)
				}
			}
		}
		// Parse tags (stored as JSON string)
		if tagsRaw, found := record.Get("tags"); found {
			if tagsStr, ok := tagsRaw.(string); ok {
				ev.Tags = &tag.S{}
				if err := json.Unmarshal([]byte(tagsStr), ev.Tags); err != nil {
					n.Logger.Warningf("failed to unmarshal tags: %v", err)
				}
			}
		}
		// Write event as JSON line
		if evJSON, err := json.Marshal(ev); err == nil {
			fmt.Fprintf(w, "%s\n", evJSON)
			count++
		}
	}
	// The previous stub trailer ("# Export not yet implemented for neo4j")
	// was removed: it appended a non-JSON line to every export, breaking
	// empty exports and import round-trips.
	n.Logger.Infof("exported %d events", count)
}
// ImportEventsFromReader imports events from a reader

342
pkg/neo4j/nip43_test.go

@ -0,0 +1,342 @@ @@ -0,0 +1,342 @@
package neo4j
import (
"context"
"os"
"testing"
"time"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)
// TestNIP43_AddAndRemoveMember verifies the NIP-43 membership lifecycle:
// adding a member, checking membership, reading back the stored invite code,
// and removal. Skipped unless ORLY_NEO4J_URI points at a live Neo4j instance.
func TestNIP43_AddAndRemoveMember(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Fail fast on key-generation errors instead of ignoring them.
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	pubkey := signer.Pub()
	// Add member
	inviteCode := "test-invite-123"
	if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
		t.Fatalf("Failed to add NIP-43 member: %v", err)
	}
	// Check membership
	isMember, err := db.IsNIP43Member(pubkey)
	if err != nil {
		t.Fatalf("Failed to check membership: %v", err)
	}
	if !isMember {
		t.Fatal("Expected pubkey to be a member")
	}
	// Get membership details
	membership, err := db.GetNIP43Membership(pubkey)
	if err != nil {
		t.Fatalf("Failed to get membership: %v", err)
	}
	if membership.InviteCode != inviteCode {
		t.Fatalf("Invite code mismatch: got %s, expected %s", membership.InviteCode, inviteCode)
	}
	// Remove member
	if err := db.RemoveNIP43Member(pubkey); err != nil {
		t.Fatalf("Failed to remove member: %v", err)
	}
	// Verify no longer a member; the lookup itself must also succeed.
	isMember, err = db.IsNIP43Member(pubkey)
	if err != nil {
		t.Fatalf("Failed to check membership after removal: %v", err)
	}
	if isMember {
		t.Fatal("Expected pubkey to not be a member after removal")
	}
	t.Logf("✓ NIP-43 add and remove member works correctly")
}
// TestNIP43_GetAllMembers adds three members and verifies GetAllNIP43Members
// returns exactly those pubkeys. Skipped unless ORLY_NEO4J_URI points at a
// live Neo4j instance.
func TestNIP43_GetAllMembers(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Add multiple members, failing fast on key-generation errors instead of
	// ignoring them.
	var pubkeys [][]byte
	for i := 0; i < 3; i++ {
		signer, err := p8k.New()
		if err != nil {
			t.Fatalf("Failed to create signer %d: %v", i, err)
		}
		if err := signer.Generate(); err != nil {
			t.Fatalf("Failed to generate keypair %d: %v", i, err)
		}
		pubkey := signer.Pub()
		pubkeys = append(pubkeys, pubkey)
		if err := db.AddNIP43Member(pubkey, "invite"+string(rune('A'+i))); err != nil {
			t.Fatalf("Failed to add member %d: %v", i, err)
		}
	}
	// Get all members
	members, err := db.GetAllNIP43Members()
	if err != nil {
		t.Fatalf("Failed to get all members: %v", err)
	}
	if len(members) != 3 {
		t.Fatalf("Expected 3 members, got %d", len(members))
	}
	// Verify all added pubkeys are in the members list
	memberMap := make(map[string]bool)
	for _, m := range members {
		memberMap[hex.Enc(m)] = true
	}
	for i, pk := range pubkeys {
		if !memberMap[hex.Enc(pk)] {
			t.Fatalf("Member %d not found in list", i)
		}
	}
	t.Logf("✓ GetAllNIP43Members returned %d members", len(members))
}
// TestNIP43_InviteCode exercises invite-code storage: a stored unexpired code
// validates, an unknown code does not, and a deleted code stops validating.
// Skipped unless ORLY_NEO4J_URI points at a live Neo4j instance.
func TestNIP43_InviteCode(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Store valid invite code (expires in 1 hour)
	validCode := "valid-code-123"
	expiresAt := time.Now().Add(1 * time.Hour)
	if err := db.StoreInviteCode(validCode, expiresAt); err != nil {
		t.Fatalf("Failed to store invite code: %v", err)
	}
	// Validate the code
	isValid, err := db.ValidateInviteCode(validCode)
	if err != nil {
		t.Fatalf("Failed to validate invite code: %v", err)
	}
	if !isValid {
		t.Fatal("Expected valid invite code to be valid")
	}
	// Test non-existent code
	isValid, err = db.ValidateInviteCode("non-existent-code")
	if err != nil {
		t.Fatalf("Failed to validate non-existent code: %v", err)
	}
	if isValid {
		t.Fatal("Expected non-existent code to be invalid")
	}
	// Delete the invite code
	if err := db.DeleteInviteCode(validCode); err != nil {
		t.Fatalf("Failed to delete invite code: %v", err)
	}
	// Verify code is no longer valid; the lookup itself must also succeed
	// (the error was previously discarded).
	isValid, err = db.ValidateInviteCode(validCode)
	if err != nil {
		t.Fatalf("Failed to validate deleted code: %v", err)
	}
	if isValid {
		t.Fatal("Expected deleted code to be invalid")
	}
	t.Logf("✓ NIP-43 invite code operations work correctly")
}
// TestNIP43_ExpiredInviteCode ensures ValidateInviteCode treats a code whose
// expiry time is in the past as invalid. Skipped unless ORLY_NEO4J_URI points
// at a live Neo4j instance.
func TestNIP43_ExpiredInviteCode(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	c, stop := context.WithCancel(context.Background())
	defer stop()
	store, err := New(c, stop, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer store.Close()
	<-store.Ready()
	if err = store.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Persist a code whose expiry lies one hour in the past.
	expiredCode := "expired-code-123"
	if err = store.StoreInviteCode(expiredCode, time.Now().Add(-1*time.Hour)); err != nil {
		t.Fatalf("Failed to store expired invite code: %v", err)
	}
	// Validation must succeed as an operation but report the code invalid.
	ok, err := store.ValidateInviteCode(expiredCode)
	if err != nil {
		t.Fatalf("Failed to validate expired code: %v", err)
	}
	if ok {
		t.Fatal("Expected expired code to be invalid")
	}
	t.Logf("✓ Expired invite code correctly detected as invalid")
}
// TestNIP43_DuplicateMember checks that re-adding an existing member is an
// update rather than an error or a duplicate entry. Skipped unless
// ORLY_NEO4J_URI points at a live Neo4j instance.
func TestNIP43_DuplicateMember(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Fail fast on key-generation errors instead of ignoring them.
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	pubkey := signer.Pub()
	// Add member first time
	if err := db.AddNIP43Member(pubkey, "invite1"); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}
	// Add same member again (should not error, just update)
	if err := db.AddNIP43Member(pubkey, "invite2"); err != nil {
		t.Fatalf("Failed to re-add member: %v", err)
	}
	// Check membership still exists; the lookup error was previously ignored.
	isMember, err := db.IsNIP43Member(pubkey)
	if err != nil {
		t.Fatalf("Failed to check membership: %v", err)
	}
	if !isMember {
		t.Fatal("Expected pubkey to still be a member")
	}
	// Get all members should have only 1 entry
	members, err := db.GetAllNIP43Members()
	if err != nil {
		t.Fatalf("Failed to get all members: %v", err)
	}
	if len(members) != 1 {
		t.Fatalf("Expected 1 member, got %d", len(members))
	}
	t.Logf("✓ Duplicate member handling works correctly")
}
// TestNIP43_MembershipPersistence adds a member and verifies every stored
// membership field (invite code, added-at timestamp, pubkey) reads back
// correctly. Skipped unless ORLY_NEO4J_URI points at a live Neo4j instance.
func TestNIP43_MembershipPersistence(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Fail fast on key-generation errors instead of ignoring them.
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	pubkey := signer.Pub()
	// Add member
	inviteCode := "persistence-test"
	if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}
	// Get membership and verify all fields
	membership, err := db.GetNIP43Membership(pubkey)
	if err != nil {
		t.Fatalf("Failed to get membership: %v", err)
	}
	if membership.InviteCode != inviteCode {
		t.Fatalf("InviteCode mismatch")
	}
	if membership.AddedAt.IsZero() {
		t.Fatal("AddedAt should not be zero")
	}
	// Verify the pubkey in membership matches
	if hex.Enc(membership.Pubkey[:]) != hex.Enc(pubkey) {
		t.Fatal("Pubkey mismatch in membership")
	}
	t.Logf("✓ NIP-43 membership persistence verified")
}

452
pkg/neo4j/query-events_test.go

@ -0,0 +1,452 @@ @@ -0,0 +1,452 @@
package neo4j
import (
"context"
"os"
"testing"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/timestamp"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)
// setupTestDatabase creates a fresh Neo4j database connection for testing.
// It skips the test when ORLY_NEO4J_URI is unset, waits for the database to
// become ready, and wipes any existing data before handing it to the caller.
func setupTestDatabase(t *testing.T) (*N, context.Context, context.CancelFunc) {
	t.Helper()
	uri := os.Getenv("ORLY_NEO4J_URI")
	if uri == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	dir := t.TempDir()
	db, err := New(ctx, cancel, dir, "debug")
	if err != nil {
		cancel()
		t.Fatalf("Failed to create database: %v", err)
	}
	<-db.Ready()
	// Clean out leftover data; on failure, release resources before failing.
	if wipeErr := db.Wipe(); wipeErr != nil {
		db.Close()
		cancel()
		t.Fatalf("Failed to wipe database: %v", wipeErr)
	}
	return db, ctx, cancel
}
// createTestSigner creates a new signer for test events, failing the test
// immediately if key construction or generation does not succeed.
func createTestSigner(t *testing.T) *p8k.Signer {
	t.Helper()
	s, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if genErr := s.Generate(); genErr != nil {
		t.Fatalf("Failed to generate keypair: %v", genErr)
	}
	return s
}
// createAndSaveEvent creates a signed event and saves it to the database.
// The event is populated from the given kind, content, tags, and timestamp,
// signed with the supplied signer, persisted, and returned for assertions.
func createAndSaveEvent(t *testing.T, ctx context.Context, db *N, signer *p8k.Signer, k uint16, content string, tags *tag.S, ts int64) *event.E {
	t.Helper()
	ev := event.New()
	// Fill in the event fields before signing (signing covers all of them).
	ev.Kind = k
	ev.CreatedAt = ts
	ev.Content = []byte(content)
	ev.Tags = tags
	ev.Pubkey = signer.Pub()
	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	return ev
}
// TestQueryEventsByID stores one event and verifies it can be retrieved by
// an Ids filter containing exactly that event's ID.
func TestQueryEventsByID(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	// Persist a single event, then look it up by its ID.
	ev := createAndSaveEvent(t, ctx, db, signer, 1, "Test event for ID query", nil, timestamp.Now().V)
	got, err := db.QueryEvents(ctx, &filter.F{Ids: tag.NewFromBytesSlice(ev.ID)})
	if err != nil {
		t.Fatalf("Failed to query events by ID: %v", err)
	}
	if len(got) != 1 {
		t.Fatalf("Expected 1 event, got %d", len(got))
	}
	want := hex.Enc(ev.ID[:])
	if have := hex.Enc(got[0].ID[:]); have != want {
		t.Fatalf("Event ID mismatch: got %s, expected %s", have, want)
	}
	t.Logf("✓ Query by ID returned correct event")
}
// TestQueryEventsByKind seeds events of several kinds and verifies a Kinds
// filter returns only the matching ones.
func TestQueryEventsByKind(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Seed a mix of kinds so the filter has to discriminate.
	createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event A", nil, baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event B", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, signer, 7, "Kind 7 reaction", nil, baseTs+2)
	createAndSaveEvent(t, ctx, db, signer, 30023, "Kind 30023 article", nil, baseTs+3)
	// Only the two kind-1 events should come back.
	results, err := db.QueryEvents(ctx, &filter.F{Kinds: kind.NewS(kind.New(1))})
	if err != nil {
		t.Fatalf("Failed to query events by kind: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("Expected 2 kind 1 events, got %d", len(results))
	}
	for i := range results {
		if results[i].Kind != 1 {
			t.Fatalf("Expected kind 1, got %d", results[i].Kind)
		}
	}
	t.Logf("✓ Query by kind returned %d correct events", len(results))
}
// TestQueryEventsByAuthor verifies an Authors filter returns only events
// signed by the requested pubkey.
func TestQueryEventsByAuthor(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	alice := createTestSigner(t)
	bob := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Two events from Alice, one from Bob.
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 1", nil, baseTs)
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 2", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, bob, 1, "Bob's event", nil, baseTs+2)
	// Filter on Alice's pubkey only.
	results, err := db.QueryEvents(ctx, &filter.F{Authors: tag.NewFromBytesSlice(alice.Pub())})
	if err != nil {
		t.Fatalf("Failed to query events by author: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("Expected 2 events from Alice, got %d", len(results))
	}
	want := hex.Enc(alice.Pub())
	for i := range results {
		if have := hex.Enc(results[i].Pubkey[:]); have != want {
			t.Fatalf("Expected author %s, got %s", want, have)
		}
	}
	t.Logf("✓ Query by author returned %d correct events", len(results))
}
// TestQueryEventsByTimeRange checks that a Since filter excludes events
// created before the cutoff and returns only the newer ones.
func TestQueryEventsByTimeRange(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Create events at different times
	createAndSaveEvent(t, ctx, db, signer, 1, "Old event", nil, baseTs-7200)    // 2 hours ago
	createAndSaveEvent(t, ctx, db, signer, 1, "Recent event", nil, baseTs-1800) // 30 min ago
	createAndSaveEvent(t, ctx, db, signer, 1, "Current event", nil, baseTs)
	// Query for events in the last hour
	since := &timestamp.T{V: baseTs - 3600}
	evs, err := db.QueryEvents(ctx, &filter.F{
		Since: since,
	})
	if err != nil {
		t.Fatalf("Failed to query events by time range: %v", err)
	}
	// Only the 30-min-old and current events fall inside the window.
	if len(evs) != 2 {
		t.Fatalf("Expected 2 events in last hour, got %d", len(evs))
	}
	// Every returned event must be at or after the cutoff.
	for _, ev := range evs {
		if ev.CreatedAt < since.V {
			t.Fatalf("Event created_at %d is before since %d", ev.CreatedAt, since.V)
		}
	}
	t.Logf("✓ Query by time range returned %d correct events", len(evs))
}
// TestQueryEventsByTag verifies that a t-tag filter matches events carrying
// that tag, including events that carry additional tags.
func TestQueryEventsByTag(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// One bitcoin-only post, one nostr-only post, one with both tags.
	createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin post",
		tag.NewS(tag.NewFromAny("t", "bitcoin")), baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Nostr post",
		tag.NewS(tag.NewFromAny("t", "nostr")), baseTs+1)
	createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin and Nostr post",
		tag.NewS(tag.NewFromAny("t", "bitcoin"), tag.NewFromAny("t", "nostr")), baseTs+2)
	// The bitcoin filter should match the first and third events.
	results, err := db.QueryEvents(ctx, &filter.F{Tags: tag.NewS(tag.NewFromAny("t", "bitcoin"))})
	if err != nil {
		t.Fatalf("Failed to query events by tag: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("Expected 2 bitcoin-tagged events, got %d", len(results))
	}
	t.Logf("✓ Query by tag returned %d correct events", len(results))
}
// TestQueryEventsByKindAndAuthor verifies that combining Kinds and Authors
// filters intersects both conditions.
func TestQueryEventsByKindAndAuthor(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	alice := createTestSigner(t)
	bob := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Alice posts a note and a reaction; Bob posts a note.
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice note", nil, baseTs)
	createAndSaveEvent(t, ctx, db, alice, 7, "Alice reaction", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, bob, 1, "Bob note", nil, baseTs+2)
	// Only Alice's kind-1 note satisfies both constraints.
	q := &filter.F{
		Kinds:   kind.NewS(kind.New(1)),
		Authors: tag.NewFromBytesSlice(alice.Pub()),
	}
	results, err := db.QueryEvents(ctx, q)
	if err != nil {
		t.Fatalf("Failed to query events by kind and author: %v", err)
	}
	if len(results) != 1 {
		t.Fatalf("Expected 1 kind 1 event from Alice, got %d", len(results))
	}
	t.Logf("✓ Query by kind and author returned correct events")
}
// TestQueryEventsWithLimit verifies that the Limit field caps the number of
// returned events.
func TestQueryEventsWithLimit(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Insert more events than the limit will allow back.
	for n := 0; n < 20; n++ {
		createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(n))
	}
	limit := 5
	results, err := db.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
		Limit: limit,
	})
	if err != nil {
		t.Fatalf("Failed to query events with limit: %v", err)
	}
	if len(results) != limit {
		t.Fatalf("Expected %d events with limit, got %d", limit, len(results))
	}
	t.Logf("✓ Query with limit returned %d events", len(results))
}
// TestQueryEventsOrderByCreatedAt verifies that results come back sorted by
// created_at in descending (newest-first) order.
func TestQueryEventsOrderByCreatedAt(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Three events with strictly increasing timestamps.
	createAndSaveEvent(t, ctx, db, signer, 1, "First", nil, baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Second", nil, baseTs+100)
	createAndSaveEvent(t, ctx, db, signer, 1, "Third", nil, baseTs+200)
	results, err := db.QueryEvents(ctx, &filter.F{Kinds: kind.NewS(kind.New(1))})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(results) < 2 {
		t.Fatalf("Expected at least 2 events, got %d", len(results))
	}
	// Each event must be no newer than its predecessor.
	for idx := 1; idx < len(results); idx++ {
		prev, cur := results[idx-1].CreatedAt, results[idx].CreatedAt
		if prev < cur {
			t.Fatalf("Events not in descending order: %d < %d at index %d",
				prev, cur, idx)
		}
	}
	t.Logf("✓ Query returned events in correct descending order")
}
// TestQueryEventsEmpty verifies that a filter matching nothing returns an
// empty result set without error.
func TestQueryEventsEmpty(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	// No events of this kind exist in the freshly wiped database.
	results, err := db.QueryEvents(ctx, &filter.F{Kinds: kind.NewS(kind.New(99999))})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(results) != 0 {
		t.Fatalf("Expected 0 events, got %d", len(results))
	}
	t.Logf("✓ Query for non-existent kind returned empty result")
}
// TestQueryEventsMultipleKinds verifies that a Kinds filter with several
// kinds matches events of any listed kind.
func TestQueryEventsMultipleKinds(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// One event each of three distinct kinds.
	createAndSaveEvent(t, ctx, db, signer, 1, "Note", nil, baseTs)
	createAndSaveEvent(t, ctx, db, signer, 7, "Reaction", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, signer, 30023, "Article", nil, baseTs+2)
	// Kinds 1 and 7 should match; kind 30023 should be excluded.
	results, err := db.QueryEvents(ctx, &filter.F{Kinds: kind.NewS(kind.New(1), kind.New(7))})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("Expected 2 events (kind 1 and 7), got %d", len(results))
	}
	t.Logf("✓ Query for multiple kinds returned correct events")
}
// TestQueryEventsMultipleAuthors checks that an Authors filter containing two
// pubkeys matches events from both and excludes a third author.
func TestQueryEventsMultipleAuthors(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	alice := createTestSigner(t)
	bob := createTestSigner(t)
	charlie := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Create events from different authors
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice", nil, baseTs)
	createAndSaveEvent(t, ctx, db, bob, 1, "Bob", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, charlie, 1, "Charlie", nil, baseTs+2)
	// Build a two-author filter: start with Alice, then append Bob's pubkey.
	// NOTE(review): GetFirst(nil) presumably extracts the single element of
	// the one-value tag so Append can add it — confirm against the tag API.
	authors := tag.NewFromBytesSlice(alice.Pub())
	authors.Append(tag.NewFromBytesSlice(bob.Pub()).GetFirst(nil))
	evs, err := db.QueryEvents(ctx, &filter.F{
		Authors: authors,
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(evs) != 2 {
		t.Fatalf("Expected 2 events from Alice and Bob, got %d", len(evs))
	}
	t.Logf("✓ Query for multiple authors returned correct events")
}
// TestCountEvents verifies that CountEvents returns the number of events
// matching a filter without fetching them.
func TestCountEvents(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()
	signer := createTestSigner(t)
	baseTs := timestamp.Now().V
	// Insert five kind-1 events.
	const want = 5
	for n := 0; n < want; n++ {
		createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(n))
	}
	count, err := db.CountEvents(ctx, &filter.F{Kinds: kind.NewS(kind.New(1))})
	if err != nil {
		t.Fatalf("Failed to count events: %v", err)
	}
	if count != want {
		t.Fatalf("Expected count 5, got %d", count)
	}
	t.Logf("✓ Count events returned correct count: %d", count)
}

24
pkg/neo4j/save-event.go

@@ -3,13 +3,19 @@ package neo4j
import (
"context"
"fmt"
"strconv"
"next.orly.dev/pkg/database/indexes/types"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"next.orly.dev/pkg/database/indexes/types"
)
// parseInt64 parses a string to int64 (base 10, 64-bit). Thin wrapper over
// strconv.ParseInt used when reading numeric tag values such as the NIP-40
// expiration timestamp.
func parseInt64(s string) (int64, error) {
	return strconv.ParseInt(s, 10, 64)
}
// SaveEvent stores a Nostr event in the Neo4j database.
// It creates event nodes and relationships for authors, tags, and references.
// This method leverages Neo4j's graph capabilities to model Nostr's social graph naturally.
@@ -96,6 +102,17 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
params["sig"] = hex.Enc(ev.Sig[:])
params["pubkey"] = authorPubkey
// Check for expiration tag (NIP-40)
var expirationTs int64 = 0
if ev.Tags != nil {
if expTag := ev.Tags.GetFirst([]byte("expiration")); expTag != nil && len(expTag.T) >= 2 {
if ts, err := parseInt64(string(expTag.T[1])); err == nil {
expirationTs = ts
}
}
}
params["expiration"] = expirationTs
// Serialize tags as JSON string for storage
// Handle nil tags gracefully - nil means empty tags "[]"
var tagsJSON []byte
@@ -112,7 +129,7 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
// Create or match author node
MERGE (a:Author {pubkey: $pubkey})
// Create event node
// Create event node with expiration for NIP-40 support
CREATE (e:Event {
id: $eventId,
serial: $serial,
@@ -121,7 +138,8 @@ CREATE (e:Event {
content: $content,
sig: $sig,
pubkey: $pubkey,
tags: $tags
tags: $tags,
expiration: $expiration
})
// Link event to author

5
pkg/neo4j/schema.go

@@ -125,6 +125,10 @@ func (n *N) applySchema(ctx context.Context) error {
// Used for cursor-based pagination and sync operations
"CREATE INDEX event_serial IF NOT EXISTS FOR (e:Event) ON (e.serial)",
// OPTIONAL (NIP-40): Event.expiration for expired event cleanup
// Used by DeleteExpired to efficiently find events past their expiration time
"CREATE INDEX event_expiration IF NOT EXISTS FOR (e:Event) ON (e.expiration)",
// ============================================================
// === OPTIONAL: Social Graph Event Processing Indexes ===
// Support tracking of processed social events for graph updates
@@ -230,6 +234,7 @@ func (n *N) dropAll(ctx context.Context) error {
// OPTIONAL (Internal) indexes
"DROP INDEX event_serial IF EXISTS",
"DROP INDEX event_expiration IF EXISTS",
// OPTIONAL (Social Graph) indexes
"DROP INDEX processedSocialEvent_pubkey_kind IF EXISTS",

436
pkg/neo4j/subscriptions_test.go

@@ -0,0 +1,436 @@
package neo4j
import (
"context"
"os"
"testing"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)
// TestSubscriptions_AddAndRemove verifies a subscription can be registered
// and removed, with the tracked count updating accordingly.
func TestSubscriptions_AddAndRemove(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Register a single subscription and confirm it is tracked.
	subID := "test-sub-123"
	db.AddSubscription(subID, &filter.F{Kinds: kind.NewS(kind.New(1))})
	if got := db.GetSubscriptionCount(); got != 1 {
		t.Fatalf("Expected 1 subscription, got %d", got)
	}
	// Dropping it should bring the count back to zero.
	db.RemoveSubscription(subID)
	if got := db.GetSubscriptionCount(); got != 0 {
		t.Fatalf("Expected 0 subscriptions after removal, got %d", got)
	}
	t.Logf("✓ Subscription add/remove works correctly")
}
// TestSubscriptions_MultipleSubscriptions verifies the subscription registry
// tracks several concurrent subscriptions, supports selective removal, and
// can be cleared in one call.
func TestSubscriptions_MultipleSubscriptions(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Add multiple subscriptions with IDs "A" through "E", each filtering on
	// a different kind.
	for i := 0; i < 5; i++ {
		subID := string(rune('A' + i))
		f := &filter.F{
			Kinds: kind.NewS(kind.New(uint16(i + 1))),
		}
		db.AddSubscription(subID, f)
	}
	// Get subscription count
	count := db.GetSubscriptionCount()
	if count != 5 {
		t.Fatalf("Expected 5 subscriptions, got %d", count)
	}
	// Remove some subscriptions
	db.RemoveSubscription("A")
	db.RemoveSubscription("C")
	count = db.GetSubscriptionCount()
	if count != 3 {
		t.Fatalf("Expected 3 subscriptions after removal, got %d", count)
	}
	// Clear all subscriptions
	db.ClearSubscriptions()
	count = db.GetSubscriptionCount()
	if count != 0 {
		t.Fatalf("Expected 0 subscriptions after clear, got %d", count)
	}
	t.Logf("✓ Multiple subscriptions managed correctly")
}
// TestSubscriptions_DuplicateID verifies that adding a subscription with an
// already-used ID replaces the previous one instead of duplicating it.
func TestSubscriptions_DuplicateID(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	subID := "duplicate-test"
	// Register twice under the same ID with different filters; the second
	// registration should overwrite the first.
	db.AddSubscription(subID, &filter.F{Kinds: kind.NewS(kind.New(1))})
	db.AddSubscription(subID, &filter.F{Kinds: kind.NewS(kind.New(7))})
	if got := db.GetSubscriptionCount(); got != 1 {
		t.Fatalf("Expected 1 subscription (duplicate replaced), got %d", got)
	}
	t.Logf("✓ Duplicate subscription ID handling works correctly")
}
// TestSubscriptions_RemoveNonExistent verifies that removing an unknown
// subscription ID is a harmless no-op.
func TestSubscriptions_RemoveNonExistent(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Removing an ID that was never added must not panic or change state.
	db.RemoveSubscription("non-existent")
	if got := db.GetSubscriptionCount(); got != 0 {
		t.Fatalf("Expected 0 subscriptions, got %d", got)
	}
	t.Logf("✓ Removing non-existent subscription handled gracefully")
}
// TestMarkers_SetGetDelete exercises the full marker lifecycle: set, read
// back, overwrite, delete, and confirm reads of a deleted key fail.
func TestMarkers_SetGetDelete(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Start from a clean database so earlier markers cannot interfere.
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Set a marker
	key := "test-marker"
	value := []byte("test-value-123")
	if err := db.SetMarker(key, value); err != nil {
		t.Fatalf("Failed to set marker: %v", err)
	}
	// Get the marker
	retrieved, err := db.GetMarker(key)
	if err != nil {
		t.Fatalf("Failed to get marker: %v", err)
	}
	if string(retrieved) != string(value) {
		t.Fatalf("Marker value mismatch: got %s, expected %s", string(retrieved), string(value))
	}
	// Update the marker: SetMarker on an existing key overwrites the value.
	newValue := []byte("updated-value")
	if err := db.SetMarker(key, newValue); err != nil {
		t.Fatalf("Failed to update marker: %v", err)
	}
	retrieved, err = db.GetMarker(key)
	if err != nil {
		t.Fatalf("Failed to get updated marker: %v", err)
	}
	if string(retrieved) != string(newValue) {
		t.Fatalf("Updated marker value mismatch")
	}
	// Delete the marker
	if err := db.DeleteMarker(key); err != nil {
		t.Fatalf("Failed to delete marker: %v", err)
	}
	// Verify marker is deleted: GetMarker must now return an error.
	_, err = db.GetMarker(key)
	if err == nil {
		t.Fatal("Expected error when getting deleted marker")
	}
	t.Logf("✓ Markers set/get/delete works correctly")
}
// TestMarkers_GetNonExistent verifies GetMarker reports an error for a key
// that was never set.
func TestMarkers_GetNonExistent(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// A lookup of an unknown key must fail rather than return an empty value.
	if _, err = db.GetMarker("non-existent-marker"); err == nil {
		t.Fatal("Expected error when getting non-existent marker")
	}
	t.Logf("✓ Getting non-existent marker returns error as expected")
}
// TestSerial_GetNextSerial verifies that getNextSerial produces strictly
// increasing (and therefore unique) serial numbers across successive calls.
func TestSerial_GetNextSerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Start from a clean database so the serial counter state is known.
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Get first serial
	serial1, err := db.getNextSerial()
	if err != nil {
		t.Fatalf("Failed to get first serial: %v", err)
	}
	// Get second serial
	serial2, err := db.getNextSerial()
	if err != nil {
		t.Fatalf("Failed to get second serial: %v", err)
	}
	// Serial should increment
	if serial2 <= serial1 {
		t.Fatalf("Expected serial to increment: serial1=%d, serial2=%d", serial1, serial2)
	}
	// Get multiple more serials and verify they're all unique and increasing
	var serials []uint64
	for i := 0; i < 10; i++ {
		s, err := db.getNextSerial()
		if err != nil {
			t.Fatalf("Failed to get serial %d: %v", i, err)
		}
		serials = append(serials, s)
	}
	// Strictly increasing implies no duplicates.
	for i := 1; i < len(serials); i++ {
		if serials[i] <= serials[i-1] {
			t.Fatalf("Serials not increasing: %d <= %d", serials[i], serials[i-1])
		}
	}
	t.Logf("✓ Serial generation works correctly (generated %d unique serials)", len(serials)+2)
}
// TestDatabaseReady verifies the Ready channel closes once the database has
// finished initializing.
func TestDatabaseReady(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	// Blocks until initialization completes; the test hangs (and times out)
	// if the ready signal is never delivered.
	<-db.Ready()
	t.Logf("✓ Database ready signal works correctly")
}
// TestIdentity verifies Identity returns a signer (creating one on first
// use) and that repeated calls yield the same public key.
func TestIdentity(t *testing.T) {
	if os.Getenv("ORLY_NEO4J_URI") == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	db, err := New(ctx, cancel, t.TempDir(), "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// First call creates the identity if it does not yet exist.
	first := db.Identity()
	if first == nil {
		t.Fatal("Expected non-nil signer from Identity()")
	}
	// Second call must hand back the same identity.
	second := db.Identity()
	if second == nil {
		t.Fatal("Expected non-nil signer from second Identity() call")
	}
	pub1, pub2 := first.Pub(), second.Pub()
	for i := range pub1 {
		if pub1[i] != pub2[i] {
			t.Fatal("Identity pubkeys don't match across calls")
		}
	}
	t.Logf("✓ Identity persistence works correctly")
}
// TestWipe verifies that Wipe removes previously stored data (here a NIP-43
// membership) from the database.
func TestWipe(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	<-db.Ready()
	// Fail fast on key setup instead of silently ignoring the errors, so a
	// broken signer cannot masquerade as a passing wipe test.
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	// Add some data
	if err := db.AddNIP43Member(signer.Pub(), "test"); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}
	// Wipe the database
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}
	// Verify data is gone; surface lookup errors rather than discarding them.
	isMember, err := db.IsNIP43Member(signer.Pub())
	if err != nil {
		t.Fatalf("Failed to check membership: %v", err)
	}
	if isMember {
		t.Fatal("Expected data to be wiped")
	}
	t.Logf("✓ Wipe clears database correctly")
}

2
pkg/version/version

@@ -1 +1 @@
v0.31.11
v0.32.0

614
pkg/wasmdb/delete-event.go

@@ -0,0 +1,614 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
hexenc "git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/ints"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/tag/atag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
"next.orly.dev/pkg/utils"
)
// DeleteEvent removes an event from the database identified by `eid`.
// It resolves the event ID to its serial, fetches the stored event (whose
// contents are needed to derive index keys), and delegates removal to
// DeleteEventBySerial. An unknown or already-removed event is a no-op.
func (w *W) DeleteEvent(c context.Context, eid []byte) (err error) {
	w.Logger.Warnf("deleting event %0x", eid)
	// Map the event ID onto its serial number.
	var ser *types.Uint40
	if ser, err = w.GetSerialById(eid); chk.E(err) {
		return
	}
	if ser == nil {
		// Unknown ID: nothing to delete.
		return
	}
	// Load the full event so its index keys can be reconstructed.
	var ev *event.E
	if ev, err = w.FetchEventBySerial(ser); chk.E(err) {
		return
	}
	if ev == nil {
		// Record vanished between lookup and fetch: nothing to delete.
		return
	}
	if err = w.DeleteEventBySerial(c, ser, ev); chk.E(err) {
		return
	}
	return
}
// DeleteEventBySerial removes an event and all its indexes by serial number.
//
// It derives every index key for the event, opens one read-write IndexedDB
// transaction spanning all affected object stores (the first 3 bytes of each
// key name its store), deletes the index entries plus both event-store
// records, and awaits the commit. Individual per-key failures are logged and
// skipped so one bad key cannot abort the whole removal.
func (w *W) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) (err error) {
	w.Logger.Infof("DeleteEventBySerial: deleting event %0x (serial %d)", ev.ID, ser.Get())
	// Get all indexes for the event
	var idxs [][]byte
	idxs, err = database.GetIndexesForEvent(ev, ser.Get())
	if chk.E(err) {
		w.Logger.Errorf("DeleteEventBySerial: failed to get indexes for event %0x: %v", ev.ID, err)
		return
	}
	w.Logger.Infof("DeleteEventBySerial: found %d indexes for event %0x", len(idxs), ev.ID)
	// Collect all unique store names we need to access
	storeNames := make(map[string]struct{})
	for _, key := range idxs {
		if len(key) >= 3 {
			storeNames[string(key[:3])] = struct{}{}
		}
	}
	// Also include event stores
	storeNames[string(indexes.EventPrefix)] = struct{}{}
	storeNames[string(indexes.SmallEventPrefix)] = struct{}{}
	// Convert to slice
	storeList := make([]string, 0, len(storeNames))
	for name := range storeNames {
		storeList = append(storeList, name)
	}
	if len(storeList) == 0 {
		return nil
	}
	// Start a transaction to delete the event and all its indexes
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeList[0], storeList[1:]...)
	if err != nil {
		return fmt.Errorf("failed to start delete transaction: %w", err)
	}
	// Delete all indexes
	for _, key := range idxs {
		if len(key) < 3 {
			continue
		}
		storeName := string(key[:3])
		objStore, storeErr := tx.ObjectStore(storeName)
		if storeErr != nil {
			w.Logger.Warnf("DeleteEventBySerial: failed to get object store %s: %v", storeName, storeErr)
			continue
		}
		keyJS := bytesToSafeValue(key)
		if _, delErr := objStore.Delete(keyJS); delErr != nil {
			w.Logger.Warnf("DeleteEventBySerial: failed to delete index from %s: %v", storeName, delErr)
		}
	}
	// Delete from small event store. Use a local error variable so an encode
	// failure is logged instead of silently overwriting the named return.
	sevKeyBuf := new(bytes.Buffer)
	if encErr := indexes.SmallEventEnc(ser).MarshalWrite(sevKeyBuf); encErr != nil {
		w.Logger.Warnf("DeleteEventBySerial: failed to encode small event key: %v", encErr)
	} else if objStore, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix)); storeErr == nil {
		// For small events, the key includes size and data, so we need to scan
		w.deleteKeysByPrefix(objStore, sevKeyBuf.Bytes())
	}
	// Delete from large event store; log (rather than ignore) delete errors.
	evtKeyBuf := new(bytes.Buffer)
	if encErr := indexes.EventEnc(ser).MarshalWrite(evtKeyBuf); encErr != nil {
		w.Logger.Warnf("DeleteEventBySerial: failed to encode event key: %v", encErr)
	} else if objStore, storeErr := tx.ObjectStore(string(indexes.EventPrefix)); storeErr == nil {
		keyJS := bytesToSafeValue(evtKeyBuf.Bytes())
		if _, delErr := objStore.Delete(keyJS); delErr != nil {
			w.Logger.Warnf("DeleteEventBySerial: failed to delete event record: %v", delErr)
		}
	}
	// Commit transaction
	if err = tx.Await(c); err != nil {
		return fmt.Errorf("failed to commit delete transaction: %w", err)
	}
	w.Logger.Infof("DeleteEventBySerial: successfully deleted event %0x and all indexes", ev.ID)
	return nil
}
// deleteKeysByPrefix deletes all keys starting with the given prefix from an
// object store.
//
// Matching keys are first collected with a cursor (mutating a store while
// its cursor is open is unreliable in IndexedDB) and then deleted. This is
// best-effort cleanup: cursor and delete errors are logged, not returned.
func (w *W) deleteKeysByPrefix(store *idb.ObjectStore, prefix []byte) {
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		w.Logger.Warnf("deleteKeysByPrefix: failed to open cursor: %v", err)
		return
	}
	var keysToDelete [][]byte
	if iterErr := cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		// bytes.HasPrefix already reports false for keys shorter than the
		// prefix, so no separate length check is needed.
		if keyBytes := safeValueToBytes(keyVal); bytes.HasPrefix(keyBytes, prefix) {
			keysToDelete = append(keysToDelete, keyBytes)
		}
		return cursor.Continue()
	}); iterErr != nil {
		w.Logger.Warnf("deleteKeysByPrefix: cursor iteration failed: %v", iterErr)
	}
	// Delete collected keys, logging instead of silently dropping failures.
	for _, key := range keysToDelete {
		keyJS := bytesToSafeValue(key)
		if _, delErr := store.Delete(keyJS); delErr != nil {
			w.Logger.Warnf("deleteKeysByPrefix: failed to delete key: %v", delErr)
		}
	}
}
// DeleteExpired scans for events with expiration timestamps that have passed
// and deletes them.
//
// It walks the expiration index (key layout: "exp" prefix | expiration
// timestamp | serial), collects serials whose expiration is at or before the
// current time, then deletes each matching event. Failures are logged; the
// scan is best-effort and will naturally be retried on the next invocation.
func (w *W) DeleteExpired() {
	now := time.Now().Unix()
	// Open read transaction to find expired events
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.ExpirationPrefix))
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to start transaction: %v", err)
		return
	}
	objStore, err := tx.ObjectStore(string(indexes.ExpirationPrefix))
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to get expiration store: %v", err)
		return
	}
	var expiredSerials types.Uint40s
	cursorReq, err := objStore.OpenCursor(idb.CursorNext)
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to open cursor: %v", err)
		return
	}
	// Surface iteration errors instead of silently dropping them; a partial
	// scan still deletes whatever was collected before the failure.
	if iterErr := cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) < 8 { // exp prefix (3) + expiration (variable) + serial (5)
			return cursor.Continue()
		}
		// Parse expiration key: exp|expiration_timestamp|serial
		exp, ser := indexes.ExpirationVars()
		buf := bytes.NewBuffer(keyBytes)
		if decErr := indexes.ExpirationDec(exp, ser).UnmarshalRead(buf); decErr != nil {
			// Malformed key: skip it rather than abort the scan.
			return cursor.Continue()
		}
		if int64(exp.Get()) > now {
			// Not expired yet
			return cursor.Continue()
		}
		expiredSerials = append(expiredSerials, ser)
		return cursor.Continue()
	}); iterErr != nil {
		w.Logger.Warnf("DeleteExpired: cursor iteration failed: %v", iterErr)
	}
	// Delete expired events
	for _, ser := range expiredSerials {
		ev, fetchErr := w.FetchEventBySerial(ser)
		if fetchErr != nil || ev == nil {
			continue
		}
		if err := w.DeleteEventBySerial(context.Background(), ser, ev); err != nil {
			w.Logger.Warnf("DeleteExpired: failed to delete expired event: %v", err)
		}
	}
}
// ProcessDelete processes a kind 5 deletion event, deleting referenced events.
//
// Deletion requests are honoured in three forms:
//   - "e" tags: delete specific events by ID.
//   - "a" tags: delete addressable events by kind:pubkey[:d-tag], but only
//     those created strictly before the deletion event itself.
//   - "k" tags (only consulted when no e/a tags exist): delete all events of
//     the listed kinds by this author that are older than the deletion event.
//
// Ownership is enforced throughout: only events whose pubkey matches the
// deletion event's pubkey are removed. NOTE(review): the admins parameter is
// not referenced anywhere in this implementation — confirm whether
// admin-initiated deletion should be honoured here as it is in CheckForDeleted.
// Per-target failures are logged and skipped rather than aborting the batch.
func (w *W) ProcessDelete(ev *event.E, admins [][]byte) (err error) {
	eTags := ev.Tags.GetAll([]byte("e"))
	aTags := ev.Tags.GetAll([]byte("a"))
	kTags := ev.Tags.GetAll([]byte("k"))
	// Process e-tags: delete specific events by ID
	for _, eTag := range eTags {
		if eTag.Len() < 2 {
			continue
		}
		// Use ValueHex() to handle both binary and hex storage formats
		eventIdHex := eTag.ValueHex()
		if len(eventIdHex) != 64 { // hex encoded event ID
			continue
		}
		// Decode hex event ID
		var eid []byte
		if eid, err = hexenc.DecAppend(nil, eventIdHex); chk.E(err) {
			continue
		}
		// Fetch the event to verify ownership
		var ser *types.Uint40
		if ser, err = w.GetSerialById(eid); chk.E(err) || ser == nil {
			continue
		}
		var targetEv *event.E
		if targetEv, err = w.FetchEventBySerial(ser); chk.E(err) || targetEv == nil {
			continue
		}
		// Only allow users to delete their own events
		if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
			continue
		}
		// Delete the event
		if err = w.DeleteEvent(context.Background(), eid); chk.E(err) {
			w.Logger.Warnf("failed to delete event %x via e-tag: %v", eid, err)
			continue
		}
		w.Logger.Debugf("deleted event %x via e-tag deletion", eid)
	}
	// Process a-tags: delete addressable events by kind:pubkey:d-tag
	for _, aTag := range aTags {
		if aTag.Len() < 2 {
			continue
		}
		// Parse the 'a' tag value: kind:pubkey:d-tag (for parameterized) or kind:pubkey (for regular)
		split := bytes.Split(aTag.Value(), []byte{':'})
		if len(split) < 2 {
			continue
		}
		// Parse the kind
		kindStr := string(split[0])
		kindInt, parseErr := strconv.Atoi(kindStr)
		if parseErr != nil {
			continue
		}
		kk := kind.New(uint16(kindInt))
		// Parse the pubkey
		var pk []byte
		if pk, err = hexenc.DecAppend(nil, split[1]); chk.E(err) {
			continue
		}
		// Only allow users to delete their own events
		if !utils.FastEqual(pk, ev.Pubkey) {
			continue
		}
		// Build filter for events to delete
		delFilter := &filter.F{
			Authors: tag.NewFromBytesSlice(pk),
			Kinds:   kind.NewS(kk),
		}
		// For parameterized replaceable events, add d-tag filter
		if kind.IsParameterizedReplaceable(kk.K) && len(split) >= 3 {
			dValue := split[2]
			delFilter.Tags = tag.NewS(tag.NewFromAny([]byte("d"), dValue))
		}
		// Find matching events
		var idxs []database.Range
		if idxs, err = database.GetIndexesFromFilter(delFilter); chk.E(err) {
			continue
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				continue
			}
			sers = append(sers, s...)
		}
		// Delete events older than the deletion event
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				continue
			}
			idPkTss = append(idPkTss, tmp...)
			// Sort by timestamp (newest first)
			sort.Slice(idPkTss, func(i, j int) bool {
				return idPkTss[i].Ts > idPkTss[j].Ts
			})
			for _, v := range idPkTss {
				// Only delete events created strictly before the delete event.
				if v.Ts < ev.CreatedAt {
					if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
						w.Logger.Warnf("failed to delete event %x via a-tag: %v", v.Id, err)
						continue
					}
					w.Logger.Debugf("deleted event %x via a-tag deletion", v.Id)
				}
			}
		}
	}
	// If there are no e or a tags, delete all replaceable events of the kinds
	// specified by the k tags for the pubkey of the delete event.
	if len(eTags) == 0 && len(aTags) == 0 {
		// Parse the kind tags
		var kinds []*kind.K
		for _, k := range kTags {
			kv := k.Value()
			iv := ints.New(0)
			if _, err = iv.Unmarshal(kv); chk.E(err) {
				continue
			}
			kinds = append(kinds, kind.New(iv.N))
		}
		var idxs []database.Range
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: tag.NewFromBytesSlice(ev.Pubkey),
				Kinds:   kind.NewS(kinds...),
			},
		); chk.E(err) {
			return
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Sort by timestamp (newest first)
			sort.Slice(idPkTss, func(i, j int) bool {
				return idPkTss[i].Ts > idPkTss[j].Ts
			})
			for _, v := range idPkTss {
				// Delete only events older than the deletion event; errors
				// are deliberately skipped so one failure does not stop the rest.
				if v.Ts < ev.CreatedAt {
					if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
						continue
					}
				}
			}
		}
	}
	return
}
// CheckForDeleted checks if the event has been deleted, and returns an error with
// prefix "blocked:" if it is. This function also allows designating admin
// pubkeys that may also delete the event.
//
// The lookup strategy depends on the event class:
//   - parameterized replaceable: look for a kind-5 with a matching "#a" tag
//     (kind:pubkey:d-tag) that is newer than the event;
//   - replaceable: look for a kind-5 with a matching "#k" tag (author only)
//     or a "#a" address (kind:pubkey) that is newer than the event;
//   - everything else: any kind-5 referencing the exact event ID via "e".
func (w *W) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
	// Deletions may come from the event's author or any of the admin keys.
	keys := append([][]byte{ev.Pubkey}, admins...)
	authors := tag.NewFromBytesSlice(keys...)
	// If the event is addressable, check for a deletion event with the same
	// kind/pubkey/dtag
	if kind.IsParameterizedReplaceable(ev.Kind) {
		var idxs []database.Range
		// Construct an a-tag
		t := ev.Tags.GetFirst([]byte("d"))
		var dTagValue []byte
		if t != nil {
			dTagValue = t.Value()
		}
		a := atag.T{
			Kind:   kind.New(ev.Kind),
			Pubkey: ev.Pubkey,
			DTag:   dTagValue,
		}
		at := a.Marshal(nil)
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: authors,
				Kinds:   kind.NewS(kind.Deletion),
				Tags:    tag.NewS(tag.NewFromAny("#a", at)),
			},
		); chk.E(err) {
			return
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion timestamp
			maxTs := idPkTss[0].Ts
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
				}
			}
			// Block only when the event is strictly older than the newest delete.
			if ev.CreatedAt < maxTs {
				err = errorf.E(
					"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
					ev.ID, at, ev.CreatedAt, maxTs,
				)
				return
			}
			return
		}
		return
	}
	// If the event is replaceable, check if there is a deletion event newer
	// than the event
	if kind.IsReplaceable(ev.Kind) {
		var idxs []database.Range
		// First check for a "#k" (kind) tagged deletion by the author only.
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: tag.NewFromBytesSlice(ev.Pubkey),
				Kinds:   kind.NewS(kind.Deletion),
				Tags: tag.NewS(
					tag.NewFromAny("#k", fmt.Sprint(ev.Kind)),
				),
			},
		); chk.E(err) {
			return
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion
			maxTs := idPkTss[0].Ts
			maxId := idPkTss[0].Id
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
					maxId = idPkTss[i].Id
				}
			}
			if ev.CreatedAt < maxTs {
				err = fmt.Errorf(
					"blocked: %0x was deleted: the event is older than the delete event %0x: event: %d delete: %d",
					ev.ID, maxId, ev.CreatedAt, maxTs,
				)
				return
			}
		}
		// This type of delete can also use an a tag to specify kind and author
		idxs = nil
		a := atag.T{
			Kind:   kind.New(ev.Kind),
			Pubkey: ev.Pubkey,
		}
		at := a.Marshal(nil)
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: authors,
				Kinds:   kind.NewS(kind.Deletion),
				Tags:    tag.NewS(tag.NewFromAny("#a", at)),
			},
		); chk.E(err) {
			return
		}
		sers = nil
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion
			maxTs := idPkTss[0].Ts
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
				}
			}
			if ev.CreatedAt < maxTs {
				err = errorf.E(
					"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
					ev.ID, at, ev.CreatedAt, maxTs,
				)
				return
			}
			return
		}
		return
	}
	// Otherwise check for a delete by event id
	var idxs []database.Range
	if idxs, err = database.GetIndexesFromFilter(
		&filter.F{
			Authors: authors,
			Kinds:   kind.NewS(kind.Deletion),
			Tags: tag.NewS(
				tag.NewFromAny("e", hexenc.Enc(ev.ID)),
			),
		},
	); chk.E(err) {
		return
	}
	for _, idx := range idxs {
		var s types.Uint40s
		if s, err = w.GetSerialsByRange(idx); chk.E(err) {
			return
		}
		if len(s) > 0 {
			// Any e-tag deletion found means the exact event was deleted
			err = errorf.E("blocked: %0x has been deleted", ev.ID)
			return
		}
	}
	return
}

256
pkg/wasmdb/fetch-event.go

@@ -0,0 +1,256 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"errors"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
)
// FetchEventBySerial retrieves an event by its serial number.
// Small events (sev prefix) are consulted first, then the large event
// store (evt prefix); whichever yields a result wins.
func (w *W) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
	if ser == nil {
		return nil, errors.New("nil serial")
	}
	if ev, err = w.fetchSmallEvent(ser); err == nil && ev != nil {
		return ev, nil
	}
	if ev, err = w.fetchLargeEvent(ser); err == nil && ev != nil {
		return ev, nil
	}
	return nil, errors.New("event not found")
}
// fetchSmallEvent fetches an event from the small event store, where the
// event payload is embedded in the IndexedDB key itself rather than in a
// separate value.
//
// Key layout: sev prefix (3) | serial (5) | payload size (2, big-endian) |
// payload (size bytes). Iteration uses a sentinel error ("found") to stop
// the cursor early once a matching key has been decoded.
func (w *W) fetchSmallEvent(ser *types.Uint40) (*event.E, error) {
	// Build the key prefix (store prefix + serial) to match against.
	keyBuf := new(bytes.Buffer)
	if err := indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
		return nil, err
	}
	prefix := keyBuf.Bytes()
	// Open transaction
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.SmallEventPrefix))
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(string(indexes.SmallEventPrefix))
	if err != nil {
		return nil, err
	}
	// Use cursor to find matching key. NOTE(review): this scans the store
	// from the beginning rather than seeking to the prefix — consider an
	// idb key range if this shows up in profiles.
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	var foundEvent *event.E
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
			// Found matching key
			// Format: sev|serial(5)|size(2)|data(variable)
			if len(keyBytes) > 10 { // 3 + 5 + 2 = 10 minimum
				sizeOffset := 8 // 3 prefix + 5 serial
				if len(keyBytes) > sizeOffset+2 {
					// Big-endian uint16 payload length.
					size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
					dataStart := sizeOffset + 2
					if len(keyBytes) >= dataStart+size {
						eventData := keyBytes[dataStart : dataStart+size]
						ev := new(event.E)
						if unmarshalErr := ev.UnmarshalBinary(bytes.NewReader(eventData)); unmarshalErr == nil {
							foundEvent = ev
							return errors.New("found") // Stop iteration
						}
					}
				}
			}
		}
		return cursor.Continue()
	})
	if foundEvent != nil {
		return foundEvent, nil
	}
	// The "found" sentinel is the early-stop signal, not a real failure.
	if err != nil && err.Error() != "found" {
		return nil, err
	}
	return nil, errors.New("small event not found")
}
// fetchLargeEvent fetches an event from the large event store, where the
// serialized event lives in the value addressed directly by its key.
func (w *W) fetchLargeEvent(ser *types.Uint40) (*event.E, error) {
	// Encode the exact lookup key for this serial.
	var keyBuf bytes.Buffer
	if err := indexes.EventEnc(ser).MarshalWrite(&keyBuf); chk.E(err) {
		return nil, err
	}
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.EventPrefix))
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(string(indexes.EventPrefix))
	if err != nil {
		return nil, err
	}
	// Direct point lookup — no cursor needed here.
	req, err := objStore.Get(bytesToSafeValue(keyBuf.Bytes()))
	if err != nil {
		return nil, err
	}
	val, err := req.Await(w.ctx)
	if err != nil {
		return nil, err
	}
	if val.IsUndefined() || val.IsNull() {
		return nil, errors.New("large event not found")
	}
	raw := safeValueToBytes(val)
	if len(raw) == 0 {
		return nil, errors.New("empty event data")
	}
	out := new(event.E)
	if err := out.UnmarshalBinary(bytes.NewReader(raw)); err != nil {
		return nil, err
	}
	return out, nil
}
// FetchEventsBySerials retrieves multiple events by their serial numbers.
// Serials that are nil or fail to resolve are silently skipped; the result
// maps each found serial's value to its event.
func (w *W) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
	events = make(map[uint64]*event.E, len(serials))
	for _, s := range serials {
		if s == nil {
			continue
		}
		if ev, fetchErr := w.FetchEventBySerial(s); fetchErr == nil && ev != nil {
			events[s.Get()] = ev
		}
	}
	return events, nil
}
// GetFullIdPubkeyBySerial retrieves the ID, pubkey hash, and timestamp for a serial
// by scanning the fpc index store for a key carrying that serial.
//
// Key layout: fpc prefix (3) | serial (5) | event id (32) | pubkey hash (8) |
// timestamp (8, big-endian). Iteration stops early via a sentinel "found"
// error once a matching key has been decoded.
func (w *W) GetFullIdPubkeyBySerial(ser *types.Uint40) (fidpk *store.IdPkTs, err error) {
	if ser == nil {
		return nil, errors.New("nil serial")
	}
	// Build the prefix to search for
	keyBuf := new(bytes.Buffer)
	// NOTE(review): the MarshalWrite error is discarded here — writes to a
	// bytes.Buffer cannot fail, but confirm the encoder has no other error path.
	indexes.FullIdPubkeyEnc(ser, nil, nil, nil).MarshalWrite(keyBuf)
	prefix := keyBuf.Bytes()[:8] // 3 prefix + 5 serial
	// Search in the fpc object store
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.FullIdPubkeyPrefix))
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(string(indexes.FullIdPubkeyPrefix))
	if err != nil {
		return nil, err
	}
	// Use cursor to find matching key
	cursorReq, err := objStore.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
			// Found matching key
			// Format: fpc|serial(5)|id(32)|pubkey_hash(8)|timestamp(8)
			if len(keyBytes) >= 56 { // 3 + 5 + 32 + 8 + 8 = 56
				fidpk = &store.IdPkTs{
					Id:  make([]byte, 32),
					Pub: make([]byte, 8),
					Ts:  0,
				}
				copy(fidpk.Id, keyBytes[8:40])
				copy(fidpk.Pub, keyBytes[40:48])
				// Parse timestamp (big-endian uint64)
				var ts int64
				for i := 0; i < 8; i++ {
					ts = (ts << 8) | int64(keyBytes[48+i])
				}
				fidpk.Ts = ts
				fidpk.Ser = ser.Get()
				return errors.New("found") // Stop iteration
			}
		}
		return cursor.Continue()
	})
	if fidpk != nil {
		return fidpk, nil
	}
	// "found" is the early-stop sentinel, not a real error.
	if err != nil && err.Error() != "found" {
		return nil, err
	}
	return nil, errors.New("full id pubkey not found")
}
// GetFullIdPubkeyBySerials retrieves ID/pubkey/timestamp for multiple serials.
// Serials that are nil or fail to resolve are skipped rather than reported
// as errors.
func (w *W) GetFullIdPubkeyBySerials(sers []*types.Uint40) (fidpks []*store.IdPkTs, err error) {
	fidpks = make([]*store.IdPkTs, 0, len(sers))
	for _, s := range sers {
		if s == nil {
			continue
		}
		if rec, lookupErr := w.GetFullIdPubkeyBySerial(s); lookupErr == nil && rec != nil {
			fidpks = append(fidpks, rec)
		}
	}
	return fidpks, nil
}

162
pkg/wasmdb/helpers.go

@@ -0,0 +1,162 @@
//go:build js && wasm
package wasmdb
import (
"syscall/js"
"github.com/hack-pad/safejs"
)
// safeValueToBytes converts a safejs.Value to a []byte.
// It handles Uint8Array, ArrayBuffer, generic array-like objects, and
// strings coming back from IndexedDB. Any conversion failure yields nil.
func safeValueToBytes(val safejs.Value) []byte {
	if val.IsUndefined() || val.IsNull() {
		return nil
	}
	// Get global Uint8Array and ArrayBuffer constructors
	uint8ArrayType := safejs.MustGetGlobal("Uint8Array")
	arrayBufferType := safejs.MustGetGlobal("ArrayBuffer")
	// Check if it's a Uint8Array
	isUint8Array, _ := val.InstanceOf(uint8ArrayType)
	if isUint8Array {
		length, err := val.Length()
		if err != nil {
			return nil
		}
		buf := make([]byte, length)
		// Copy bytes - we need to iterate since safejs doesn't have CopyBytesToGo
		for i := 0; i < length; i++ {
			elem, err := val.Index(i)
			if err != nil {
				return nil
			}
			intVal, err := elem.Int()
			if err != nil {
				return nil
			}
			buf[i] = byte(intVal)
		}
		return buf
	}
	// Check if it's an ArrayBuffer
	isArrayBuffer, _ := val.InstanceOf(arrayBufferType)
	if isArrayBuffer {
		// Create a Uint8Array view of the ArrayBuffer and recurse on it.
		uint8Array, err := uint8ArrayType.New(val)
		if err != nil {
			return nil
		}
		return safeValueToBytes(uint8Array)
	}
	// Try to treat it as a typed array-like object.
	// NOTE(review): a zero-length array-like falls through to the string
	// branch and ultimately returns nil instead of an empty slice —
	// confirm callers treat nil and empty equivalently.
	length, err := val.Length()
	if err == nil && length > 0 {
		buf := make([]byte, length)
		for i := 0; i < length; i++ {
			elem, err := val.Index(i)
			if err != nil {
				return nil
			}
			intVal, err := elem.Int()
			if err != nil {
				return nil
			}
			buf[i] = byte(intVal)
		}
		return buf
	}
	// Last resort: check if it's a string (for string keys in IndexedDB)
	if val.Type() == safejs.TypeString {
		str, err := val.String()
		if err == nil {
			return []byte(str)
		}
	}
	return nil
}
// bytesToSafeValue converts a []byte to a safejs.Value (Uint8Array)
func bytesToSafeValue(buf []byte) safejs.Value {
if buf == nil {
return safejs.Null()
}
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
js.CopyBytesToJS(uint8Array, buf)
return safejs.Safe(uint8Array)
}
// cryptoRandom fills the provided byte slice with cryptographically secure random bytes
// using the Web Crypto API (crypto.getRandomValues) or Node.js crypto.randomFillSync
func cryptoRandom(buf []byte) error {
if len(buf) == 0 {
return nil
}
// First try browser's crypto.getRandomValues
crypto := js.Global().Get("crypto")
if crypto.IsUndefined() {
// Fallback to msCrypto for older IE
crypto = js.Global().Get("msCrypto")
}
if !crypto.IsUndefined() {
// Try getRandomValues (browser API)
getRandomValues := crypto.Get("getRandomValues")
if !getRandomValues.IsUndefined() && getRandomValues.Type() == js.TypeFunction {
// Create a Uint8Array to receive random bytes
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
// Call crypto.getRandomValues - may throw in Node.js
defer func() {
// Recover from panic if this method doesn't work
recover()
}()
getRandomValues.Invoke(uint8Array)
// Copy the random bytes to our Go slice
js.CopyBytesToGo(buf, uint8Array)
return nil
}
// Try randomFillSync (Node.js API)
randomFillSync := crypto.Get("randomFillSync")
if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
randomFillSync.Invoke(uint8Array)
js.CopyBytesToGo(buf, uint8Array)
return nil
}
}
// Try to load Node.js crypto module via require
requireFunc := js.Global().Get("require")
if !requireFunc.IsUndefined() && requireFunc.Type() == js.TypeFunction {
nodeCrypto := requireFunc.Invoke("crypto")
if !nodeCrypto.IsUndefined() {
randomFillSync := nodeCrypto.Get("randomFillSync")
if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
randomFillSync.Invoke(uint8Array)
js.CopyBytesToGo(buf, uint8Array)
return nil
}
}
}
return errNoCryptoAPI
}
// errNoCryptoAPI is returned when the Web Crypto API is not available.
// A dedicated zero-size error type (instead of errors.New) keeps the
// sentinel comparable and allocation-free.
type cryptoAPIError struct{}

// Error implements the error interface.
func (cryptoAPIError) Error() string { return "Web Crypto API not available" }

// errNoCryptoAPI is the shared sentinel instance returned by cryptoRandom.
var errNoCryptoAPI = cryptoAPIError{}

293
pkg/wasmdb/import-export.go

@@ -0,0 +1,293 @@
//go:build js && wasm
package wasmdb
import (
"bufio"
"bytes"
"context"
"encoding/json"
"io"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
)
// Import reads events from a JSONL reader and imports them into the database.
// Malformed lines and failed saves are logged and skipped; a summary count
// is logged at the end.
func (w *W) Import(rr io.Reader) {
	ctx := context.Background()
	sc := bufio.NewScanner(rr)
	// Allow lines (events) up to 1MB.
	lineBuf := make([]byte, 1024*1024)
	sc.Buffer(lineBuf, len(lineBuf))
	count := 0
	for sc.Scan() {
		raw := sc.Bytes()
		if len(raw) == 0 {
			continue
		}
		ev := event.New()
		if err := json.Unmarshal(raw, ev); err != nil {
			w.Logger.Warnf("Import: failed to unmarshal event: %v", err)
			continue
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("Import: failed to save event: %v", err)
			continue
		}
		count++
	}
	if err := sc.Err(); err != nil {
		w.Logger.Errorf("Import: scanner error: %v", err)
	}
	w.Logger.Infof("Import: imported %d events", count)
}
// Export writes events to a JSONL writer, optionally filtered by pubkeys.
//
// When pubkeys are given, only events authored by those keys are exported;
// otherwise every event in the database is written, one JSON object per
// line. Write errors now abort the export instead of being silently
// ignored — a broken writer would otherwise spin through every remaining
// event to no effect.
func (w *W) Export(c context.Context, wr io.Writer, pubkeys ...[]byte) {
	var evs event.S
	var err error
	// Collect the events to export.
	if len(pubkeys) > 0 {
		// Export only events from specified pubkeys
		for _, pk := range pubkeys {
			// Get all serials for this pubkey
			serials, serErr := w.GetSerialsByPubkey(pk)
			if serErr != nil {
				w.Logger.Warnf("Export: failed to get serials for pubkey: %v", serErr)
				continue
			}
			for _, ser := range serials {
				ev, fetchErr := w.FetchEventBySerial(ser)
				if fetchErr != nil || ev == nil {
					continue
				}
				evs = append(evs, ev)
			}
		}
	} else {
		// Export all events
		evs, err = w.getAllEvents(c)
		if err != nil {
			w.Logger.Errorf("Export: failed to get all events: %v", err)
			return
		}
	}
	// Write events as JSONL.
	exported := 0
	for _, ev := range evs {
		data, mErr := json.Marshal(ev)
		if mErr != nil {
			w.Logger.Warnf("Export: failed to marshal event: %v", mErr)
			continue
		}
		if _, wErr := wr.Write(data); wErr != nil {
			w.Logger.Errorf("Export: write error after %d events: %v", exported, wErr)
			return
		}
		if _, wErr := wr.Write([]byte("\n")); wErr != nil {
			w.Logger.Errorf("Export: write error after %d events: %v", exported, wErr)
			return
		}
		exported++
	}
	w.Logger.Infof("Export: exported %d events", exported)
}
// ImportEventsFromReader imports events from a JSONL reader with context support.
// Import stops with ctx.Err() when the context is cancelled; otherwise bad
// lines are skipped and a summary is logged.
func (w *W) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
	sc := bufio.NewScanner(rr)
	lineBuf := make([]byte, 1024*1024)
	sc.Buffer(lineBuf, len(lineBuf))
	count := 0
	for sc.Scan() {
		// Stop promptly if the caller cancels.
		select {
		case <-ctx.Done():
			w.Logger.Infof("ImportEventsFromReader: cancelled after %d events", count)
			return ctx.Err()
		default:
		}
		raw := sc.Bytes()
		if len(raw) == 0 {
			continue
		}
		ev := event.New()
		if err := json.Unmarshal(raw, ev); err != nil {
			w.Logger.Warnf("ImportEventsFromReader: failed to unmarshal: %v", err)
			continue
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("ImportEventsFromReader: failed to save: %v", err)
			continue
		}
		count++
	}
	if err := sc.Err(); err != nil {
		return err
	}
	w.Logger.Infof("ImportEventsFromReader: imported %d events", count)
	return nil
}
// ImportEventsFromStrings imports events from JSON strings with policy checking.
// Each candidate is optionally vetted by policyManager ("write" action)
// before being saved; cancellation is honoured between events.
func (w *W) ImportEventsFromStrings(
	ctx context.Context,
	eventJSONs []string,
	policyManager interface {
		CheckPolicy(action string, ev *event.E, pubkey []byte, remote string) (bool, error)
	},
) error {
	count := 0
	for _, raw := range eventJSONs {
		select {
		case <-ctx.Done():
			w.Logger.Infof("ImportEventsFromStrings: cancelled after %d events", count)
			return ctx.Err()
		default:
		}
		ev := event.New()
		if err := json.Unmarshal([]byte(raw), ev); err != nil {
			w.Logger.Warnf("ImportEventsFromStrings: failed to unmarshal: %v", err)
			continue
		}
		// Consult the optional write policy before persisting.
		if policyManager != nil {
			allowed, policyErr := policyManager.CheckPolicy("write", ev, ev.Pubkey, "import")
			if policyErr != nil || !allowed {
				w.Logger.Debugf("ImportEventsFromStrings: policy rejected event")
				continue
			}
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("ImportEventsFromStrings: failed to save: %v", err)
			continue
		}
		count++
	}
	w.Logger.Infof("ImportEventsFromStrings: imported %d events", count)
	return nil
}
// GetSerialsByPubkey returns all event serials for a given pubkey.
// Range lookups that fail are skipped rather than aborting the collection.
func (w *W) GetSerialsByPubkey(pubkey []byte) ([]*types.Uint40, error) {
	// Derive the index ranges covering every event authored by pubkey.
	ranges, err := database.GetIndexesFromFilter(&filter.F{
		Authors: tag.NewFromBytesSlice(pubkey),
	})
	if chk.E(err) {
		return nil, err
	}
	var out []*types.Uint40
	for _, r := range ranges {
		sers, rangeErr := w.GetSerialsByRange(r)
		if rangeErr != nil {
			continue
		}
		out = append(out, sers...)
	}
	return out, nil
}
// getAllEvents retrieves all events from the database.
// Both stores are scanned best-effort: small events live embedded in their
// keys, large events in values; a failed scan of either store is ignored.
func (w *W) getAllEvents(c context.Context) (event.S, error) {
	var all event.S
	if small, err := w.scanEventStore(string(indexes.SmallEventPrefix), true); err == nil {
		all = append(all, small...)
	}
	if large, err := w.scanEventStore(string(indexes.EventPrefix), false); err == nil {
		all = append(all, large...)
	}
	return all, nil
}
// scanEventStore scans an event store and returns all events it can decode.
//
// For the small event store (isSmallEvent true) the payload is embedded in
// the key: sev prefix (3) | serial (5) | size uint16 (big-endian) | data.
// For the large store the payload is the cursor's value. Undecodable
// entries are skipped silently; iteration errors are returned together with
// whatever was collected before the failure.
func (w *W) scanEventStore(storeName string, isSmallEvent bool) (event.S, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	var events event.S
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		var eventData []byte
		if isSmallEvent {
			// Small events: data is embedded in the key
			keyVal, keyErr := cursor.Key()
			if keyErr != nil {
				return keyErr
			}
			keyBytes := safeValueToBytes(keyVal)
			// Format: sev|serial|size_uint16|event_data
			if len(keyBytes) > 10 { // 3 + 5 + 2 minimum
				sizeOffset := 8 // 3 prefix + 5 serial
				if len(keyBytes) > sizeOffset+2 {
					// Big-endian uint16 payload length.
					size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
					if len(keyBytes) >= sizeOffset+2+size {
						eventData = keyBytes[sizeOffset+2 : sizeOffset+2+size]
					}
				}
			}
		} else {
			// Large events: data is in the value
			val, valErr := cursor.Value()
			if valErr != nil {
				return valErr
			}
			eventData = safeValueToBytes(val)
		}
		if len(eventData) > 0 {
			ev := event.New()
			if err := ev.UnmarshalBinary(bytes.NewReader(eventData)); err == nil {
				events = append(events, ev)
			}
		}
		return cursor.Continue()
	})
	return events, err
}

75
pkg/wasmdb/logger.go

@@ -0,0 +1,75 @@
//go:build js && wasm
package wasmdb
import (
"fmt"
"syscall/js"
"time"
"lol.mleku.dev"
)
// logger provides logging functionality for the wasmdb package
// It outputs to the browser console via console.log/warn/error
type logger struct {
level int
}
// NewLogger creates a new logger with the specified level
func NewLogger(level int) *logger {
return &logger{level: level}
}
// SetLogLevel changes the logging level
func (l *logger) SetLogLevel(level int) {
l.level = level
}
// formatMessage creates a formatted log message with timestamp
func (l *logger) formatMessage(level, format string, args ...interface{}) string {
msg := fmt.Sprintf(format, args...)
return fmt.Sprintf("[%s] [wasmdb] [%s] %s",
time.Now().Format("15:04:05.000"),
level,
msg,
)
}
// Debugf logs a debug message
func (l *logger) Debugf(format string, args ...interface{}) {
if l.level <= lol.Debug {
msg := l.formatMessage("DEBUG", format, args...)
js.Global().Get("console").Call("log", msg)
}
}
// Infof logs an info message
func (l *logger) Infof(format string, args ...interface{}) {
if l.level <= lol.Info {
msg := l.formatMessage("INFO", format, args...)
js.Global().Get("console").Call("log", msg)
}
}
// Warnf logs a warning message
func (l *logger) Warnf(format string, args ...interface{}) {
if l.level <= lol.Warn {
msg := l.formatMessage("WARN", format, args...)
js.Global().Get("console").Call("warn", msg)
}
}
// Errorf logs an error message
func (l *logger) Errorf(format string, args ...interface{}) {
if l.level <= lol.Error {
msg := l.formatMessage("ERROR", format, args...)
js.Global().Get("console").Call("error", msg)
}
}
// Fatalf logs a fatal message (does not exit in WASM)
func (l *logger) Fatalf(format string, args ...interface{}) {
msg := l.formatMessage("FATAL", format, args...)
js.Global().Get("console").Call("error", msg)
}

213
pkg/wasmdb/nip43.go

@@ -0,0 +1,213 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"encoding/binary"
"errors"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"next.orly.dev/pkg/database"
)
const (
	// NIP43StoreName is the object store for NIP-43 membership records,
	// keyed by the raw 32-byte pubkey (stored as a string).
	NIP43StoreName = "nip43"
	// InvitesStoreName is the object store for invite codes, keyed by the
	// code string; the value is an 8-byte big-endian expiry timestamp.
	InvitesStoreName = "invites"
)
// AddNIP43Member adds a pubkey as a NIP-43 member with the given invite code.
// The pubkey must be exactly 32 bytes; the record is stored keyed by the
// raw pubkey bytes.
func (w *W) AddNIP43Member(pubkey []byte, inviteCode string) error {
	if len(pubkey) != 32 {
		return errors.New("invalid pubkey length")
	}
	// Build the membership record with a defensive copy of the pubkey.
	pk := make([]byte, 32)
	copy(pk, pubkey)
	m := &database.NIP43Membership{
		Pubkey:     pk,
		InviteCode: inviteCode,
		AddedAt:    time.Now(),
	}
	return w.setStoreValue(NIP43StoreName, string(pubkey), w.serializeNIP43Membership(m))
}
// RemoveNIP43Member removes a pubkey from NIP-43 membership by deleting its
// record (keyed by the raw pubkey bytes) from the nip43 object store.
func (w *W) RemoveNIP43Member(pubkey []byte) error {
	return w.deleteStoreValue(NIP43StoreName, string(pubkey))
}
// IsNIP43Member checks if a pubkey is a NIP-43 member.
// A failed lookup is reported as "not a member" rather than an error.
func (w *W) IsNIP43Member(pubkey []byte) (isMember bool, err error) {
	data, getErr := w.getStoreValue(NIP43StoreName, string(pubkey))
	if getErr != nil {
		return false, nil
	}
	return data != nil, nil
}
// GetNIP43Membership returns the full membership details for a pubkey,
// or an error if the record is missing or cannot be read.
func (w *W) GetNIP43Membership(pubkey []byte) (*database.NIP43Membership, error) {
	data, err := w.getStoreValue(NIP43StoreName, string(pubkey))
	switch {
	case err != nil:
		return nil, err
	case data == nil:
		return nil, errors.New("membership not found")
	default:
		return w.deserializeNIP43Membership(data)
	}
}
// GetAllNIP43Members returns all NIP-43 member pubkeys.
//
// It iterates the nip43 object store and collects every key that is exactly
// 32 bytes long (keys are raw pubkeys stored as strings). Each returned
// pubkey is a fresh copy, safe for the caller to retain.
func (w *W) GetAllNIP43Members() ([][]byte, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, NIP43StoreName)
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(NIP43StoreName)
	if err != nil {
		return nil, err
	}
	var members [][]byte
	cursorReq, err := objStore.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		// Key is the pubkey stored as string
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) == 32 {
			pubkey := make([]byte, 32)
			copy(pubkey, keyBytes)
			members = append(members, pubkey)
		}
		return cursor.Continue()
	})
	// This iteration never stops early via a sentinel "found" error, so any
	// error here is a real failure (the previous err.Error() != "found"
	// guard was a copy-paste leftover from the single-item lookups).
	if err != nil {
		return nil, err
	}
	return members, nil
}
// StoreInviteCode stores an invite code with expiration time.
// The stored value is the expiry as an 8-byte big-endian unix timestamp.
func (w *W) StoreInviteCode(code string, expiresAt time.Time) error {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], uint64(expiresAt.Unix()))
	return w.setStoreValue(InvitesStoreName, code, data[:])
}
// ValidateInviteCode checks if an invite code is valid (exists and not expired).
// Missing, unreadable, or malformed records are simply reported as invalid.
func (w *W) ValidateInviteCode(code string) (valid bool, err error) {
	data, getErr := w.getStoreValue(InvitesStoreName, code)
	if getErr != nil || len(data) < 8 {
		return false, nil
	}
	// Compare the stored big-endian unix expiry against the current time.
	expiry := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)
	return !time.Now().After(expiry), nil
}
// DeleteInviteCode removes an invite code from the invites object store.
func (w *W) DeleteInviteCode(code string) error {
	return w.deleteStoreValue(InvitesStoreName, code)
}
// PublishNIP43MembershipEvent is a no-op in WASM (events are handled by the relay).
// NOTE(review): presumably kept so this backend satisfies the same interface
// as the native database engines — confirm against the interface definition.
func (w *W) PublishNIP43MembershipEvent(kind int, pubkey []byte) error {
	// In WASM context, this would typically be handled by the client
	// This is a no-op implementation
	return nil
}
// serializeNIP43Membership converts a membership to bytes for storage.
// Layout: pubkey bytes | addedAt unix seconds (8, big-endian) |
// invite code length (4, big-endian) | invite code bytes.
func (w *W) serializeNIP43Membership(m *database.NIP43Membership) []byte {
	code := []byte(m.InviteCode)
	out := make([]byte, 0, len(m.Pubkey)+8+4+len(code))
	out = append(out, m.Pubkey...)
	var ts [8]byte
	binary.BigEndian.PutUint64(ts[:], uint64(m.AddedAt.Unix()))
	out = append(out, ts[:]...)
	var codeLen [4]byte
	binary.BigEndian.PutUint32(codeLen[:], uint32(len(code)))
	out = append(out, codeLen[:]...)
	out = append(out, code...)
	return out
}
// deserializeNIP43Membership converts bytes back to a membership.
//
// Expected layout: pubkey(32) | addedAt unix seconds (8, big-endian) |
// invite code length (4, big-endian) | invite code bytes. Malformed input
// yields an error instead of a panic: the bounds check is performed in int
// arithmetic, because the previous `44+codeLen` comparison was evaluated in
// uint32 and a corrupt length near MaxUint32 could wrap around, pass the
// check, and make the subsequent slice expression panic.
func (w *W) deserializeNIP43Membership(data []byte) (*database.NIP43Membership, error) {
	if len(data) < 44 { // 32 + 8 + 4 minimum
		return nil, errors.New("invalid membership data")
	}
	m := &database.NIP43Membership{}
	// Read pubkey
	m.Pubkey = make([]byte, 32)
	copy(m.Pubkey, data[:32])
	// Read AddedAt
	m.AddedAt = time.Unix(int64(binary.BigEndian.Uint64(data[32:40])), 0)
	// Read invite code; widen the length to int before bounds-checking so a
	// huge stored length cannot wrap in uint32 arithmetic.
	codeLen := int(binary.BigEndian.Uint32(data[40:44]))
	if codeLen < 0 || len(data)-44 < codeLen {
		return nil, errors.New("invalid invite code length")
	}
	m.InviteCode = string(data[44 : 44+codeLen])
	return m, nil
}
// safeValueToString converts a safejs.Value into a Go string for use as a
// store key; undefined, null, and unconvertible values map to "".
func safeValueToString(v safejs.Value) string {
	if !v.IsUndefined() && !v.IsNull() {
		if s, err := v.String(); err == nil {
			return s
		}
	}
	return ""
}

767
pkg/wasmdb/query-events.go

@@ -0,0 +1,767 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"errors"
"sort"
"strconv"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
sha256 "github.com/minio/sha256-simd"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/ints"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
"next.orly.dev/pkg/utils"
)
// CheckExpiration checks if an event has expired based on its "expiration" tag.
// An event with no such tag, or an unparseable tag value, is not expired.
func CheckExpiration(ev *event.E) (expired bool) {
	t := ev.Tags.GetFirst([]byte("expiration"))
	if t == nil {
		return false
	}
	ts := ints.New(0)
	if _, err := ts.Unmarshal(t.Value()); err != nil {
		return false
	}
	// Expired when the tag's timestamp lies strictly in the past.
	return int64(ts.N) < time.Now().Unix()
}
// GetSerialsByRange retrieves serials from an index range using cursor iteration.
// The index keys must end with a 5-byte serial number.
//
// The object store name is derived from the range's 3-byte key prefix.
// Iteration runs newest-first (CursorPrevious) to mirror the Badger backend
// and stops — via a sentinel "done" error — once keys fall below the range
// start; the collected serials are then returned sorted ascending.
func (w *W) GetSerialsByRange(idx database.Range) (sers types.Uint40s, err error) {
	if len(idx.Start) < 3 {
		return nil, errors.New("invalid range: start key too short")
	}
	// Extract the object store name from the 3-byte prefix
	storeName := string(idx.Start[:3])
	// Open a read transaction
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	// Open cursor in reverse order (newest first like Badger)
	cursorReq, err := objStore.OpenCursor(idb.CursorPrevious)
	if err != nil {
		return nil, err
	}
	// Pre-allocate slice
	sers = make(types.Uint40s, 0, 100)
	// Create end boundary with 0xff suffix for inclusive range: any 5-byte
	// serial appended to idx.End still compares <= endBoundary.
	endBoundary := make([]byte, len(idx.End)+5)
	copy(endBoundary, idx.End)
	for i := len(idx.End); i < len(endBoundary); i++ {
		endBoundary[i] = 0xff
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		key := safeValueToBytes(keyVal)
		if len(key) < 8 { // minimum: 3 prefix + 5 serial
			return cursor.Continue()
		}
		// Check if key is within range
		keyWithoutSerial := key[:len(key)-5]
		// Compare with start (lower bound)
		cmp := bytes.Compare(keyWithoutSerial, idx.Start)
		if cmp < 0 {
			// Key is before range start; since iteration is descending no
			// later key can be in range — stop via the sentinel error.
			return errors.New("done")
		}
		// Compare with end boundary
		if bytes.Compare(key, endBoundary) > 0 {
			// Key is after range end, continue to find keys in range
			return cursor.Continue()
		}
		// Extract serial from last 5 bytes
		ser := new(types.Uint40)
		if err := ser.UnmarshalRead(bytes.NewReader(key[len(key)-5:])); err == nil {
			sers = append(sers, ser)
		}
		return cursor.Continue()
	})
	// "done" is the early-stop sentinel, not a failure.
	if err != nil && err.Error() != "done" {
		return nil, err
	}
	// Sort by serial (ascending)
	sort.Slice(sers, func(i, j int) bool {
		return sers[i].Get() < sers[j].Get()
	})
	return sers, nil
}
}
// QueryForIds retrieves IdPkTs records based on a filter.
// Results are sorted by timestamp in reverse chronological order; search
// queries are instead ranked by a blend of index match count and recency.
// Filters carrying Ids are rejected — resolve those via GetSerialsByIds.
func (w *W) QueryForIds(c context.Context, f *filter.F) (idPkTs []*store.IdPkTs, err error) {
	if f.Ids != nil && f.Ids.Len() > 0 {
		err = errors.New("query for Ids is invalid for a filter with Ids")
		return
	}
	var idxs []database.Range
	if idxs, err = database.GetIndexesFromFilter(f); chk.E(err) {
		return
	}
	var results []*store.IdPkTs
	results = make([]*store.IdPkTs, 0, len(idxs)*100)
	// Track match counts for search ranking
	counts := make(map[uint64]int)
	for _, idx := range idxs {
		// Range-local errors: a failing index range is logged and skipped,
		// and must not leak into the named return value. (Previously a
		// failure on the final range left err non-nil on an otherwise
		// successful return, turning partial results into a query error.)
		founds, rangeErr := w.GetSerialsByRange(idx)
		if rangeErr != nil {
			w.Logger.Warnf("QueryForIds: GetSerialsByRange error: %v", rangeErr)
			continue
		}
		tmp, fullErr := w.GetFullIdPubkeyBySerials(founds)
		if fullErr != nil {
			w.Logger.Warnf("QueryForIds: GetFullIdPubkeyBySerials error: %v", fullErr)
			continue
		}
		// Track match counts for search queries
		if len(f.Search) > 0 {
			for _, v := range tmp {
				counts[v.Ser]++
			}
		}
		results = append(results, tmp...)
	}
	// Deduplicate results (the same serial may appear under several indexes)
	seen := make(map[uint64]struct{}, len(results))
	idPkTs = make([]*store.IdPkTs, 0, len(results))
	for _, idpk := range results {
		if _, ok := seen[idpk.Ser]; !ok {
			seen[idpk.Ser] = struct{}{}
			idPkTs = append(idPkTs, idpk)
		}
	}
	// For search queries combined with other filters, fetch the events and
	// verify author/kind/tag matches (the word index alone cannot).
	if len(f.Search) > 0 && ((f.Authors != nil && f.Authors.Len() > 0) ||
		(f.Kinds != nil && f.Kinds.Len() > 0) ||
		(f.Tags != nil && f.Tags.Len() > 0)) {
		// Build serial list for fetching
		serials := make([]*types.Uint40, 0, len(idPkTs))
		for _, v := range idPkTs {
			s := new(types.Uint40)
			s.Set(v.Ser)
			serials = append(serials, s)
		}
		var evs map[uint64]*event.E
		if evs, err = w.FetchEventsBySerials(serials); chk.E(err) {
			return
		}
		filtered := make([]*store.IdPkTs, 0, len(idPkTs))
		for _, v := range idPkTs {
			ev, ok := evs[v.Ser]
			if !ok || ev == nil {
				continue
			}
			matchesAll := true
			if f.Authors != nil && f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
				matchesAll = false
			}
			if matchesAll && f.Kinds != nil && f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
				matchesAll = false
			}
			if matchesAll && f.Tags != nil && f.Tags.Len() > 0 {
				// Every filter tag must be matched by at least one event tag.
				tagOK := true
				for _, t := range *f.Tags {
					if t.Len() < 2 {
						continue
					}
					key := t.Key()
					values := t.T[1:]
					if !ev.Tags.ContainsAny(key, values) {
						tagOK = false
						break
					}
				}
				if !tagOK {
					matchesAll = false
				}
			}
			if matchesAll {
				filtered = append(filtered, v)
			}
		}
		idPkTs = filtered
	}
	// Sort by timestamp (newest first)
	if len(f.Search) == 0 {
		sort.Slice(idPkTs, func(i, j int) bool {
			return idPkTs[i].Ts > idPkTs[j].Ts
		})
	} else {
		// Search ranking: score = 0.5*normalized match count + 0.5*normalized
		// recency, ties broken by newest-first.
		var maxCount int
		var minTs, maxTs int64
		if len(idPkTs) > 0 {
			minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
		}
		for _, v := range idPkTs {
			if c := counts[v.Ser]; c > maxCount {
				maxCount = c
			}
			if v.Ts < minTs {
				minTs = v.Ts
			}
			if v.Ts > maxTs {
				maxTs = v.Ts
			}
		}
		tsSpan := maxTs - minTs
		if tsSpan <= 0 {
			tsSpan = 1
		}
		if maxCount <= 0 {
			maxCount = 1
		}
		sort.Slice(idPkTs, func(i, j int) bool {
			ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
			cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
			ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
			aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
			si := 0.5*ci + 0.5*ai
			sj := 0.5*cj + 0.5*aj
			if si == sj {
				return idPkTs[i].Ts > idPkTs[j].Ts
			}
			return si > sj
		})
	}
	// Apply limit
	if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
		idPkTs = idPkTs[:*f.Limit]
	}
	return
}
// QueryForSerials takes a filter and returns the serials of matching events.
// Id filters are resolved directly; everything else goes through QueryForIds.
func (w *W) QueryForSerials(c context.Context, f *filter.F) (sers types.Uint40s, err error) {
	var idPkTs []*store.IdPkTs
	if f.Ids != nil && f.Ids.Len() > 0 {
		// Resolve requested ids straight to serials via batch lookup.
		var serialMap map[string]*types.Uint40
		if serialMap, err = w.GetSerialsByIds(f.Ids); chk.E(err) {
			return
		}
		founds := make([]*types.Uint40, 0, len(serialMap))
		for _, ser := range serialMap {
			founds = append(founds, ser)
		}
		if idPkTs, err = w.GetFullIdPubkeyBySerials(founds); chk.E(err) {
			return
		}
	} else if idPkTs, err = w.QueryForIds(c, f); chk.E(err) {
		return
	}
	// Project the id/pubkey/timestamp records down to bare serials.
	for _, idpk := range idPkTs {
		ser := new(types.Uint40)
		if err = ser.Set(idpk.Ser); chk.E(err) {
			continue
		}
		sers = append(sers, ser)
	}
	return
}
// QueryEvents queries events based on a filter, returning only the latest
// version of replaceable events and including deletion events.
func (w *W) QueryEvents(c context.Context, f *filter.F) (evs event.S, err error) {
	evs, err = w.QueryEventsWithOptions(c, f, true, false)
	return
}
// QueryAllVersions queries events and returns every stored version of
// replaceable/addressable events, not just the newest.
func (w *W) QueryAllVersions(c context.Context, f *filter.F) (evs event.S, err error) {
	evs, err = w.QueryEventsWithOptions(c, f, true, true)
	return
}
// QueryEventsWithOptions queries events with additional options for deletion
// handling and versioning. showAllVersions keeps every stored version of
// replaceable/addressable events; otherwise only the newest per key survives.
// Expired events encountered along the way are collected and deleted in a
// background goroutine — for BOTH the ids path and the general path
// (previously the ids path collected expired events but never deleted them).
//
// NOTE(review): includeDeleteEvents is currently unused; kind-5 events are
// returned only when kind 5 appears explicitly in f.Kinds. Confirm intent.
func (w *W) QueryEventsWithOptions(c context.Context, f *filter.F, includeDeleteEvents bool, showAllVersions bool) (evs event.S, err error) {
	wantMultipleVersions := showAllVersions || (f.Limit != nil && *f.Limit > 1)
	var expDeletes types.Uint40s
	var expEvs event.S
	// Handle ID-based queries
	if f.Ids != nil && f.Ids.Len() > 0 {
		w.Logger.Debugf("QueryEvents: ids path, count=%d", f.Ids.Len())
		serials, idErr := w.GetSerialsByIds(f.Ids)
		if idErr != nil {
			w.Logger.Warnf("QueryEvents: error looking up ids: %v", idErr)
		}
		// Convert to slice for batch fetch
		var serialsSlice []*types.Uint40
		idHexToSerial := make(map[uint64]string, len(serials))
		for idHex, ser := range serials {
			serialsSlice = append(serialsSlice, ser)
			idHexToSerial[ser.Get()] = idHex
		}
		// Batch fetch events
		var fetchedEvents map[uint64]*event.E
		if fetchedEvents, err = w.FetchEventsBySerials(serialsSlice); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed: %v", err)
			return
		}
		// Process fetched events
		for serialValue, ev := range fetchedEvents {
			idHex := idHexToSerial[serialValue]
			ser := new(types.Uint40)
			// Loop-local error so a failed Set cannot leak into the named
			// return after the continue.
			if serErr := ser.Set(serialValue); serErr != nil {
				continue
			}
			// Check expiration
			if CheckExpiration(ev) {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to expiration", idHex)
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}
			// Check for deletion
			if derr := w.CheckForDeleted(ev, nil); derr != nil {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
				continue
			}
			evs = append(evs, ev)
		}
		// Sort newest-first and apply limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}
	} else {
		// Non-IDs path
		var idPkTs []*store.IdPkTs
		if idPkTs, err = w.QueryForIds(c, f); chk.E(err) {
			return
		}
		// Maps for replaceable event handling
		replaceableEvents := make(map[string]*event.E)
		replaceableEventVersions := make(map[string]event.S)
		paramReplaceableEvents := make(map[string]map[string]*event.E)
		paramReplaceableEventVersions := make(map[string]map[string]event.S)
		var regularEvents event.S
		// Deletion tracking maps
		deletionsByKindPubkey := make(map[string]bool)
		deletionsByKindPubkeyDTag := make(map[string]map[string]int64)
		deletedEventIds := make(map[string]bool)
		// Query for deletion events if we have authors, so the passes below
		// can honor their kind-5 events even when the filter excludes kind 5.
		if f.Authors != nil && f.Authors.Len() > 0 {
			deletionFilter := &filter.F{
				Kinds:   kind.NewS(kind.New(5)),
				Authors: f.Authors,
			}
			// Best-effort: a failed deletion lookup must not poison err.
			if deletionIdPkTs, dErr := w.QueryForIds(c, deletionFilter); dErr == nil {
				idPkTs = append(idPkTs, deletionIdPkTs...)
			}
		}
		// Prepare serials for batch fetch
		var allSerials []*types.Uint40
		for _, idpk := range idPkTs {
			ser := new(types.Uint40)
			if serErr := ser.Set(idpk.Ser); serErr != nil {
				continue
			}
			allSerials = append(allSerials, ser)
		}
		// Batch fetch all events
		var allEvents map[uint64]*event.E
		if allEvents, err = w.FetchEventsBySerials(allSerials); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed in non-IDs path: %v", err)
			return
		}
		// First pass: collect deletion events and expired events
		for serialValue, ev := range allEvents {
			ser := new(types.Uint40)
			if serErr := ser.Set(serialValue); serErr != nil {
				continue
			}
			if CheckExpiration(ev) {
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}
			if ev.Kind == kind.Deletion.K {
				// Process a-tags: deletions addressed by kind:pubkey[:d-tag]
				aTags := ev.Tags.GetAll([]byte("a"))
				for _, aTag := range aTags {
					if aTag.Len() < 2 {
						continue
					}
					split := bytes.Split(aTag.Value(), []byte{':'})
					if len(split) < 2 {
						continue
					}
					kindInt, parseErr := strconv.Atoi(string(split[0]))
					if parseErr != nil {
						continue
					}
					kk := kind.New(uint16(kindInt))
					if !kind.IsReplaceable(kk.K) {
						continue
					}
					// Only the author may delete their own events.
					pk, decErr := hex.DecAppend(nil, split[1])
					if decErr != nil {
						continue
					}
					if !utils.FastEqual(pk, ev.Pubkey) {
						continue
					}
					key := hex.Enc(pk) + ":" + strconv.Itoa(int(kk.K))
					if kind.IsParameterizedReplaceable(kk.K) {
						if len(split) < 3 {
							continue
						}
						if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
							deletionsByKindPubkeyDTag[key] = make(map[string]int64)
						}
						dValue := string(split[2])
						// Keep only the newest deletion timestamp per d-tag.
						if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
							deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
						}
					} else {
						deletionsByKindPubkey[key] = true
					}
				}
				// Process e-tags for specific event deletions
				eTags := ev.Tags.GetAll([]byte("e"))
				for _, eTag := range eTags {
					eTagHex := eTag.ValueHex()
					if len(eTagHex) != 64 {
						continue
					}
					evId := make([]byte, sha256.Size)
					if _, hexErr := hex.DecBytes(evId, eTagHex); hexErr != nil {
						continue
					}
					// Look for target in current batch
					var targetEv *event.E
					for _, candidateEv := range allEvents {
						if utils.FastEqual(candidateEv.ID, evId) {
							targetEv = candidateEv
							break
						}
					}
					// Try to fetch if not in batch
					if targetEv == nil {
						ser, serErr := w.GetSerialById(evId)
						if serErr != nil || ser == nil {
							continue
						}
						targetEv, serErr = w.FetchEventBySerial(ser)
						if serErr != nil || targetEv == nil {
							continue
						}
					}
					// Deletion is only honored against the author's own event.
					if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
						continue
					}
					deletedEventIds[hex.Enc(targetEv.ID)] = true
				}
			}
		}
		// Second pass: process all events, filtering deleted ones
		for _, ev := range allEvents {
			// Tag filter verification
			if f.Tags != nil && f.Tags.Len() > 0 {
				tagMatches := 0
				for _, filterTag := range *f.Tags {
					if filterTag.Len() >= 2 {
						filterKey := filterTag.Key()
						var actualKey []byte
						// Filter keys may arrive as "#e"; the event stores "e".
						if len(filterKey) == 2 && filterKey[0] == '#' {
							actualKey = filterKey[1:]
						} else {
							actualKey = filterKey
						}
						eventHasTag := false
						if ev.Tags != nil {
							for _, eventTag := range *ev.Tags {
								if eventTag.Len() >= 2 && bytes.Equal(eventTag.Key(), actualKey) {
									for _, filterValue := range filterTag.T[1:] {
										if database.TagValuesMatchUsingTagMethods(eventTag, filterValue) {
											eventHasTag = true
											break
										}
									}
									if eventHasTag {
										break
									}
								}
							}
						}
						if eventHasTag {
							tagMatches++
						}
					}
				}
				if tagMatches < f.Tags.Len() {
					continue
				}
			}
			// Skip deletion events unless explicitly requested via f.Kinds
			if ev.Kind == kind.Deletion.K {
				kind5Requested := false
				if f.Kinds != nil && f.Kinds.Len() > 0 {
					for i := 0; i < f.Kinds.Len(); i++ {
						if f.Kinds.K[i].K == kind.Deletion.K {
							kind5Requested = true
							break
						}
					}
				}
				if !kind5Requested {
					continue
				}
			}
			// An event explicitly requested by id bypasses deletion filtering
			isIdInFilter := false
			if f.Ids != nil && f.Ids.Len() > 0 {
				for i := 0; i < f.Ids.Len(); i++ {
					if utils.FastEqual(ev.ID, (*f.Ids).T[i]) {
						isIdInFilter = true
						break
					}
				}
			}
			// Check if specifically deleted
			eventIdHex := hex.Enc(ev.ID)
			if deletedEventIds[eventIdHex] {
				continue
			}
			// Handle replaceable events
			if kind.IsReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				if deletionsByKindPubkey[key] && !isIdInFilter {
					continue
				} else if wantMultipleVersions {
					replaceableEventVersions[key] = append(replaceableEventVersions[key], ev)
				} else {
					existing, exists := replaceableEvents[key]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						replaceableEvents[key] = ev
					}
				}
			} else if kind.IsParameterizedReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				dTag := ev.Tags.GetFirst([]byte("d"))
				var dValue string
				if dTag != nil && dTag.Len() > 1 {
					dValue = string(dTag.Value())
				}
				// Drop versions older than the latest deletion for this d-tag.
				if deletionMap, exists := deletionsByKindPubkeyDTag[key]; exists {
					if delTs, ok := deletionMap[dValue]; ok && ev.CreatedAt < delTs && !isIdInFilter {
						continue
					}
				}
				if wantMultipleVersions {
					if _, exists := paramReplaceableEventVersions[key]; !exists {
						paramReplaceableEventVersions[key] = make(map[string]event.S)
					}
					paramReplaceableEventVersions[key][dValue] = append(paramReplaceableEventVersions[key][dValue], ev)
				} else {
					if _, exists := paramReplaceableEvents[key]; !exists {
						paramReplaceableEvents[key] = make(map[string]*event.E)
					}
					existing, exists := paramReplaceableEvents[key][dValue]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						paramReplaceableEvents[key][dValue] = ev
					}
				}
			} else {
				regularEvents = append(regularEvents, ev)
			}
		}
		// Collect replaceable results
		if wantMultipleVersions {
			for _, versions := range replaceableEventVersions {
				sort.Slice(versions, func(i, j int) bool {
					return versions[i].CreatedAt > versions[j].CreatedAt
				})
				limit := len(versions)
				if f.Limit != nil && int(*f.Limit) < limit {
					limit = int(*f.Limit)
				}
				for i := 0; i < limit; i++ {
					evs = append(evs, versions[i])
				}
			}
		} else {
			for _, ev := range replaceableEvents {
				evs = append(evs, ev)
			}
		}
		// Collect parameterized replaceable results
		if wantMultipleVersions {
			for _, dTagMap := range paramReplaceableEventVersions {
				for _, versions := range dTagMap {
					sort.Slice(versions, func(i, j int) bool {
						return versions[i].CreatedAt > versions[j].CreatedAt
					})
					limit := len(versions)
					if f.Limit != nil && int(*f.Limit) < limit {
						limit = int(*f.Limit)
					}
					for i := 0; i < limit; i++ {
						evs = append(evs, versions[i])
					}
				}
			}
		} else {
			for _, innerMap := range paramReplaceableEvents {
				for _, ev := range innerMap {
					evs = append(evs, ev)
				}
			}
		}
		evs = append(evs, regularEvents...)
		// Sort newest-first and apply the overall limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}
	}
	// Delete expired events in the background. Hoisted out of the non-IDs
	// branch so the ids path's expired events are cleaned up too.
	if len(expDeletes) > 0 {
		go func() {
			for i, ser := range expDeletes {
				w.DeleteEventBySerial(context.Background(), ser, expEvs[i])
			}
		}()
	}
	return
}
// QueryDeleteEventsByTargetId returns kind-5 deletion events whose "#e" tag
// references the given event id.
func (w *W) QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte) (evs event.S, err error) {
	deletionFilter := &filter.F{
		Kinds: kind.NewS(kind.Deletion),
		Tags: tag.NewS(
			tag.NewFromAny("#e", hex.Enc(targetEventId)),
		),
	}
	evs, err = w.QueryEventsWithOptions(c, deletionFilter, true, false)
	return
}
// CountEvents counts events matching a filter. Counts are always exact, so
// approx is always false. A nil filter matches nothing.
func (w *W) CountEvents(c context.Context, f *filter.F) (count int, approx bool, err error) {
	if f == nil {
		return 0, false, nil
	}
	// Id filters resolve directly to serials via batch lookup.
	if f.Ids != nil && f.Ids.Len() > 0 {
		var serials map[string]*types.Uint40
		if serials, err = w.GetSerialsByIds(f.Ids); err != nil {
			return 0, false, err
		}
		return len(serials), false, nil
	}
	// Everything else goes through the general serial query.
	var sers types.Uint40s
	if sers, err = w.QueryForSerials(c, f); err != nil {
		return 0, false, err
	}
	return len(sers), false, nil
}
// GetSerialsFromFilter is an alias for QueryForSerials kept for interface
// compatibility; it runs against the database's own context.
func (w *W) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
	serials, err = w.QueryForSerials(w.ctx, f)
	return
}

26
pkg/wasmdb/run-tests.sh

@ -0,0 +1,26 @@ @@ -0,0 +1,26 @@
#!/bin/bash
# Run wasmdb tests using Node.js with fake-indexeddb
# This script builds the test binary and runs it in Node.js
# Any extra command-line arguments are forwarded to the Node test runner.
# Abort on the first failing command.
set -e
# Resolve the directory containing this script so it works from any cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TESTDATA_DIR="$SCRIPT_DIR/testdata"
WASM_FILE="$TESTDATA_DIR/wasmdb_test.wasm"
# Ensure Node.js dependencies are installed
# (node_modules provides the fake-indexeddb shim used by the runner).
if [ ! -d "$TESTDATA_DIR/node_modules" ]; then
echo "Installing Node.js dependencies..."
cd "$TESTDATA_DIR"
npm install
# Return to the previous directory quietly.
cd - > /dev/null
fi
# Build the test binary
# Compile the package's tests into a standalone wasm binary (cgo is
# unavailable under GOOS=js, hence CGO_ENABLED=0).
echo "Building WASM test binary..."
GOOS=js GOARCH=wasm CGO_ENABLED=0 go test -c -o "$WASM_FILE" "$SCRIPT_DIR"
# Run the tests
echo "Running tests in Node.js..."
node "$TESTDATA_DIR/run_wasm_tests.mjs" "$WASM_FILE" "$@"

423
pkg/wasmdb/save-event.go

@ -0,0 +1,423 @@ @@ -0,0 +1,423 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
)
// Sentinel errors surfaced by SaveEvent/WouldReplaceEvent; match them with
// errors.Is.
var (
	// ErrOlderThanExisting is returned when a candidate event is older than an existing replaceable/addressable event.
	ErrOlderThanExisting = errors.New("older than existing event")
	// ErrMissingDTag is returned when a parameterized replaceable event lacks the required 'd' tag.
	ErrMissingDTag = errors.New("event is missing a d tag identifier")
)
// SaveEvent saves an event to the database, generating all necessary indexes.
//
// Ephemeral events are rejected outright, kind-3 follow lists must carry at
// least one p tag, and duplicates (by id) are blocked. Small events
// (<= 1024 bytes) are stored inline in their index key; larger ones go in a
// separate value store. replaced reports whether the event supersedes an
// existing replaceable/addressable event.
func (w *W) SaveEvent(c context.Context, ev *event.E) (replaced bool, err error) {
	if ev == nil {
		err = errors.New("nil event")
		return
	}
	// Reject ephemeral events (kinds 20000-29999) - they should never be stored
	if ev.Kind >= 20000 && ev.Kind <= 29999 {
		err = errors.New("blocked: ephemeral events should not be stored")
		return
	}
	// Validate kind 3 (follow list) events have at least one p tag
	if ev.Kind == 3 {
		hasPTag := false
		if ev.Tags != nil {
			for _, t := range *ev.Tags {
				if t != nil && t.Len() >= 2 {
					key := t.Key()
					if len(key) == 1 && key[0] == 'p' {
						hasPTag = true
						break
					}
				}
			}
		}
		if !hasPTag {
			w.Logger.Warnf("SaveEvent: rejecting kind 3 event without p tags from pubkey %x", ev.Pubkey)
			err = errors.New("blocked: kind 3 follow list events must have at least one p tag")
			return
		}
	}
	// Check if the event already exists
	var ser *types.Uint40
	if ser, err = w.GetSerialById(ev.ID); err == nil && ser != nil {
		err = errors.New("blocked: event already exists: " + hex.Enc(ev.ID[:]))
		return
	}
	// If the error is "id not found", we can proceed; any other lookup
	// failure is fatal.
	if err != nil && strings.Contains(err.Error(), "id not found") {
		err = nil
	} else if err != nil {
		return
	}
	// Check for replacement - only validate, don't delete old events
	if kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind) {
		var werr error
		if replaced, _, werr = w.WouldReplaceEvent(ev); werr != nil {
			if errors.Is(werr, ErrOlderThanExisting) {
				if kind.IsReplaceable(ev.Kind) {
					err = errors.New("blocked: event is older than existing replaceable event")
				} else {
					err = errors.New("blocked: event is older than existing addressable event")
				}
				return
			}
			if errors.Is(werr, ErrMissingDTag) {
				err = ErrMissingDTag
				return
			}
			return
		}
	}
	// Get the next sequence number for the event
	serial, err := w.nextEventSerial()
	if err != nil {
		return
	}
	// Generate all indexes for the event
	idxs, err := database.GetIndexesForEvent(ev, serial)
	if err != nil {
		return
	}
	// Serialize event to binary
	eventDataBuf := new(bytes.Buffer)
	ev.MarshalBinary(eventDataBuf)
	eventData := eventDataBuf.Bytes()
	// Determine storage strategy
	smallEventThreshold := 1024 // Could be made configurable
	isSmallEvent := len(eventData) <= smallEventThreshold
	isReplaceableEvent := kind.IsReplaceable(ev.Kind)
	isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)
	// Create serial type
	ser = new(types.Uint40)
	if err = ser.Set(serial); chk.E(err) {
		return
	}
	// Start a transaction to save the event and all its indexes
	// We need to include all object stores we'll write to
	storesToWrite := []string{
		string(indexes.IdPrefix),
		string(indexes.FullIdPubkeyPrefix),
		string(indexes.CreatedAtPrefix),
		string(indexes.PubkeyPrefix),
		string(indexes.KindPrefix),
		string(indexes.KindPubkeyPrefix),
		string(indexes.TagPrefix),
		string(indexes.TagKindPrefix),
		string(indexes.TagPubkeyPrefix),
		string(indexes.TagKindPubkeyPrefix),
		string(indexes.WordPrefix),
	}
	// Add event storage store
	if isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.SmallEventPrefix))
	} else {
		storesToWrite = append(storesToWrite, string(indexes.EventPrefix))
	}
	// Add specialized stores if needed
	if isAddressableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.AddressableEventPrefix))
	} else if isReplaceableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.ReplaceableEventPrefix))
	}
	// Start transaction
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storesToWrite[0], storesToWrite[1:]...)
	if err != nil {
		return false, fmt.Errorf("failed to start transaction: %w", err)
	}
	// Save each index to its respective object store
	for _, key := range idxs {
		if len(key) < 3 {
			continue
		}
		// Extract store name from 3-byte prefix
		storeName := string(key[:3])
		store, storeErr := tx.ObjectStore(storeName)
		if storeErr != nil {
			w.Logger.Warnf("SaveEvent: failed to get object store %s: %v", storeName, storeErr)
			continue
		}
		// Use the full key as the IndexedDB key, empty value
		keyJS := bytesToSafeValue(key)
		_, putErr := store.PutKey(keyJS, safejs.Null())
		if putErr != nil {
			w.Logger.Warnf("SaveEvent: failed to put index %s: %v", storeName, putErr)
		}
	}
	// Store the event data. PutKey failures are logged (previously they were
	// silently discarded, so an event body could fail to persist while its
	// indexes committed).
	if isSmallEvent {
		// Small event: store inline with sev prefix
		// Format: sev|serial|size_uint16|event_data
		keyBuf := new(bytes.Buffer)
		if err = indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		// Append size as uint16 big-endian
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)
		store, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			if _, putErr := store.PutKey(keyJS, safejs.Null()); putErr != nil {
				w.Logger.Warnf("SaveEvent: failed to store small event: %v", putErr)
			}
		} else {
			w.Logger.Warnf("SaveEvent: failed to get small event store: %v", storeErr)
		}
	} else {
		// Large event: store separately with evt prefix
		keyBuf := new(bytes.Buffer)
		if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		store, storeErr := tx.ObjectStore(string(indexes.EventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			valueJS := bytesToSafeValue(eventData)
			if _, putErr := store.PutKey(keyJS, valueJS); putErr != nil {
				w.Logger.Warnf("SaveEvent: failed to store large event: %v", putErr)
			}
		} else {
			w.Logger.Warnf("SaveEvent: failed to get large event store: %v", storeErr)
		}
	}
	// Store specialized keys for replaceable/addressable events
	if isAddressableEvent && isSmallEvent {
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag != nil {
			pubHash := new(types.PubHash)
			pubHash.FromPubkey(ev.Pubkey)
			kindVal := new(types.Uint16)
			kindVal.Set(ev.Kind)
			dTagHash := new(types.Ident)
			dTagHash.FromIdent(dTag.Value())
			keyBuf := new(bytes.Buffer)
			if err = indexes.AddressableEventEnc(pubHash, kindVal, dTagHash).MarshalWrite(keyBuf); chk.E(err) {
				return
			}
			sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
			keyBuf.Write(sizeBytes)
			keyBuf.Write(eventData)
			store, storeErr := tx.ObjectStore(string(indexes.AddressableEventPrefix))
			if storeErr == nil {
				keyJS := bytesToSafeValue(keyBuf.Bytes())
				if _, putErr := store.PutKey(keyJS, safejs.Null()); putErr != nil {
					w.Logger.Warnf("SaveEvent: failed to store addressable event: %v", putErr)
				}
			}
		}
	} else if isReplaceableEvent && isSmallEvent {
		pubHash := new(types.PubHash)
		pubHash.FromPubkey(ev.Pubkey)
		kindVal := new(types.Uint16)
		kindVal.Set(ev.Kind)
		keyBuf := new(bytes.Buffer)
		if err = indexes.ReplaceableEventEnc(pubHash, kindVal).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)
		store, storeErr := tx.ObjectStore(string(indexes.ReplaceableEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			if _, putErr := store.PutKey(keyJS, safejs.Null()); putErr != nil {
				w.Logger.Warnf("SaveEvent: failed to store replaceable event: %v", putErr)
			}
		}
	}
	// Commit transaction
	if err = tx.Await(c); err != nil {
		return false, fmt.Errorf("failed to commit transaction: %w", err)
	}
	w.Logger.Debugf("SaveEvent: saved event %x (kind %d, %d bytes, %d indexes)",
		ev.ID[:8], ev.Kind, len(eventData), len(idxs))
	return
}
// WouldReplaceEvent checks if the provided event would replace existing
// events. Returns the would-replace flag, the serials that would be replaced,
// and an error (ErrMissingDTag for a parameterized replaceable event without
// a 'd' tag).
//
// NOTE: this is a simplified placeholder — it never reports a conflict. A
// full implementation would query existing events and compare timestamps.
func (w *W) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
	// Only relevant for replaceable or parameterized replaceable kinds
	if !(kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind)) {
		return false, nil, nil
	}
	if kind.IsReplaceable(ev.Kind) {
		// For now, simplified check - would need full filter implementation
		return false, nil, nil
	}
	// Parameterized replaceable requires a 'd' tag.
	if ev.Tags.GetFirst([]byte("d")) == nil {
		return false, nil, ErrMissingDTag
	}
	// Simplified implementation - assume no conflicts for now
	return false, nil, nil
}
// GetSerialById looks up the serial number for a 32-byte event ID by scanning
// the id index store for a key starting with the id-hash prefix.
//
// NOTE(review): this is a full-store cursor scan from the first key; an
// IndexedDB key-range seek on the prefix would avoid visiting unrelated keys.
func (w *W) GetSerialById(id []byte) (ser *types.Uint40, err error) {
	if len(id) != 32 {
		return nil, errors.New("invalid event ID length")
	}
	// Create ID hash
	idHash := new(types.IdHash)
	if err = idHash.FromId(id); chk.E(err) {
		return nil, err
	}
	// Build the prefix to search for
	keyBuf := new(bytes.Buffer)
	indexes.IdEnc(idHash, nil).MarshalWrite(keyBuf)
	prefix := keyBuf.Bytes()[:11] // 3 prefix + 8 id hash
	// Search in the eid object store
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}
	// Use cursor to find matching key
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	// errFound is a local sentinel used to stop iteration on a hit; matching
	// with errors.Is is robust, unlike the previous err.Error() text compare.
	errFound := errors.New("found")
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
			// Found matching key, extract serial from last 5 bytes
			if len(keyBytes) >= 16 { // 3 + 8 + 5
				ser = new(types.Uint40)
				ser.UnmarshalRead(bytes.NewReader(keyBytes[11:16]))
				return errFound // Stop iteration
			}
		}
		return cursor.Continue()
	})
	if ser != nil {
		return ser, nil
	}
	if err != nil && !errors.Is(err, errFound) {
		return nil, err
	}
	return nil, errors.New("id not found in database")
}
// GetSerialsByIds looks up serial numbers for multiple event IDs.
// Ids may be raw 32-byte values or 64-char hex strings; the result maps the
// hex-encoded id to its serial. Lookups are best-effort: ids that fail to
// decode or are absent from the database are silently skipped, and the
// function never returns an error.
func (w *W) GetSerialsByIds(ids *tag.T) (serials map[string]*types.Uint40, err error) {
	serials = make(map[string]*types.Uint40)
	if ids == nil {
		return
	}
	// NOTE(review): iteration starts at index 1, treating element 0 as a tag
	// key. Other call sites iterate filter ids from index 0 (e.g. the id
	// check loop in QueryEventsWithOptions) — confirm which convention the
	// ids tag actually uses, otherwise the first id is never resolved.
	for i := 1; i < ids.Len(); i++ {
		idBytes := ids.T[i]
		if len(idBytes) == 64 {
			// Hex encoded ID
			var decoded []byte
			decoded, err = hex.Dec(string(idBytes))
			if err != nil {
				continue
			}
			idBytes = decoded
		}
		if len(idBytes) == 32 {
			var ser *types.Uint40
			ser, err = w.GetSerialById(idBytes)
			if err == nil && ser != nil {
				serials[hex.Enc(idBytes)] = ser
			}
		}
	}
	// Best-effort semantics: discard any lookup error left by the loop above.
	err = nil
	return
}
// GetSerialsByIdsWithFilter looks up serial numbers for the given ids, then
// keeps only those whose fetched event passes fn. A nil fn keeps everything.
func (w *W) GetSerialsByIdsWithFilter(ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool) (serials map[string]*types.Uint40, err error) {
	var all map[string]*types.Uint40
	if all, err = w.GetSerialsByIds(ids); err != nil {
		return nil, err
	}
	if fn == nil {
		return all, nil
	}
	serials = make(map[string]*types.Uint40)
	for idHex, ser := range all {
		// Serials whose event cannot be fetched are silently dropped.
		ev, fetchErr := w.FetchEventBySerial(ser)
		if fetchErr != nil {
			continue
		}
		if fn(ev, ser) {
			serials[idHex] = ser
		}
	}
	return serials, nil
}

332
pkg/wasmdb/subscriptions.go

@ -0,0 +1,332 @@ @@ -0,0 +1,332 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"next.orly.dev/pkg/database"
)
const (
	// SubscriptionsStoreName is the IndexedDB object store holding both
	// subscription records (keys "sub:<pubkey>") and payment records.
	SubscriptionsStoreName = "subscriptions"
	// PaymentsPrefix is the key prefix for payment records; full keys take
	// the form "payment:<pubkey>:<RFC3339Nano timestamp>".
	PaymentsPrefix = "payment:"
)
// GetSubscription retrieves the stored subscription record for a pubkey.
// It returns (nil, nil) when no record exists.
func (w *W) GetSubscription(pubkey []byte) (*database.Subscription, error) {
	data, err := w.getStoreValue(SubscriptionsStoreName, "sub:"+string(pubkey))
	switch {
	case err != nil:
		return nil, err
	case data == nil:
		return nil, nil
	default:
		return w.deserializeSubscription(data)
	}
}
// IsSubscriptionActive checks whether a pubkey has an active subscription.
// A missing record is treated as first contact: a 30-day trial is created,
// persisted, and reported as active.
func (w *W) IsSubscriptionActive(pubkey []byte) (bool, error) {
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}
	now := time.Now()
	if data == nil {
		// First sighting of this pubkey: persist a fresh 30-day trial.
		trial := &database.Subscription{TrialEnd: now.AddDate(0, 0, 30)}
		if err = w.setStoreValue(SubscriptionsStoreName, key, w.serializeSubscription(trial)); err != nil {
			return false, err
		}
		return true, nil
	}
	sub, err := w.deserializeSubscription(data)
	if err != nil {
		return false, err
	}
	// Active while inside the trial window or a non-zero paid period.
	if now.Before(sub.TrialEnd) {
		return true, nil
	}
	return !sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil), nil
}
// ExtendSubscription extends a subscription's paid period by the given number
// of days. If no record exists, a new one is created starting now; if the
// existing paid period is still in the future, the extension stacks onto it.
func (w *W) ExtendSubscription(pubkey []byte, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}
	now := time.Now()
	sub := &database.Subscription{}
	if data != nil {
		if sub, err = w.deserializeSubscription(data); err != nil {
			return err
		}
	}
	// Stack onto an unexpired paid period; otherwise count from now.
	base := now
	if !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now) {
		base = sub.PaidUntil
	}
	sub.PaidUntil = base.AddDate(0, 0, days)
	return w.setStoreValue(SubscriptionsStoreName, key, w.serializeSubscription(sub))
}
// RecordPayment stores a payment record for a pubkey, keyed uniquely by the
// current time at nanosecond precision.
func (w *W) RecordPayment(pubkey []byte, amount int64, invoice, preimage string) error {
	ts := time.Now()
	rec := w.serializePayment(&database.Payment{
		Amount:    amount,
		Timestamp: ts,
		Invoice:   invoice,
		Preimage:  preimage,
	})
	// The nanosecond timestamp makes the key unique per payment.
	key := PaymentsPrefix + string(pubkey) + ":" + ts.Format(time.RFC3339Nano)
	return w.setStoreValue(SubscriptionsStoreName, key, rec)
}
// GetPaymentHistory retrieves all payments for a pubkey.
//
// It walks the entire subscriptions store with a forward cursor and keeps
// every record whose key starts with the payments prefix for this pubkey.
// Records that fail to deserialize are skipped rather than aborting the
// whole scan (the err from deserializePayment is deliberately shadowed).
//
// NOTE(review): the cursor visits every key in the store and filters in Go;
// a key-range-bounded cursor would avoid unrelated keys — confirm the idb
// API exposes OpenCursorRange before changing this.
func (w *W) GetPaymentHistory(pubkey []byte) ([]database.Payment, error) {
	prefix := PaymentsPrefix + string(pubkey) + ":"
	tx, err := w.db.Transaction(idb.TransactionReadOnly, SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}
	var payments []database.Payment
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	prefixBytes := []byte(prefix)
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if bytes.HasPrefix(keyBytes, prefixBytes) {
			val, valErr := cursor.Value()
			if valErr != nil {
				return valErr
			}
			valBytes := safeValueToBytes(val)
			// Corrupt payment records are silently skipped.
			if payment, err := w.deserializePayment(valBytes); err == nil {
				payments = append(payments, *payment)
			}
		}
		return cursor.Continue()
	})
	if err != nil {
		return nil, err
	}
	return payments, nil
}
// ExtendBlossomSubscription extends a blossom subscription with storage quota.
// The paid period is stacked onto any remaining active time; storage quota
// accumulates only when the previous subscription was still active at the
// time of the extension, otherwise it is reset to storageMB.
func (w *W) ExtendBlossomSubscription(pubkey []byte, level string, storageMB int64, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}
	now := time.Now()
	var sub *database.Subscription
	if data == nil {
		sub = &database.Subscription{
			PaidUntil:      now.AddDate(0, 0, days),
			BlossomLevel:   level,
			BlossomStorage: storageMB,
		}
	} else {
		sub, err = w.deserializeSubscription(data)
		if err != nil {
			return err
		}
		// Capture whether the subscription was active BEFORE extending
		// PaidUntil: after the extension PaidUntil is always in the future,
		// so checking it afterwards would accumulate storage even for
		// subscriptions that had already lapsed.
		wasActive := !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now)
		// Extend from current paid date if still active, otherwise from now.
		extendFrom := now
		if wasActive {
			extendFrom = sub.PaidUntil
		}
		sub.PaidUntil = extendFrom.AddDate(0, 0, days)
		// Set level and accumulate storage only across an active period.
		sub.BlossomLevel = level
		if sub.BlossomStorage > 0 && wasActive {
			sub.BlossomStorage += storageMB
		} else {
			sub.BlossomStorage = storageMB
		}
	}
	subData := w.serializeSubscription(sub)
	return w.setStoreValue(SubscriptionsStoreName, key, subData)
}
// GetBlossomStorageQuota returns the storage quota (in MB) for a pubkey.
// Missing or expired subscriptions report a quota of zero.
func (w *W) GetBlossomStorageQuota(pubkey []byte) (quotaMB int64, err error) {
	var sub *database.Subscription
	if sub, err = w.GetSubscription(pubkey); err != nil || sub == nil {
		return 0, err
	}
	// The quota only applies while the paid period is running.
	if sub.PaidUntil.IsZero() || time.Now().After(sub.PaidUntil) {
		return 0, nil
	}
	return sub.BlossomStorage, nil
}
// IsFirstTimeUser checks if a pubkey is a first-time user (no subscription
// history). On the first call for a pubkey it records a "firstlogin" marker
// so subsequent calls return false.
func (w *W) IsFirstTimeUser(pubkey []byte) (bool, error) {
	key := "firstlogin:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}
	if data != nil {
		return false, nil
	}
	// First time - record the login timestamp.
	loginData, err := json.Marshal(map[string]interface{}{
		"first_login": time.Now(),
	})
	if err != nil {
		return false, err
	}
	// Propagate the write error: silently dropping it would make every
	// subsequent call report the user as first-time again.
	if err := w.setStoreValue(SubscriptionsStoreName, key, loginData); err != nil {
		return false, err
	}
	return true, nil
}
// serializeSubscription encodes a subscription as JSON bytes. The marshal
// error is discarded: Subscription holds only JSON-safe fields.
func (w *W) serializeSubscription(s *database.Subscription) []byte {
	b, _ := json.Marshal(s)
	return b
}
// deserializeSubscription decodes JSON bytes into a subscription, returning
// an error when the payload is not valid JSON for the Subscription shape.
func (w *W) deserializeSubscription(data []byte) (*database.Subscription, error) {
	var sub database.Subscription
	if err := json.Unmarshal(data, &sub); err != nil {
		return nil, err
	}
	return &sub, nil
}
// serializePayment converts a payment to bytes.
// Layout: amount (8 BE) | unix timestamp (8 BE) | invoice len (4 BE) |
// invoice | preimage len (4 BE) | preimage.
func (w *W) serializePayment(p *database.Payment) []byte {
	out := make([]byte, 0, 24+len(p.Invoice)+len(p.Preimage))
	out = binary.BigEndian.AppendUint64(out, uint64(p.Amount))
	out = binary.BigEndian.AppendUint64(out, uint64(p.Timestamp.Unix()))
	out = binary.BigEndian.AppendUint32(out, uint32(len(p.Invoice)))
	out = append(out, p.Invoice...)
	out = binary.BigEndian.AppendUint32(out, uint32(len(p.Preimage)))
	out = append(out, p.Preimage...)
	return out
}
// deserializePayment converts bytes to a payment, validating the two
// length-prefixed string fields before slicing.
// Layout: amount (8 BE) | unix timestamp (8 BE) | invoice len (4 BE) |
// invoice | preimage len (4 BE) | preimage.
func (w *W) deserializePayment(data []byte) (*database.Payment, error) {
	if len(data) < 24 { // 8 + 8 + 4 + 4 minimum
		return nil, errors.New("invalid payment data")
	}
	p := &database.Payment{}
	p.Amount = int64(binary.BigEndian.Uint64(data[0:8]))
	p.Timestamp = time.Unix(int64(binary.BigEndian.Uint64(data[8:16])), 0)
	// Widen the length prefixes to uint64 before doing arithmetic: computing
	// 20+invLen+4 in uint32 wraps around for hostile length values, which
	// lets the bounds check pass and then panics on the slice expression.
	invLen := uint64(binary.BigEndian.Uint32(data[16:20]))
	if uint64(len(data)) < 20+invLen+4 {
		return nil, errors.New("invalid invoice length")
	}
	p.Invoice = string(data[20 : 20+invLen])
	offset := 20 + invLen
	preLen := uint64(binary.BigEndian.Uint32(data[offset : offset+4]))
	if uint64(len(data)) < offset+4+preLen {
		return nil, errors.New("invalid preimage length")
	}
	p.Preimage = string(data[offset+4 : offset+4+preLen])
	return p, nil
}

24
pkg/wasmdb/testdata/package-lock.json generated vendored

@@ -0,0 +1,24 @@
{
"name": "wasmdb-test",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "wasmdb-test",
"version": "1.0.0",
"dependencies": {
"fake-indexeddb": "^6.0.0"
}
},
"node_modules/fake-indexeddb": {
"version": "6.2.5",
"resolved": "https://registry.npmjs.org/fake-indexeddb/-/fake-indexeddb-6.2.5.tgz",
"integrity": "sha512-CGnyrvbhPlWYMngksqrSSUT1BAVP49dZocrHuK0SvtR0D5TMs5wP0o3j7jexDJW01KSadjBp1M/71o/KR3nD1w==",
"license": "Apache-2.0",
"engines": {
"node": ">=18"
}
}
}
}

12
pkg/wasmdb/testdata/package.json vendored

@@ -0,0 +1,12 @@
{
"name": "wasmdb-test",
"version": "1.0.0",
"description": "Node.js test harness for wasmdb WASM tests",
"type": "module",
"scripts": {
"test": "node run_wasm_tests.mjs"
},
"dependencies": {
"fake-indexeddb": "^6.0.0"
}
}

572
pkg/wasmdb/wasmdb.go

@@ -0,0 +1,572 @@
//go:build js && wasm
// Package wasmdb provides a WebAssembly-compatible database implementation
// using IndexedDB as the storage backend. It replicates the Badger database's
// index schema for full query compatibility.
//
// This implementation uses aperturerobotics/go-indexeddb (a fork of hack-pad/go-indexeddb)
// which provides full IndexedDB bindings with cursor/range support and transaction retry
// mechanisms to handle IndexedDB's transaction expiration issues in Go WASM.
//
// Architecture:
// - Each index type (evt, eid, kc-, pc-, etc.) maps to an IndexedDB object store
// - Keys are binary-encoded using the same format as the Badger implementation
// - Range queries use IndexedDB cursors with KeyRange bounds
// - Serial numbers are managed using a dedicated "meta" object store
package wasmdb
import (
"context"
"encoding/binary"
"errors"
"fmt"
"sync"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"lol.mleku.dev"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
)
const (
// DatabaseName is the IndexedDB database name
DatabaseName = "orly-nostr-relay"
// DatabaseVersion is incremented when schema changes require migration
DatabaseVersion = 1
// MetaStoreName holds metadata like serial counters
MetaStoreName = "meta"
// EventSerialKey is the key for the event serial counter in meta store
EventSerialKey = "event_serial"
// PubkeySerialKey is the key for the pubkey serial counter in meta store
PubkeySerialKey = "pubkey_serial"
// RelayIdentityKey is the key for the relay identity secret
RelayIdentityKey = "relay_identity"
)
// Object store names matching Badger index prefixes
var objectStoreNames = []string{
MetaStoreName,
string(indexes.EventPrefix), // "evt" - full events
string(indexes.SmallEventPrefix), // "sev" - small events inline
string(indexes.ReplaceableEventPrefix), // "rev" - replaceable events
string(indexes.AddressableEventPrefix), // "aev" - addressable events
string(indexes.IdPrefix), // "eid" - event ID index
string(indexes.FullIdPubkeyPrefix), // "fpc" - full ID + pubkey + timestamp
string(indexes.CreatedAtPrefix), // "c--" - created_at index
string(indexes.KindPrefix), // "kc-" - kind index
string(indexes.PubkeyPrefix), // "pc-" - pubkey index
string(indexes.KindPubkeyPrefix), // "kpc" - kind + pubkey index
string(indexes.TagPrefix), // "tc-" - tag index
string(indexes.TagKindPrefix), // "tkc" - tag + kind index
string(indexes.TagPubkeyPrefix), // "tpc" - tag + pubkey index
string(indexes.TagKindPubkeyPrefix), // "tkp" - tag + kind + pubkey index
string(indexes.WordPrefix), // "wrd" - word search index
string(indexes.ExpirationPrefix), // "exp" - expiration index
string(indexes.VersionPrefix), // "ver" - schema version
string(indexes.PubkeySerialPrefix), // "pks" - pubkey serial index
string(indexes.SerialPubkeyPrefix), // "spk" - serial to pubkey
string(indexes.EventPubkeyGraphPrefix), // "epg" - event-pubkey graph
string(indexes.PubkeyEventGraphPrefix), // "peg" - pubkey-event graph
"markers", // metadata key-value storage
"subscriptions", // payment subscriptions
"nip43", // NIP-43 membership
"invites", // invite codes
}
// W implements the database.Database interface using IndexedDB.
type W struct {
	ctx     context.Context    // lifetime context; its cancellation triggers Close
	cancel  context.CancelFunc // cancels ctx; called again (idempotently) on shutdown
	dataDir string             // Not really used in WASM, but kept for interface compatibility
	Logger  *logger
	db    *idb.Database // open IndexedDB handle; nil after Close/Wipe
	dbMu  sync.RWMutex  // guards db
	ready chan struct{} // closed by warmup once the store is usable
	// Serial counters (cached in memory, persisted to IndexedDB)
	eventSerial  uint64
	pubkeySerial uint64
	serialMu     sync.Mutex // guards eventSerial and pubkeySerial
}
// Ensure W implements database.Database interface at compile time
var _ database.Database = (*W)(nil)
// init registers the wasmdb database factory with the database package so
// the generic factory (presumably pkg/database/factory.go — confirm) can
// construct an IndexedDB-backed store when built for js/wasm.
func init() {
	database.RegisterWasmDBFactory(func(
		ctx context.Context,
		cancel context.CancelFunc,
		cfg *database.DatabaseConfig,
	) (database.Database, error) {
		return NewWithConfig(ctx, cancel, cfg)
	})
}
// NewWithConfig creates a new IndexedDB-based database instance.
// It opens (or creates) the IndexedDB database, restores the persisted
// serial counters, then launches two goroutines: one closing the ready
// channel after warmup, and one closing the database when ctx is cancelled.
func NewWithConfig(
	ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (*W, error) {
	w := &W{
		ctx:     ctx,
		cancel:  cancel,
		dataDir: cfg.DataDir,
		Logger:  NewLogger(lol.GetLogLevel(cfg.LogLevel)),
		ready:   make(chan struct{}),
	}
	// Open or create the IndexedDB database
	if err := w.openDatabase(); err != nil {
		return nil, fmt.Errorf("failed to open IndexedDB: %w", err)
	}
	// Load serial counters from storage
	if err := w.loadSerialCounters(); err != nil {
		return nil, fmt.Errorf("failed to load serial counters: %w", err)
	}
	// Start warmup goroutine; it closes w.ready when complete.
	go w.warmup()
	// Setup shutdown handler: cancel is idempotent, so calling it again
	// after ctx is already done is harmless.
	go func() {
		<-w.ctx.Done()
		w.cancel()
		w.Close()
	}()
	return w, nil
}
// New creates a new IndexedDB-based database instance with default
// configuration, delegating to NewWithConfig.
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (*W, error) {
	return NewWithConfig(ctx, cancel, &database.DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	})
}
// openDatabase opens or creates the IndexedDB database with all required
// object stores. The upgrade callback runs only when the browser reports a
// version change (first open or DatabaseVersion bump) and creates any
// object store from objectStoreNames that does not exist yet.
func (w *W) openDatabase() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()
	// Get the IndexedDB factory (panics if not available)
	factory := idb.Global()
	// Open the database with upgrade handler
	openReq, err := factory.Open(w.ctx, DatabaseName, DatabaseVersion, func(db *idb.Database, oldVersion, newVersion uint) error {
		// This is called when the database needs to be created or upgraded
		w.Logger.Infof("IndexedDB upgrade: version %d -> %d", oldVersion, newVersion)
		// Create all object stores
		for _, storeName := range objectStoreNames {
			// Check if store already exists
			if !w.hasObjectStore(db, storeName) {
				// Create object store without auto-increment (we manage keys manually)
				opts := idb.ObjectStoreOptions{}
				if _, err := db.CreateObjectStore(storeName, opts); err != nil {
					return fmt.Errorf("failed to create object store %s: %w", storeName, err)
				}
				w.Logger.Debugf("created object store: %s", storeName)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to open IndexedDB: %w", err)
	}
	// Await resolves once the open (and any upgrade) has completed.
	db, err := openReq.Await(w.ctx)
	if err != nil {
		return fmt.Errorf("failed to await IndexedDB open: %w", err)
	}
	w.db = db
	return nil
}
// hasObjectStore reports whether the database already contains an object
// store with the given name. Errors listing stores are treated as absent.
func (w *W) hasObjectStore(db *idb.Database, name string) bool {
	names, err := db.ObjectStoreNames()
	if err != nil {
		return false
	}
	found := false
	for _, candidate := range names {
		if candidate == name {
			found = true
			break
		}
	}
	return found
}
// loadSerialCounters restores the event and pubkey serial counters from the
// meta object store. Missing or malformed (non-8-byte) values leave the
// in-memory counter at zero.
func (w *W) loadSerialCounters() error {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()
	restore := func(key string, dst *uint64) error {
		raw, err := w.getMeta(key)
		if err != nil {
			return err
		}
		if len(raw) == 8 {
			*dst = binary.BigEndian.Uint64(raw)
		}
		return nil
	}
	if err := restore(EventSerialKey, &w.eventSerial); err != nil {
		return err
	}
	if err := restore(PubkeySerialKey, &w.pubkeySerial); err != nil {
		return err
	}
	w.Logger.Infof("loaded serials: event=%d, pubkey=%d", w.eventSerial, w.pubkeySerial)
	return nil
}
// getMeta retrieves a value from the meta object store, returning (nil, nil)
// when the key is absent. The transaction sequence is identical to
// getStoreValue with the meta store name, so delegate rather than
// duplicating the read-transaction logic.
func (w *W) getMeta(key string) ([]byte, error) {
	return w.getStoreValue(MetaStoreName, key)
}
// setMeta stores a value in the meta object store. The transaction sequence
// is identical to setStoreValue with the meta store name, so delegate rather
// than duplicating the write-transaction logic.
func (w *W) setMeta(key string, value []byte) error {
	return w.setStoreValue(MetaStoreName, key, value)
}
// bumpSerial increments *counter under serialMu, persists the new value to
// the meta store under key as 8 big-endian bytes, and returns it. Shared by
// nextEventSerial and nextPubkeySerial, which previously duplicated this.
func (w *W) bumpSerial(key string, counter *uint64) (uint64, error) {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()
	*counter++
	serial := *counter
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, serial)
	if err := w.setMeta(key, buf); err != nil {
		return 0, err
	}
	return serial, nil
}

// nextEventSerial returns the next event serial number and persists it.
func (w *W) nextEventSerial() (uint64, error) {
	return w.bumpSerial(EventSerialKey, &w.eventSerial)
}

// nextPubkeySerial returns the next pubkey serial number and persists it.
func (w *W) nextPubkeySerial() (uint64, error) {
	return w.bumpSerial(PubkeySerialKey, &w.pubkeySerial)
}
// warmup logs readiness and closes the ready channel; IndexedDB needs no
// priming after the database has been opened.
func (w *W) warmup() {
	w.Logger.Infof("IndexedDB database warmup complete, ready to serve requests")
	close(w.ready)
}
// Path returns the database path (not used in WASM; reported for interface
// compatibility only).
func (w *W) Path() string { return w.dataDir }

// Init initializes the database (no-op; all setup happens in NewWithConfig).
func (w *W) Init(path string) error { return nil }

// Sync flushes pending writes (IndexedDB handles persistence automatically).
func (w *W) Sync() error { return nil }
// Close releases the underlying IndexedDB handle. It is safe to call more
// than once; calls after the first are no-ops.
func (w *W) Close() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()
	if w.db == nil {
		return nil
	}
	w.db.Close()
	w.db = nil
	return nil
}
// Wipe removes all data by deleting the entire IndexedDB database, resets
// the in-memory serial counters, and reopens the database (which recreates
// all object stores via the upgrade callback).
func (w *W) Wipe() error {
	w.dbMu.Lock()
	// Close the current handle so the delete request is not blocked by an
	// open connection.
	if w.db != nil {
		w.db.Close()
		w.db = nil
	}
	factory := idb.Global()
	delReq, err := factory.DeleteDatabase(DatabaseName)
	if err != nil {
		w.dbMu.Unlock()
		return fmt.Errorf("failed to delete IndexedDB: %w", err)
	}
	if err := delReq.Await(w.ctx); err != nil {
		w.dbMu.Unlock()
		return fmt.Errorf("failed to await IndexedDB delete: %w", err)
	}
	// dbMu must be released before reopening: openDatabase acquires it
	// itself. Explicit unlocks on each path avoid the fragile pattern of
	// unlocking and re-locking just to satisfy a deferred Unlock.
	w.dbMu.Unlock()
	// Reset serial counters (guarded by their own mutex).
	w.serialMu.Lock()
	w.eventSerial = 0
	w.pubkeySerial = 0
	w.serialMu.Unlock()
	// Reopen the database (this will recreate all object stores).
	return w.openDatabase()
}
// SetLogLevel sets the logging level at runtime.
func (w *W) SetLogLevel(level string) {
	w.Logger.SetLogLevel(lol.GetLogLevel(level))
}

// Ready returns a channel that closes when the database is ready.
func (w *W) Ready() <-chan struct{} { return w.ready }

// RunMigrations runs database migrations (handled by the IndexedDB upgrade
// callback instead, so this is a no-op).
func (w *W) RunMigrations() {}

// EventIdsBySerial retrieves event IDs by serial range.
// NOTE(review): unimplemented for the WASM backend; every call errors.
func (w *W) EventIdsBySerial(start uint64, count int) ([]uint64, error) {
	return nil, errors.New("not implemented")
}

// Query cache methods (simplified for WASM - no caching). Each is a no-op,
// so queries always hit IndexedDB directly.
func (w *W) GetCachedJSON(f *filter.F) ([][]byte, bool) { return nil, false }
func (w *W) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte) {}
func (w *W) GetCachedEvents(f *filter.F) (event.S, bool) { return nil, false }
func (w *W) CacheEvents(f *filter.F, events event.S) {}
func (w *W) InvalidateQueryCache() {}
// Placeholder implementations for remaining interface methods
// Query methods are implemented in query-events.go
// Delete methods are implemented in delete-event.go
// Import, Export, and ImportEvents methods are implemented in import-export.go

// GetRelayIdentitySecret returns the stored relay identity secret key bytes,
// or (nil, nil) when none has been stored yet.
func (w *W) GetRelayIdentitySecret() (skb []byte, err error) {
	return w.getMeta(RelayIdentityKey)
}

// SetRelayIdentitySecret persists the relay identity secret key bytes.
func (w *W) SetRelayIdentitySecret(skb []byte) error {
	return w.setMeta(RelayIdentityKey, skb)
}

// GetOrCreateRelayIdentitySecret returns the stored identity secret,
// generating and persisting a fresh 32-byte random key on first use.
func (w *W) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
	skb, err = w.GetRelayIdentitySecret()
	if err != nil {
		return nil, err
	}
	if skb != nil {
		return skb, nil
	}
	// Generate new secret key (32 random bytes)
	// In WASM, we use crypto.getRandomValues
	skb = make([]byte, 32)
	if err := cryptoRandom(skb); err != nil {
		return nil, err
	}
	if err := w.SetRelayIdentitySecret(skb); err != nil {
		return nil, err
	}
	return skb, nil
}

// SetMarker stores an arbitrary metadata value under key.
func (w *W) SetMarker(key string, value []byte) error {
	return w.setStoreValue("markers", key, value)
}

// GetMarker retrieves a metadata value; returns (nil, nil) when absent.
func (w *W) GetMarker(key string) (value []byte, err error) {
	return w.getStoreValue("markers", key)
}

// HasMarker reports whether key exists in the markers store.
// NOTE(review): lookup errors are conflated with "absent" here — a failing
// store reads as "no marker". Confirm callers tolerate that.
func (w *W) HasMarker(key string) bool {
	val, err := w.GetMarker(key)
	return err == nil && val != nil
}

// DeleteMarker removes a metadata value from the markers store.
func (w *W) DeleteMarker(key string) error {
	return w.deleteStoreValue("markers", key)
}
// Subscription methods are implemented in subscriptions.go
// NIP-43 methods are implemented in nip43.go
// Helper methods for object store operations
// setStoreValue writes value under key in the named object store and waits
// for the read-write transaction to commit.
func (w *W) setStoreValue(storeName, key string, value []byte) error {
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
	if err != nil {
		return err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return err
	}
	k, err := safejs.ValueOf(key)
	if err != nil {
		return err
	}
	// Keys are managed explicitly, hence PutKey rather than Put.
	if _, err = store.PutKey(k, bytesToSafeValue(value)); err != nil {
		return err
	}
	return tx.Await(w.ctx)
}
// getStoreValue reads the raw bytes stored under key in the named object
// store, returning (nil, nil) when the key is absent.
//
// NOTE(review): an error from req.Await is also mapped to (nil, nil); if the
// idb library surfaces real transaction failures there, they are silently
// treated as "not found" — confirm this is intended.
func (w *W) getStoreValue(storeName, key string) ([]byte, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	k, err := safejs.ValueOf(key)
	if err != nil {
		return nil, err
	}
	req, err := store.Get(k)
	if err != nil {
		return nil, err
	}
	val, err := req.Await(w.ctx)
	if err != nil || val.IsUndefined() || val.IsNull() {
		return nil, nil
	}
	return safeValueToBytes(val), nil
}
// deleteStoreValue removes key from the named object store and waits for
// the read-write transaction to commit.
func (w *W) deleteStoreValue(storeName, key string) error {
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
	if err != nil {
		return err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return err
	}
	k, err := safejs.ValueOf(key)
	if err != nil {
		return err
	}
	if _, err = store.Delete(k); err != nil {
		return err
	}
	return tx.Await(w.ctx)
}
// Placeholder for unused variable
var _ = chk.E

1739
pkg/wasmdb/wasmdb_test.go

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save