Browse Source

fix relay management and caching

master
Silberengel 1 month ago
parent
commit
21e01bae28
  1. 5
      Dockerfile
  2. 4
      README.md
  3. 2
      docker-compose.yml
  4. 26
      docker-entrypoint.sh
  5. 44
      httpd.conf.template
  6. 100
      ideas.txt
  7. 4
      public/healthz.json
  8. 6
      src/lib/components/relay/RelayInfo.svelte
  9. 281
      src/lib/modules/feed/FeedPage.svelte
  10. 174
      src/lib/modules/threads/ThreadList.svelte
  11. 1
      src/lib/services/nostr/config.ts
  12. 166
      src/lib/services/nostr/memory-manager.ts
  13. 523
      src/lib/services/nostr/nostr-client.ts

5
Dockerfile

@@ -15,7 +15,12 @@ ENV VITE_PWA_ENABLED=${VITE_PWA_ENABLED}
RUN npm run build
FROM httpd:alpine
RUN apk add --no-cache gettext && \
mkdir -p /usr/local/apache2/logs && \
chown -R daemon:daemon /usr/local/apache2/logs
COPY --from=builder /app/build /usr/local/apache2/htdocs/
# Ensure healthz.json exists (copy from public if not in build, or create if missing)
COPY --from=builder /app/public/healthz.json /usr/local/apache2/htdocs/healthz.json
COPY httpd.conf.template /usr/local/apache2/conf/httpd.conf.template
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh

4
README.md

@@ -450,7 +450,7 @@ aitherboard/
| Category | Relays | Purpose |
|----------|--------|---------|
| **Default Relays** | `wss://theforest.nostr1.com`<br>`wss://nostr21.com`<br>`wss://nostr.land`<br>`wss://nostr.sovbit.host`<br>`wss://orly-relay.imwald.eu`<br>`wss://nostr.wine` | Base relays for all operations |
| **Default Relays** | `wss://theforest.nostr1.com`<br>`wss://nostr21.com`<br>`wss://nostr.land`<br>`wss://orly-relay.imwald.eu`<br>`wss://nostr.wine` | Base relays for all operations |
| **Profile Relays** | `wss://relay.damus.io`<br>`wss://aggr.nostr.land`<br>`wss://profiles.nostr1.com` | Additional relays for profile/kind 1 content |
### Relay Selection by Operation
@@ -857,7 +857,7 @@ aitherboard/
| Variable | Type | Default | Validation |
|----------|------|---------|------------|
| `VITE_DEFAULT_RELAYS` | Comma-separated URLs | `wss://theforest.nostr1.com,wss://nostr21.com,wss://nostr.land,wss://nostr.sovbit.host,wss://orly-relay.imwald.eu` | Empty/invalid falls back to defaults |
| `VITE_DEFAULT_RELAYS` | Comma-separated URLs | `wss://theforest.nostr1.com,wss://nostr21.com,wss://nostr.land,wss://orly-relay.imwald.eu` | Empty/invalid falls back to defaults |
| `VITE_ZAP_THRESHOLD` | Integer | `1` | Must be 0 or positive, invalid defaults to 1 |
| `VITE_THREAD_TIMEOUT_DAYS` | Integer | `30` | - |
| `VITE_PWA_ENABLED` | Boolean | `true` | - |

2
docker-compose.yml

@@ -5,7 +5,7 @@ services:
build:
context: .
args:
VITE_DEFAULT_RELAYS: "wss://theforest.nostr1.com,wss://nostr21.com,wss://nostr.land,wss://nostr.sovbit.host,wss://orly-relay.imwald.eu"
VITE_DEFAULT_RELAYS: "wss://theforest.nostr1.com,wss://nostr21.com,wss://nostr.land,wss://orly-relay.imwald.eu"
VITE_ZAP_THRESHOLD: "1"
VITE_THREAD_TIMEOUT_DAYS: "30"
VITE_PWA_ENABLED: "true"

26
docker-entrypoint.sh

@@ -1,5 +1,4 @@
#!/bin/sh
set -e
PORT=${PORT:-9876}
if ! [ "$PORT" -ge 1 ] 2>/dev/null || ! [ "$PORT" -le 65535 ] 2>/dev/null; then
@@ -7,6 +6,29 @@ if ! [ "$PORT" -ge 1 ] 2>/dev/null || ! [ "$PORT" -le 65535 ] 2>/dev/null; then
PORT=9876
fi
echo "Generating Apache configuration with PORT=$PORT"
envsubst '${PORT}' < /usr/local/apache2/conf/httpd.conf.template > /usr/local/apache2/conf/httpd.conf
exec httpd -D FOREGROUND
echo "Testing Apache configuration..."
if ! httpd -t; then
echo "ERROR: Apache configuration test failed!"
echo "Configuration file contents:"
cat /usr/local/apache2/conf/httpd.conf
exit 1
fi
echo "Checking htdocs directory..."
ls -la /usr/local/apache2/htdocs/ | head -20
echo "File count: $(find /usr/local/apache2/htdocs -type f | wc -l)"
echo "Checking if port $PORT is available..."
if ! netstat -tuln 2>/dev/null | grep -q ":$PORT "; then
echo "Port $PORT appears to be available"
else
echo "WARNING: Port $PORT might be in use"
fi
echo "Starting Apache on port $PORT..."
echo "Apache will run with PID: $$"
# Run httpd in foreground with error logging, redirect stderr to stdout
exec httpd -D FOREGROUND -e info 2>&1

44
httpd.conf.template

@@ -1,26 +1,50 @@
LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule unixd_module modules/mod_unixd.so
LoadModule dir_module modules/mod_dir.so
PidFile "/usr/local/apache2/logs/httpd.pid"
ErrorLog "/proc/self/fd/2"
CustomLog "/proc/self/fd/1" common
LogLevel info
Listen ${PORT}
ServerName localhost
DocumentRoot "/usr/local/apache2/htdocs"
DirectoryIndex index.html
User daemon
Group daemon
RewriteEngine On
RewriteRule ^/healthz$ /healthz.json [L]
<Directory "/usr/local/apache2/htdocs">
Options Indexes FollowSymLinks
AllowOverride All
Require all granted
RewriteEngine On
# Allow direct access to index.html and existing files
RewriteRule ^index\.html$ - [L]
# If file exists, serve it
RewriteCond %{REQUEST_FILENAME} -f
RewriteRule . - [L]
# If directory exists, serve it (DirectoryIndex will handle it)
RewriteCond %{REQUEST_FILENAME} -d
RewriteRule . - [L]
# Otherwise, serve 200.html for SPA routing
RewriteRule . /200.html [L]
</Directory>
<Location "/healthz">
<Location "/healthz.json">
Header set Content-Type "application/json"
Header set Cache-Control "public, max-age=5"
</Location>
RewriteEngine On
RewriteBase /
RewriteRule ^healthz$ /healthz.json [L]
RewriteRule ^index\.html$ - [L]
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /200.html [L]
<IfModule mod_headers.c>
Header set Service-Worker-Allowed "/"
</IfModule>

100
ideas.txt

@@ -1,100 +0,0 @@
1. When I open a ThreadDrawer, the "Replying to:..." blurb at the top should render as a full event. And, if that replied-to event is also a reply, its OP should also be rendered as a full event. And so on, up the hierarchy, until we get to an event that isn't a reference or reply to any other (no e-tag or q-tag or a-tag). I want to see the entire discussion tree, so that the event I clicked in the Feed view is displayed in complete context.
2. Fix the Threads list, which loads so slowly. I should immediately see what is in cache, and then you update the cache and add anything missing in a second sweep. And make sure updating doesn't cause the page to jump around or create endless loops.
3. Make sure that pinning and bookmarking (from the event "..." menu) actually create/update and publish the list events.
4. Add a delete event menu item to the event "..." menu, that publishes a deletion request to all available relays.
5. Always render a pretty OpenGraph card, for URLs, if they provide one. Unless the URL is in the middle of a list, paragraph, or otherwise part of some larger structure.
6. Make sure that highlights work, according to NIP-84. refer to ../jumble for a working version.
Some example events:
{
"id": "93bea17f71ed9ea7f6832e3be7e617b3387e0700193cfcebaf3ffbc2e6f48a7f",
"pubkey": "17538dc2a62769d09443f18c37cbe358fab5bbf981173542aa7c5ff171ed77c4",
"created_at": 1769023343,
"kind": 9802,
"tags": [
[
"e",
"6f854ade40cf3f24046249e650f55b33add3ee1526c00cc93cc7dfc80b8dc121",
"source"
]
],
"content": "not real wisdom, being a pretense of knowing the unknown",
"sig": "150279e733e16fa85439916f9f5b8108898a35cbf18062638dfc94e7a38f4a2faae8ce918750ef327fc16b7e7ca8739b1e8aff3b9dd238363d08eec423abba83"
}
{
"id": "1cd2017dd33a2efddffb9814c1993cf62e6d8a8e2e90af40973b6d4d1ea509f0",
"pubkey": "a9434ee165ed01b286becfc2771ef1705d3537d051b387288898cc00d5c885be",
"created_at": 1769288219,
"kind": 9802,
"tags": [
[
"p",
"a9434ee165ed01b286becfc2771ef1705d3537d051b387288898cc00d5c885be"
],
[
"a",
"30023:a9434ee165ed01b286becfc2771ef1705d3537d051b387288898cc00d5c885be:comparing-community-specs"
],
[
"context",
"A single publication can be targeted to up to 12 communities via one Targeted Publication event. The creator's intended audience is explicit and transparent — anyone can see which communities a piece of content was meant for. This can serve as an organic disovery route for related Communities + lowers the bar for bootstrapping new ones."
],
[
"alt",
"This highlight was made by https://primal.net web client"
]
],
"content": " The creator's intended audience is explicit and transparent — anyone can see which communities a piece of content was meant for. This can serve as an organic disovery route for related Communities + lowers the bar for bootstrapping new ones.",
"sig": "b490a12fbc1ab0063c6ddb3ae091212a4fcf76fdf9581d5f0291f24a9443b45d9f11d70e8035ea9c61b95ad47952c46ceeffa6dbb0fa5351bc51aad2e3d54add"
}
In the first highlight event, there is simply the content field, which should be rendered as a quote, with a link to the original source (event or URL) below it. If the URL provides OpenGraph data, display it and add the hyperlink to it. For events: display a card with "A note from: <user badge rendered>" and then the "title", "image", and "summary" tags, if available. Make the card a clickable hyperlink to the event's /event page.
7. Make #hashtags and t-tag topic buttons clickable. Clicking on one should launch a /topics/nameOfHashtag page, that reveals an event list of everything on the relays that includes that topic as a hashtag or a t-tag.
8. Display a metadata card, at the top of the page, when rendering any replaceable event in /event . Render tags like "image", "description", "summary", "author", "title", etc.
9. Add an Asciidoctor library to the packages. Use that for rendering kinds 30818 and 30041. All other kinds use Markdown.
10. If a /event page is opened for a 30040 event, make sure that you analyze and then lazy-load the entire event-index hierarchy (see ../nips-silberengel/NKBIP-01.adoc) into the cache and then into the view. The index can use a-tags or e-tags, or a mix of both. Handle both types of tags and make sure to render the events in the original order. Retry any missing events, after the first loading pass, but don't loop infinitely.
11. Display a metadata card, at the top, for the OP 30040. Only display metadata for nested events, if they differ from the OP.
12. Please note that kind 30040 events typically contain 30041s, but they can actually contain any type of event the creator wants. Make sure to render each one according to its kind (markdown or asciidoc).
13. Both the metadata card and the section events should have their "title" displayed (if none is provided, render the d-tag without hyphens and in Title Case) and have a "..." menu. The section events should have a new menu item: "Open in a new window" that opens the section as a /event in the browser. The index OP should have a new menu item: "Label this as a book" that creates a "general" 1985 label with "booklist".
14. If an event opened in /event has been highlighted, render the highlight on the displayed text. (for 30040s, this needs to run after the publication has finished loading, or it won't find the text). Hovering over the highlight should display the user-badge of the person who created the highlight, with a button "View the highlight". Clicking the button should make the highlight open to the right, in a thread panel.
15. There should be a /replaceable/d-tag-placed-here url path that searches for all replaceable events that have that d-tag and lists them in a list. Clicking one should display it in thread-panel on the right.
16. Add a main menu item, to the right of Feeds: Write
it should open to a page offering two choices: find an existing event to edit, or create a new event.
Clicking find should then demand they enter an event id (hex id, nevent, naddr, note) and click "Find".
The event should be searched for in cache and then the relays, (return the newest version found) and the json rendered, below a hyperlink to the related /event page.
They should be able to click an "Edit" button, and then the event is displayed as a form, where they can add/edit/delete tags and change the content. Don't render id, kind, pubkey, sig, created_at as those are to be generated when they click "Publish". Publish to cache and to the standard write-relays. Publishing should reveal the standard success/failure message for the relays. If none were successful, allow them to attempt to republish from cache. If successful, wait 5 seconds and then open the event in the /event page.
Clicking create should ask them to enter a kind they would like to write: 1, 11, 9802, 1222, 20, 21, 22, 30023, 30818, 30817, 30041, 30040 (metadata-only, no sections added, they can do that manually in the edit function, add that as a help-text), 1068
17. If the user is looking at their own profile page, display a menu item "Adjust profile events" that opens a left-side panel that allows them to select one of the following events to create/update: 0, 3, 30315, 10133, 10002, 10432, 10001, 10003, 10895, 10015, 10030, 30030, 10000, 30008. Selecting one should open an appropriate form and preload it with any event found in cache or on the relays. Publish to cache and to the standard write-relays. Publishing should reveal the standard success/failure message for the relays. If none were successful, allow them to attempt to republish from cache. If successful, wait 5 seconds and then open the event in the /event page.
18. Make sure the /event page can handle metadata-only (no "content") events gracefully, displaying their tag-lists.
Get rid of the light/dark mode button on the main nav bar. Instead, change the preferences button so that it opens a left-side panel with light/dark mode, text-size and paragraph spacing settings, as well as a new checkbox: Create expiring events (6 months: kinds 7, 1, 30315)
that adds a 6-month expiration timestamp to those events they create, like:
["expiration", "1600000000"]
also allow them to determine their preferred media-upload server (see ../jumble for how this is done, in /settings/posts)
and add a button "Manage Cache" that opens a page /cache that contains a full cache browser and manager
Add a short "About" section, at the bottom of the panel.

4
public/healthz.json

@@ -2,7 +2,7 @@
"status": "ok",
"service": "aitherboard",
"version": "0.1.0",
"buildTime": "2026-02-04T16:38:18.760Z",
"buildTime": "2026-02-05T06:58:07.669Z",
"gitCommit": "unknown",
"timestamp": 1770223098760
"timestamp": 1770274687669
}

6
src/lib/components/relay/RelayInfo.svelte

@@ -172,11 +172,11 @@
async function getEventCount() {
try {
// Fetch a small sample to estimate activity
// Fetch a small sample to estimate activity (relay-first for faster response)
const events = await nostrClient.fetchEvents(
[{ kinds: [1], limit: 1 }],
[relayUrl],
{ useCache: false, cacheResults: false, timeout: 5000 }
{ relayFirst: true, useCache: true, cacheResults: false, timeout: 3000 }
);
// This is just a connectivity check, not a real count
// Real event count would require a COUNT query which not all relays support
@@ -194,7 +194,7 @@
const favoriteRelayEvents = await nostrClient.fetchEvents(
[{ kinds: [KIND.FAVORITE_RELAYS], limit: 100 }],
relayManager.getProfileReadRelays(),
{ useCache: true, cacheResults: true }
{ relayFirst: true, useCache: true, cacheResults: true, timeout: 3000 }
);
console.debug(`[RelayInfo] Fetched ${favoriteRelayEvents.length} favorite relay events for ${relayUrl}`);

281
src/lib/modules/feed/FeedPage.svelte

@@ -25,6 +25,7 @@
let loadingMore = $state(false);
let hasMore = $state(true);
let oldestTimestamp = $state<number | null>(null);
let relayError = $state<string | null>(null); // Error message for single-relay mode
// List filter state
let availableLists = $state<Array<{ kind: number; name: string; event: NostrEvent }>>([]);
@@ -314,7 +315,8 @@
// Refresh every 30 seconds
refreshInterval = setInterval(async () => {
try {
const relays = relayManager.getFeedReadRelays();
// Use single relay if provided, otherwise use normal relay list
const relays = singleRelay ? [singleRelay] : relayManager.getFeedReadRelays();
// Get the newest event's timestamp from all feed event types to only fetch newer events
const allFeedEvents = [...posts, ...highlights, ...otherFeedEvents];
@@ -329,14 +331,21 @@
since: newestTimestamp + 1 // Only get events newer than what we have
}));
// Fetch new events (without cache to ensure we query relays)
// Fetch new events (relay-first for refresh)
// In single-relay mode: never use cache
const events = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: false, // Don't use cache for refresh - always query relays
singleRelay ? {
relayFirst: true, // Query relay first
useCache: false, // Never use cache in single-relay mode
cacheResults: false, // Don't cache in single-relay mode
timeout: 3000
} : {
relayFirst: true, // Query relays first
useCache: true, // Fill from cache if needed
cacheResults: true,
timeout: 10000
timeout: 3000
}
);
@@ -378,129 +387,62 @@
async function loadFeed() {
loading = true;
relayError = null; // Clear any previous errors
try {
const config = nostrClient.getConfig();
// Use single relay if provided, otherwise use normal relay list
const relays = singleRelay ? [singleRelay] : relayManager.getFeedReadRelays();
// For single-relay mode, check if relay is available
if (singleRelay) {
try {
const relay = await nostrClient.getRelay(singleRelay);
if (!relay) {
relayError = `Relay ${singleRelay} is unavailable or returned an error. The relay may be down or unreachable.`;
loading = false;
return;
}
} catch (error) {
relayError = `Failed to connect to relay ${singleRelay}: ${error instanceof Error ? error.message : 'Unknown error'}`;
loading = false;
return;
}
}
// Load all feed kinds
const feedKinds = getFeedKinds();
const filters = feedKinds.map(kind => ({ kinds: [kind], limit: 20 }));
// For single relay mode, load from cache first for immediate display
// Then query the relay in background to get fresh data
let events: NostrEvent[] = [];
if (singleRelay) {
// Step 1: Load from cache immediately (fast, shows something right away)
const cachedEvents = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: true, // Use cache for fast initial load
cacheResults: false, // Don't cache again
timeout: 2000 // Short timeout for cache
}
);
// Show cached data immediately if available
if (cachedEvents.length > 0) {
events = cachedEvents;
console.log(`[FeedPage] Loaded ${cachedEvents.length} cached events from ${singleRelay}`);
// Process cached events immediately so they show up
// (will be processed below)
// In single-relay mode: never use cache, only fetch directly from relay
// In normal mode: use relay-first with cache fallback
const fetchOptions = singleRelay ? {
relayFirst: true, // Query relay first
useCache: false, // Never use cache in single-relay mode
cacheResults: false, // Don't cache results in single-relay mode
timeout: 15000, // 15-second timeout for single-relay (relays can be slow, especially if auth is required)
onUpdate: (updatedEvents: NostrEvent[]) => {
// Update incrementally as events arrive
handleUpdate(updatedEvents);
}
// Step 2: Ensure relay is connected and query for fresh data
// If we have cached data, do this in background. Otherwise, wait for it.
const queryPromise = (async () => {
try {
console.log(`[FeedPage] Single relay mode: ensuring ${singleRelay} is connected...`);
// Force connection to the relay
await nostrClient.addRelay(singleRelay);
// Give it a moment to establish connection
await new Promise(resolve => setTimeout(resolve, 1000));
// Query relay for fresh data
const freshEvents = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: false, // Force query relay
cacheResults: true, // Cache the results
timeout: 15000
}
);
console.log(`[FeedPage] Fresh query returned ${freshEvents.length} events from ${singleRelay}`);
// Update with fresh data
if (freshEvents.length > 0) {
const existingIds = new Set([...posts.map(p => p.id), ...highlights.map(h => h.id)]);
const trulyNew = freshEvents.filter(e => !existingIds.has(e.id));
if (trulyNew.length > 0 || freshEvents.length !== events.length) {
handleUpdate(freshEvents);
}
}
return freshEvents;
} catch (error) {
console.warn(`[FeedPage] Failed to query relay ${singleRelay}:`, error);
// If query fails but we have cached data, that's okay - keep showing cached data
return [];
}
})();
// If we don't have cached data, wait for the relay query
if (events.length === 0) {
const freshEvents = await queryPromise;
if (freshEvents.length > 0) {
events = freshEvents;
}
} else {
// If we have cached data, query in background (don't await)
queryPromise.catch(() => {
// Already logged error above
});
}
} else {
// Normal mode: use cache first, then query relays
events = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: true, // Use cache for fast initial load
cacheResults: true, // Cache results
timeout: 15000
} : {
relayFirst: true, // Query relays first with timeout
useCache: true, // Fill from cache if relay query returns nothing
cacheResults: true, // Cache the results
timeout: 3000, // 3-second timeout
onUpdate: (updatedEvents: NostrEvent[]) => {
// Update incrementally as events arrive
handleUpdate(updatedEvents);
}
);
console.log(`[FeedPage] Loaded ${events.length} events from relays`);
// Also immediately query relays to ensure we get fresh data in background
nostrClient.fetchEvents(
filters,
relays,
{
useCache: false, // Force query relays
cacheResults: true, // Cache results
timeout: 15000
}
).then((newEvents) => {
console.log(`[FeedPage] Background query returned ${newEvents.length} events`);
// Only update if we got new events that aren't already in posts
if (newEvents.length > 0) {
const existingIds = new Set([...posts.map(p => p.id), ...highlights.map(h => h.id)]);
const trulyNew = newEvents.filter(e => !existingIds.has(e.id));
if (trulyNew.length > 0) {
handleUpdate(trulyNew);
}
}
}).catch(error => {
console.warn('[FeedPage] Background relay query error:', error);
});
};
if (singleRelay) {
console.log(`[FeedPage] Single-relay mode: fetching from ${singleRelay} with useCache=false, cacheResults=false`);
}
const events = await nostrClient.fetchEvents(filters, relays, fetchOptions);
console.log(`[FeedPage] Loaded ${events.length} events from ${singleRelay ? `single relay ${singleRelay}` : 'relays'} (relay-first mode)`);
// Separate events by kind - we'll handle all showInFeed kinds
const postsList = events.filter(e => e.kind === KIND.SHORT_TEXT_NOTE);
@@ -559,7 +501,7 @@
// This allows the UI to render while fresh data loads in background
loading = false;
console.log(`[FeedPage] Loaded ${sortedPosts.length} posts and ${sortedHighlights.length} highlights`);
console.log(`[FeedPage] Loaded ${sortedPosts.length} posts and ${sortedHighlights.length} highlights`);
if (sortedPosts.length > 0 || sortedHighlights.length > 0) {
const allTimestamps = [...sortedPosts.map(e => e.created_at), ...sortedHighlights.map(e => e.created_at)];
@@ -568,6 +510,10 @@
await loadReactionsForPosts(sortedPosts);
} else {
console.log('[FeedPage] No events found. Relays:', relays);
// In single-relay mode, if we got 0 events, it might mean the relay doesn't have any
if (singleRelay && events.length === 0) {
relayError = `No events found on relay ${singleRelay}. This relay may not store feed event types, or it may be empty.`;
}
}
hasMore = events.length >= 20;
@@ -595,58 +541,23 @@
until: oldestTimestamp || undefined
}));
// For single relay mode, try cache first, then query relay
let events: NostrEvent[] = [];
if (singleRelay) {
// Try cache first
const cachedEvents = await nostrClient.fetchEvents(
// In single-relay mode: never use cache, only fetch directly from relay
// In normal mode: use relay-first with cache fallback
const events = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: true,
cacheResults: false,
timeout: 2000
}
);
if (cachedEvents.length > 0) {
events = cachedEvents;
}
// Query relay in background for fresh data
nostrClient.fetchEvents(
filters,
relays,
{
useCache: false,
cacheResults: true,
timeout: 10000
}
).then((freshEvents) => {
if (freshEvents.length > 0) {
const existingIds = new Set([...allPosts.map(p => p.id), ...allHighlights.map(h => h.id)]);
const uniqueNewPosts = freshEvents.filter(e => e.kind === KIND.SHORT_TEXT_NOTE && !existingIds.has(e.id));
const uniqueNewHighlights = freshEvents.filter(e => e.kind === KIND.HIGHLIGHTED_ARTICLE && !existingIds.has(e.id));
if (uniqueNewPosts.length > 0 || uniqueNewHighlights.length > 0) {
handleUpdate(freshEvents);
}
}
}).catch(error => {
console.warn('[FeedPage] Background query error:', error);
});
} else {
events = await nostrClient.fetchEvents(
filters,
relays,
{
useCache: true,
cacheResults: true,
timeout: 10000
singleRelay ? {
relayFirst: true, // Query relay first
useCache: false, // Never use cache in single-relay mode
cacheResults: false, // Don't cache results in single-relay mode
timeout: 3000 // 3-second timeout
} : {
relayFirst: true, // Query relays first with timeout
useCache: true, // Fill from cache if relay query returns nothing
cacheResults: true, // Cache the results
timeout: 3000 // 3-second timeout
}
);
}
if (events.length === 0) {
hasMore = false;
@@ -866,18 +777,25 @@
// Use single relay if provided, otherwise use normal reaction relays
const relaysForReactions = singleRelay ? [singleRelay] : reactionRelays;
// For single relay mode, disable cache completely
const useCache = !singleRelay;
const cacheResults = !singleRelay;
// Batch fetch all reactions for all posts in one query
// In single-relay mode: never use cache
const allReactions = await nostrClient.fetchEvents(
[
{ kinds: [KIND.REACTION], '#e': eventIds, limit: 1000 },
{ kinds: [KIND.REACTION], '#E': eventIds, limit: 1000 }
],
relaysForReactions,
{ useCache, cacheResults, timeout: 10000 }
singleRelay ? {
relayFirst: true,
useCache: false, // Never use cache in single-relay mode
cacheResults: false, // Don't cache in single-relay mode
timeout: 3000
} : {
relayFirst: true,
useCache: true,
cacheResults: true,
timeout: 3000
}
);
// Group reactions by event ID
@@ -940,6 +858,13 @@
<div class="loading-state">
<p class="text-fog-text dark:text-fog-dark-text">Loading feed...</p>
</div>
{:else if relayError}
<div class="error-state">
<p class="text-fog-text dark:text-fog-dark-text error-message">{relayError}</p>
<p class="text-fog-text-light dark:text-fog-dark-text-light error-hint">
The relay may be temporarily unavailable, or it may not store the event types you're looking for.
</p>
</div>
{:else if posts.length === 0 && highlights.length === 0 && otherFeedEvents.length === 0}
<div class="empty-state">
<p class="text-fog-text dark:text-fog-dark-text">
@@ -1028,11 +953,27 @@
}
.loading-state,
.empty-state {
.empty-state,
.error-state {
padding: 2rem;
text-align: center;
}
.error-message {
font-weight: 600;
color: var(--fog-accent, #64748b);
margin-bottom: 0.5rem;
}
:global(.dark) .error-message {
color: var(--fog-dark-accent, #94a3b8);
}
.error-hint {
font-size: 0.875rem;
margin-top: 0.5rem;
}
.feed-posts {
display: flex;
flex-direction: column;

174
src/lib/modules/threads/ThreadList.svelte

@@ -29,12 +29,42 @@
return sorted;
});
$effect(() => {
// Track if we're currently loading to prevent loops
let isLoading = $state(false);
let prevSortBy = $state<'newest' | 'active' | 'upvoted' | null>(null);
let prevShowOlder = $state<boolean | null>(null);
let prevSelectedTopic = $state<string | null | undefined | null>(null);
// Initial load on mount
onMount(() => {
prevSortBy = sortBy;
prevShowOlder = showOlder;
prevSelectedTopic = selectedTopic;
loadAllData();
});
// Only reload when sortBy, showOlder, or selectedTopic changes (after initial values are set)
$effect(() => {
// Skip if we haven't set initial values yet (onMount hasn't run)
if (prevSortBy === null) return;
// Check if any filter parameter actually changed
if (sortBy !== prevSortBy || showOlder !== prevShowOlder || selectedTopic !== prevSelectedTopic) {
prevSortBy = sortBy;
prevShowOlder = showOlder;
prevSelectedTopic = selectedTopic;
// Only reload if not already loading
if (!isLoading) {
loadAllData();
}
}
});
async function loadAllData() {
if (isLoading) return; // Prevent concurrent loads
loading = true;
isLoading = true;
try {
const config = nostrClient.getConfig();
const since = showOlder
@@ -48,88 +78,54 @@
const reactionRelays = relayManager.getProfileReadRelays();
const zapRelays = relayManager.getZapReceiptReadRelays();
// Step 1: Load from cache first (immediate display)
const cachedThreads = await nostrClient.fetchEvents(
[{ kinds: [KIND.DISCUSSION_THREAD], since, limit: 50 }],
threadRelays,
{
useCache: true,
cacheResults: false, // Don't cache again, we already have it
timeout: 100 // Quick timeout for cache-only fetch
}
);
// Build threads map from cache immediately
const newThreadsMap = new Map<string, NostrEvent>();
for (const event of cachedThreads) {
newThreadsMap.set(event.id, event);
}
threadsMap = newThreadsMap;
loading = false; // Show cached data immediately
// Step 2: Fetch from relays in background and update incrementally
// Use a Set to track which events we've already processed to avoid loops
const processedEventIds = new Set<string>(Array.from(newThreadsMap.keys()));
nostrClient.fetchEvents(
// Query relays first with 3-second timeout, then fill from cache if needed
const relayThreads = await nostrClient.fetchEvents(
[{ kinds: [KIND.DISCUSSION_THREAD], since, limit: 50 }],
threadRelays,
{
useCache: false, // Force query relays
cacheResults: true,
relayFirst: true, // Query relays first with timeout
useCache: true, // Fill from cache if relay query returns nothing
cacheResults: true, // Cache the results
timeout: 3000, // 3-second timeout
onUpdate: async (updatedEvents) => {
// Only add new events that aren't already in the map
// Update incrementally as events arrive
const newThreadsMap = new Map(threadsMap);
let hasNewEvents = false;
for (const event of updatedEvents) {
if (!processedEventIds.has(event.id)) {
const existing = newThreadsMap.get(event.id);
if (!existing) {
newThreadsMap.set(event.id, event);
processedEventIds.add(event.id);
hasNewEvents = true;
} else {
} else if (event.created_at > existing.created_at) {
// Update existing event if this one is newer
const existing = newThreadsMap.get(event.id);
if (existing && event.created_at > existing.created_at) {
newThreadsMap.set(event.id, event);
}
newThreadsMap.set(event.id, event);
hasNewEvents = true;
}
}
if (hasNewEvents) {
threadsMap = new Map(newThreadsMap); // Trigger reactivity
}
}
}
).then((relayThreads) => {
// Final update after relay fetch completes
let hasNewEvents = false;
for (const event of relayThreads) {
if (!processedEventIds.has(event.id)) {
newThreadsMap.set(event.id, event);
processedEventIds.add(event.id);
hasNewEvents = true;
} else {
// Update existing event if this one is newer
const existing = newThreadsMap.get(event.id);
if (existing && event.created_at > existing.created_at) {
newThreadsMap.set(event.id, event);
threadsMap = newThreadsMap; // Trigger reactivity
}
}
}
if (hasNewEvents) {
threadsMap = new Map(newThreadsMap); // Trigger reactivity
}
}).catch((error) => {
console.debug('Background relay fetch error (non-critical):', error);
});
);
// Build threads map from results
const newThreadsMap = new Map<string, NostrEvent>();
for (const event of relayThreads) {
newThreadsMap.set(event.id, event);
}
threadsMap = newThreadsMap;
loading = false; // Show data immediately
// Get all thread IDs (use current threadsMap, not newThreadsMap, since it may have been updated)
const threadIds = Array.from(threadsMap.keys());
if (threadIds.length > 0) {
// Fetch all comments in parallel
// Fetch all comments in parallel (relay-first for first-time users)
const allComments = await nostrClient.fetchEvents(
[{ kinds: [KIND.COMMENT], '#E': threadIds, '#K': ['11'] }],
commentRelays,
{ useCache: true }
{ relayFirst: true, useCache: true, cacheResults: true, timeout: 3000 }
);
// Fetch all reactions in parallel
@ -140,7 +136,7 @@ @@ -140,7 +136,7 @@
// Function to process and group reactions (called initially and on updates)
const processReactionUpdates = async () => {
const allReactions = Array.from(allReactionsMap.values());
console.log('[ThreadList] Processing reaction updates, total reactions:', allReactions.length);
// Processing reaction updates
if (allReactions.length === 0) return;
@ -148,7 +144,7 @@ @@ -148,7 +144,7 @@
const deletionEvents = await nostrClient.fetchEvents(
[{ kinds: [KIND.EVENT_DELETION], authors: Array.from(new Set(allReactions.map(r => r.pubkey))) }],
reactionRelays,
{ useCache: true }
{ relayFirst: true, useCache: true, cacheResults: true, timeout: 3000 }
);
// Build deleted reaction IDs map
@ -187,35 +183,10 @@ @@ -187,35 +183,10 @@
}
reactionsMap = updatedReactionsMap;
console.log('[ThreadList] Updated reactions map:', {
threadCounts: Array.from(updatedReactionsMap.entries()).map(([threadId, reactions]) => ({
threadId: threadId.substring(0, 16) + '...',
count: reactions.length,
upvotes: reactions.filter(r => {
const content = r.content.trim();
return content === '+' || content === '⬆' || content === '↑';
}).length,
reactionEvents: reactions.map(r => ({
id: r.id.substring(0, 16) + '...',
pubkey: r.pubkey.substring(0, 16) + '...',
content: r.content,
fullEvent: r
}))
}))
});
// Updated reactions map
};
const handleReactionUpdate = async (updated: NostrEvent[]) => {
console.log('[ThreadList] Received reaction update:', {
count: updated.length,
events: updated.map(r => ({
id: r.id.substring(0, 16) + '...',
pubkey: r.pubkey.substring(0, 16) + '...',
content: r.content,
tags: r.tags.filter(t => t[0] === 'e' || t[0] === 'E'),
fullEvent: r
}))
});
for (const r of updated) {
allReactionsMap.set(r.id, r);
}
@ -227,7 +198,10 @@ @@ -227,7 +198,10 @@
[{ kinds: [KIND.REACTION], '#e': threadIds }],
reactionRelays,
{
relayFirst: true,
useCache: true,
cacheResults: true,
timeout: 3000,
onUpdate: handleReactionUpdate
}
);
@ -239,7 +213,10 @@ @@ -239,7 +213,10 @@
[{ kinds: [KIND.REACTION], '#E': threadIds }],
reactionRelays,
{
relayFirst: true,
useCache: true,
cacheResults: true,
timeout: 3000,
onUpdate: handleReactionUpdate
}
);
@ -247,12 +224,7 @@ @@ -247,12 +224,7 @@
console.log('[ThreadList] Upper case #E filter rejected by relay (this is normal):', error);
}
console.log('[ThreadList] Reactions fetched:', {
withLowerE: reactionsWithLowerE.length,
withUpperE: reactionsWithUpperE.length,
lowerE_events: reactionsWithLowerE,
upperE_events: reactionsWithUpperE
});
// Reactions fetched
// Combine and deduplicate by reaction ID
for (const r of reactionsWithLowerE) {
@ -263,22 +235,12 @@ @@ -263,22 +235,12 @@
}
const allReactions = Array.from(allReactionsMap.values());
console.log('[ThreadList] All reactions (deduplicated):', {
total: allReactions.length,
events: allReactions.map(r => ({
id: r.id.substring(0, 16) + '...',
pubkey: r.pubkey.substring(0, 16) + '...',
content: r.content,
tags: r.tags.filter(t => t[0] === 'e' || t[0] === 'E'),
created_at: new Date(r.created_at * 1000).toISOString()
}))
});
// Fetch all zap receipts in parallel
// Fetch all zap receipts in parallel (relay-first for first-time users)
const allZapReceipts = await nostrClient.fetchEvents(
[{ kinds: [KIND.ZAP_RECEIPT], '#e': threadIds }],
zapRelays,
{ useCache: true }
{ relayFirst: true, useCache: true, cacheResults: true, timeout: 3000 }
);
// Build maps

1
src/lib/services/nostr/config.ts

@ -7,7 +7,6 @@ const DEFAULT_RELAYS = [ @@ -7,7 +7,6 @@ const DEFAULT_RELAYS = [
'wss://theforest.nostr1.com',
'wss://nostr21.com',
'wss://nostr.land',
'wss://nostr.sovbit.host',
'wss://orly-relay.imwald.eu',
'wss://nostr.wine'
];

166
src/lib/services/nostr/memory-manager.ts

@ -0,0 +1,166 @@ @@ -0,0 +1,166 @@
/**
 * Memory management for Nostr client
 * Tracks in-memory event usage and provides cleanup mechanisms.
 *
 * This is a bookkeeping layer only: it estimates sizes and decides which
 * event IDs should be evicted, but does not itself hold event payloads.
 */
import type { NostrEvent } from '../../types/nostr.js';

/** Bookkeeping record for one tracked event. */
interface EventReference {
	id: string;
	/** Estimated size in bytes (JSON length + fixed overhead). */
	size: number;
	/** When the event was (last) tracked, ms since epoch. Used for LRU-style cleanup. */
	timestamp: number;
}

class MemoryManager {
	private eventReferences: Map<string, EventReference> = new Map();
	private totalSize: number = 0;
	// Ascending warning levels; each fires at most once (see checkThresholds).
	private warningThresholds = [50 * 1024 * 1024, 100 * 1024 * 1024, 200 * 1024 * 1024]; // 50MB, 100MB, 200MB
	private lastWarningLevel: number = -1;

	/**
	 * Estimate size of an event in bytes.
	 * Rough heuristic: serialized JSON length plus a constant for Map-entry overhead.
	 */
	private estimateEventSize(event: NostrEvent): number {
		// Rough estimate: JSON string length + overhead
		return JSON.stringify(event).length + 100; // 100 bytes overhead for Map entry
	}

	/**
	 * Track an event in memory. Re-tracking the same event id replaces the
	 * previous record (size is not double-counted) and refreshes its timestamp.
	 */
	trackEvent(event: NostrEvent): void {
		const id = event.id;
		const size = this.estimateEventSize(event);
		// If already tracked, remove the old size before re-adding
		const existing = this.eventReferences.get(id);
		if (existing) {
			this.totalSize -= existing.size;
		}
		this.eventReferences.set(id, {
			id,
			size,
			timestamp: Date.now()
		});
		this.totalSize += size;
		this.checkThresholds();
	}

	/**
	 * Untrack an event (when it's removed from memory).
	 * Unknown ids are ignored.
	 */
	untrackEvent(eventId: string): void {
		const ref = this.eventReferences.get(eventId);
		if (ref) {
			this.totalSize -= ref.size;
			this.eventReferences.delete(eventId);
		}
	}

	/**
	 * Untrack multiple events.
	 */
	untrackEvents(eventIds: string[]): void {
		for (const id of eventIds) {
			this.untrackEvent(id);
		}
	}

	/**
	 * Check memory thresholds and warn when a new (higher) threshold is crossed.
	 *
	 * BUGFIX: the previous implementation used `findIndex`, which returns the
	 * FIRST ascending threshold satisfied — once usage passed 50MB the level was
	 * always 0, so the 100MB/200MB warnings could never fire. We now take the
	 * HIGHEST threshold that current usage meets or exceeds.
	 *
	 * Note: `lastWarningLevel` is monotonic until clear(), so each level warns
	 * at most once even if usage drops and climbs back (original behavior kept).
	 */
	private checkThresholds(): void {
		let currentLevel = -1;
		for (let i = 0; i < this.warningThresholds.length; i++) {
			if (this.totalSize >= this.warningThresholds[i]) {
				currentLevel = i;
			}
		}
		if (currentLevel > this.lastWarningLevel) {
			const thresholdMB = this.warningThresholds[currentLevel] / (1024 * 1024);
			const currentMB = (this.totalSize / (1024 * 1024)).toFixed(2);
			console.warn(
				`[memory-manager] Memory usage exceeded ${thresholdMB}MB threshold. ` +
				`Current usage: ${currentMB}MB (${this.eventReferences.size} events)`
			);
			this.lastWarningLevel = currentLevel;
			// If we've exceeded the highest threshold, suggest cleanup
			if (currentLevel === this.warningThresholds.length - 1) {
				console.warn(
					`[memory-manager] Consider cleaning up old events. ` +
					`Use cleanupOldEvents() to free memory.`
				);
			}
		}
	}

	/**
	 * Clean up oldest events (by tracking timestamp) until total tracked size
	 * is at or below `targetSize` bytes.
	 *
	 * @param targetSize - byte budget to shrink down to
	 * @returns the ids that were untracked, oldest first (empty if under budget)
	 */
	cleanupOldEvents(targetSize: number): string[] {
		if (this.totalSize <= targetSize) {
			return [];
		}
		const sorted = Array.from(this.eventReferences.values())
			.sort((a, b) => a.timestamp - b.timestamp); // Oldest first
		const removed: string[] = [];
		let freedSize = 0;
		const targetFreed = this.totalSize - targetSize;
		for (const ref of sorted) {
			if (freedSize >= targetFreed) {
				break;
			}
			this.totalSize -= ref.size;
			this.eventReferences.delete(ref.id);
			removed.push(ref.id);
			freedSize += ref.size;
		}
		const freedMB = (freedSize / (1024 * 1024)).toFixed(2);
		console.debug(
			`[memory-manager] Cleaned up ${removed.length} old events, freed ${freedMB}MB`
		);
		return removed;
	}

	/**
	 * Get current memory usage statistics.
	 */
	getStats(): {
		totalSize: number;
		totalSizeMB: number;
		eventCount: number;
		averageEventSize: number;
	} {
		return {
			totalSize: this.totalSize,
			totalSizeMB: this.totalSize / (1024 * 1024),
			eventCount: this.eventReferences.size,
			averageEventSize: this.eventReferences.size > 0
				? this.totalSize / this.eventReferences.size
				: 0
		};
	}

	/**
	 * Clear all tracking and re-arm all warning thresholds.
	 */
	clear(): void {
		this.eventReferences.clear();
		this.totalSize = 0;
		this.lastWarningLevel = -1;
	}

	/**
	 * Get list of tracked event IDs (for debugging).
	 */
	getTrackedEventIds(): string[] {
		return Array.from(this.eventReferences.keys());
	}
}

// Module-level singleton shared by the whole client.
export const memoryManager = new MemoryManager();

523
src/lib/services/nostr/nostr-client.ts

@ -11,6 +11,7 @@ import { getDB } from '../cache/indexeddb-store.js'; @@ -11,6 +11,7 @@ import { getDB } from '../cache/indexeddb-store.js';
import { filterEvents, shouldHideEvent } from '../event-filter.js';
import { sessionManager } from '../auth/session-manager.js';
import { KIND } from '../../types/kind-lookup.js';
import { memoryManager } from './memory-manager.js';
export interface PublishOptions {
relays?: string[];
@ -22,15 +23,20 @@ interface FetchOptions { @@ -22,15 +23,20 @@ interface FetchOptions {
cacheResults?: boolean;
onUpdate?: (events: NostrEvent[]) => void;
timeout?: number;
relayFirst?: boolean; // If true, query relays first with timeout, then fill from cache
}
class NostrClient {
private initialized = false;
private relays: Map<string, Relay> = new Map();
private subscriptions: Map<string, { relay: Relay; sub: any }> = new Map();
private subscriptions: Map<string, { relay: Relay; sub: any; lastActivity: number }> = new Map();
private nextSubId = 1;
private activeFetches: Map<string, Promise<NostrEvent[]>> = new Map();
// Subscription TTL cleanup
private subscriptionCleanupInterval: ReturnType<typeof setInterval> | null = null;
private readonly SUBSCRIPTION_TTL = 30000; // 30 seconds
// Rate limiting and throttling
private requestQueue: Array<() => void> = [];
private processingQueue = false;
@ -41,9 +47,6 @@ class NostrClient { @@ -41,9 +47,6 @@ class NostrClient {
private readonly MAX_CONCURRENT_TOTAL = 3; // Max 3 total concurrent requests
private totalActiveRequests = 0;
// Track background refresh operations to prevent duplicates
private backgroundRefreshes: Set<string> = new Set();
// Failed relay tracking with exponential backoff
private failedRelays: Map<string, { lastFailure: number; retryAfter: number; failureCount: number }> = new Map();
private readonly INITIAL_RETRY_DELAY = 5000; // 5 seconds
@ -53,6 +56,10 @@ class NostrClient { @@ -53,6 +56,10 @@ class NostrClient {
// Track authenticated relays to avoid re-authenticating
private authenticatedRelays: Set<string> = new Set();
// Cache NIP-11 metadata to avoid repeated HTTP requests
private nip11MetadataCache: Map<string, { requiresAuth: boolean; cachedAt: number }> = new Map();
private readonly NIP11_CACHE_TTL = 300000; // 5 minutes
async initialize(): Promise<void> {
if (this.initialized) return;
@ -72,9 +79,47 @@ class NostrClient { @@ -72,9 +79,47 @@ class NostrClient {
});
await Promise.allSettled(connectionPromises);
// Start subscription cleanup interval
this.startSubscriptionCleanup();
this.initialized = true;
}
/**
 * Begin the periodic sweep that closes and drops relay subscriptions whose
 * lastActivity is older than SUBSCRIPTION_TTL. Idempotent: calling again
 * while the interval is already running is a no-op.
 */
private startSubscriptionCleanup(): void {
	if (this.subscriptionCleanupInterval) return;
	this.subscriptionCleanupInterval = setInterval(() => {
		// Anything last active before this instant is considered stale.
		const cutoff = Date.now() - this.SUBSCRIPTION_TTL;
		const stale = Array.from(this.subscriptions.entries())
			.filter(([, entry]) => entry.lastActivity < cutoff)
			.map(([key]) => key);
		for (const key of stale) {
			const entry = this.subscriptions.get(key);
			if (entry) {
				try {
					entry.sub.close();
				} catch (error) {
					// Closing an already-dead subscription can throw; ignore.
				}
				this.subscriptions.delete(key);
			}
		}
		if (stale.length > 0) {
			console.debug(`[nostr-client] Cleaned up ${stale.length} inactive subscriptions`);
		}
	}, 10000); // Check every 10 seconds
}
/**
* Authenticate with a relay using NIP-42 AUTH
* Only call this when the relay has sent an AUTH challenge or an operation failed with auth-required
@ -309,10 +354,68 @@ class NostrClient { @@ -309,10 +354,68 @@ class NostrClient {
});
}
/**
 * Load up to `limit` events of one kind from the IndexedDB cache, walking the
 * `kind` index with a reverse cursor and stopping at min(limit, maxEvents).
 *
 * @param kind - event kind to look up
 * @param limit - caller-requested maximum number of events
 * @param maxEvents - hard cap applied on top of `limit`
 * @param pageSize - accepted for interface symmetry; not referenced in the body
 * @returns cached events, or [] on any cache error
 */
private async getEventsByKindPaginated(kind: number, limit: number, maxEvents: number, pageSize: number): Promise<NostrEvent[]> {
	try {
		const db = await getDB();
		const tx = db.transaction('events', 'readonly');
		const kindIndex = tx.store.index('kind');
		const cap = Math.min(limit, maxEvents);
		const collected: NostrEvent[] = [];
		// Reverse cursor over the kind index; stop once the cap is reached.
		for (
			let cursor = await kindIndex.openCursor(IDBKeyRange.only(kind), 'prev');
			cursor && collected.length < cap;
			cursor = await cursor.continue()
		) {
			collected.push(cursor.value as NostrEvent);
		}
		await tx.done;
		return collected;
	} catch (error) {
		console.debug('Error getting events by kind from cache:', error);
		return [];
	}
}
/**
 * Get events by pubkey with pagination.
 *
 * Walks the IndexedDB `pubkey` index with a reverse cursor and collects at
 * most min(limit, maxEvents) events.
 *
 * @param pubkey - author public key to look up
 * @param limit - caller-requested maximum number of events
 * @param maxEvents - hard cap applied on top of `limit`
 * @param pageSize - accepted but not referenced in this body (kept for
 *   interface symmetry with the kind-based query)
 * @returns cached events, or [] on any cache error (errors are swallowed
 *   deliberately — the cache is best-effort)
 */
private async getEventsByPubkeyPaginated(pubkey: string, limit: number, maxEvents: number, pageSize: number): Promise<NostrEvent[]> {
	try {
		const db = await getDB();
		const tx = db.transaction('events', 'readonly');
		const index = tx.store.index('pubkey');
		const events: NostrEvent[] = [];
		let count = 0;
		// Effective cap: whichever of limit / maxEvents is smaller.
		const targetLimit = Math.min(limit, maxEvents);
		// Use cursor to paginate through events
		// NOTE(review): 'prev' iterates the pubkey index in reverse key order —
		// presumably newest-first within a pubkey; confirm against the index schema.
		let cursor = await index.openCursor(IDBKeyRange.only(pubkey), 'prev');
		while (cursor && count < targetLimit) {
			events.push(cursor.value as NostrEvent);
			count++;
			cursor = await cursor.continue();
		}
		await tx.done;
		return events;
	} catch (error) {
		console.debug('Error getting events by pubkey from cache:', error);
		return [];
	}
}
private async getCachedEvents(filters: Filter[]): Promise<NostrEvent[]> {
try {
const results: NostrEvent[] = [];
const seen = new Set<string>();
const MAX_EVENTS = 1000; // Maximum events to load from cache
const PAGE_SIZE = 100; // Load in pages of 100
for (const filter of filters) {
try {
@ -330,44 +433,47 @@ class NostrClient { @@ -330,44 +433,47 @@ class NostrClient {
}
candidateEvents = idEvents;
} else if (filter.kinds && filter.kinds.length > 0) {
// Query by kind(s) if specified
// If single kind, use index for efficiency
// Query by kind(s) if specified - use pagination
if (filter.kinds.length === 1) {
candidateEvents = await getEventsByKind(filter.kinds[0], (filter.limit || 100) * 3);
// Single kind - use paginated query
candidateEvents = await this.getEventsByKindPaginated(filter.kinds[0], filter.limit || 100, MAX_EVENTS, PAGE_SIZE);
} else {
// Multiple kinds - query each and combine
// Multiple kinds - query each with pagination and combine
const allEvents: NostrEvent[] = [];
const perKindLimit = Math.ceil((filter.limit || 100) / filter.kinds.length);
for (const kind of filter.kinds) {
const kindEvents = await getEventsByKind(kind, (filter.limit || 100) * 3);
const kindEvents = await this.getEventsByKindPaginated(kind, perKindLimit, MAX_EVENTS, PAGE_SIZE);
allEvents.push(...kindEvents);
if (allEvents.length >= MAX_EVENTS) break; // Stop if we've hit max
}
candidateEvents = allEvents;
}
} else if (filter.authors && filter.authors.length > 0) {
// Query by author(s) if no kinds specified
// Query by author(s) if no kinds specified - use pagination
if (filter.authors.length === 1) {
candidateEvents = await getEventsByPubkey(filter.authors[0], (filter.limit || 100) * 3);
candidateEvents = await this.getEventsByPubkeyPaginated(filter.authors[0], filter.limit || 100, MAX_EVENTS, PAGE_SIZE);
} else {
// Multiple authors - query each and combine
// Multiple authors - query each with pagination and combine
const allEvents: NostrEvent[] = [];
const perAuthorLimit = Math.ceil((filter.limit || 100) / filter.authors.length);
for (const author of filter.authors) {
const authorEvents = await getEventsByPubkey(author, (filter.limit || 100) * 3);
const authorEvents = await this.getEventsByPubkeyPaginated(author, perAuthorLimit, MAX_EVENTS, PAGE_SIZE);
allEvents.push(...authorEvents);
if (allEvents.length >= MAX_EVENTS) break; // Stop if we've hit max
}
candidateEvents = allEvents;
}
} else {
// No specific kind or author - get recent events by created_at
// This is a fallback for broad queries
// No specific kind or author - get recent events by created_at with pagination
try {
const db = await getDB();
const tx = db.transaction('events', 'readonly');
const index = tx.store.index('created_at');
const events: NostrEvent[] = [];
let count = 0;
const limit = (filter.limit || 100) * 3;
const limit = Math.min(filter.limit || 100, MAX_EVENTS);
// Iterate in reverse (newest first)
// Iterate in reverse (newest first) with pagination
let cursor = await index.openCursor(null, 'prev');
while (cursor && count < limit) {
events.push(cursor.value as NostrEvent);
@ -458,7 +564,7 @@ class NostrClient { @@ -458,7 +564,7 @@ class NostrClient {
// Sort by created_at descending and apply limit
const sorted = results.sort((a, b) => b.created_at - a.created_at);
const limit = filters[0]?.limit || 100;
const limited = sorted.slice(0, limit);
const limited = sorted.slice(0, Math.min(limit, MAX_EVENTS));
const filtered = filterEvents(limited);
// Only log cache queries at debug level to reduce console noise
@ -722,7 +828,7 @@ class NostrClient { @@ -722,7 +828,7 @@ class NostrClient {
}
});
client.subscriptions.set(`${url}_${subId}`, { relay, sub });
client.subscriptions.set(`${url}_${subId}`, { relay, sub, lastActivity: Date.now() });
};
startSub();
@ -759,7 +865,8 @@ class NostrClient { @@ -759,7 +865,8 @@ class NostrClient {
relayUrl: string,
filters: Filter[],
events: Map<string, NostrEvent>,
timeout: number
timeout: number,
onUpdate?: (events: NostrEvent[]) => void
): Promise<void> {
return new Promise((resolve) => {
const makeRequest = () => {
@ -779,7 +886,7 @@ class NostrClient { @@ -779,7 +886,7 @@ class NostrClient {
this.totalActiveRequests++;
// Make the request
this.makeRelayRequest(relayUrl, filters, events, timeout)
this.makeRelayRequest(relayUrl, filters, events, timeout, onUpdate)
.finally(() => {
const current = this.activeRequestsPerRelay.get(relayUrl) || 0;
if (current > 0) {
@ -809,7 +916,8 @@ class NostrClient { @@ -809,7 +916,8 @@ class NostrClient {
relayUrl: string,
filters: Filter[],
events: Map<string, NostrEvent>,
timeout: number
timeout: number,
onUpdate?: (events: NostrEvent[]) => void
): Promise<void> {
const relay = this.relays.get(relayUrl);
if (!relay || !this.checkAndCleanupRelay(relayUrl)) {
@ -820,39 +928,171 @@ class NostrClient { @@ -820,39 +928,171 @@ class NostrClient {
let resolved = false;
let timeoutId: ReturnType<typeof setTimeout> | null = null;
let hasAuthed = this.authenticatedRelays.has(relayUrl);
let authInProgress = false;
let relayRequiresAuth = false;
let seenEventIds = new Set<string>(); // Track seen event IDs for deduplication
// Create a promise that resolves when the subscription finishes
let finishResolve: (() => void) | null = null;
const finishPromise = new Promise<void>((resolve) => {
finishResolve = resolve;
});
const finish = () => {
if (resolved) return;
resolved = true;
if (timeoutId) clearTimeout(timeoutId);
this.unsubscribe(subId);
if (finishResolve) {
finishResolve();
}
};
// Simple timeout - single timeout per fetch
timeoutId = setTimeout(() => {
if (!resolved) {
console.debug(`[nostr-client] Timeout reached for ${relayUrl}`);
finish();
}
}, timeout);
// Check if relay requires auth from NIP-11 metadata (async, but we'll check it)
const checkRelayAuthRequirement = async () => {
if (!relayRequiresAuth && !hasAuthed) {
// Check cache first
const cached = this.nip11MetadataCache.get(relayUrl);
if (cached && (Date.now() - cached.cachedAt) < this.NIP11_CACHE_TTL) {
relayRequiresAuth = cached.requiresAuth;
return;
}
try {
const httpUrl = relayUrl.replace(/^wss?:\/\//, (match) => {
return match === 'wss://' ? 'https://' : 'http://';
});
const nip11Url = `${httpUrl}/.well-known/nostr.json`;
const response = await fetch(nip11Url, {
method: 'GET',
headers: { 'Accept': 'application/nostr+json' },
signal: AbortSignal.timeout(2000) // 2 second timeout for metadata fetch
});
if (response.ok) {
const metadata = await response.json();
const requiresAuth = !!metadata?.limitation?.auth_required;
relayRequiresAuth = requiresAuth;
// Cache the result
this.nip11MetadataCache.set(relayUrl, { requiresAuth, cachedAt: Date.now() });
}
} catch (error) {
// Metadata fetch failed - that's okay, we'll proceed normally
// Cache negative result to avoid repeated failed requests
this.nip11MetadataCache.set(relayUrl, { requiresAuth: false, cachedAt: Date.now() });
}
}
};
// Check auth requirement in background (don't wait for it)
checkRelayAuthRequirement();
const startSub = () => {
try {
// Check relay status before subscribing
if (!this.checkAndCleanupRelay(relayUrl)) {
console.debug(`[nostr-client] Relay ${relayUrl} is closed, skipping subscription`);
finish();
return;
}
const client = this;
// Re-check authentication state right before subscribing (it might have changed)
hasAuthed = client.authenticatedRelays.has(relayUrl);
const sub = relay.subscribe(filters, {
onevent: (event: NostrEvent) => {
try {
if (!client.relays.has(relayUrl)) return;
if (!client.relays.has(relayUrl) || resolved) return;
if (shouldHideEvent(event)) return;
if (client.shouldFilterZapReceipt(event)) return;
// Deduplicate events
if (seenEventIds.has(event.id)) return;
seenEventIds.add(event.id);
// Update subscription activity timestamp
const subEntry = client.subscriptions.get(`${relayUrl}_${subId}`);
if (subEntry) {
subEntry.lastActivity = Date.now();
}
// Add to events Map for return value
events.set(event.id, event);
// Track in memory manager (for monitoring)
memoryManager.trackEvent(event);
// Cache the event
client.addToCache(event);
// Check memory usage and cleanup if needed (soft limits)
const stats = memoryManager.getStats();
if (stats.totalSizeMB > 200) {
// If over 200MB, cleanup oldest events to get back to 100MB
const cleanedIds = memoryManager.cleanupOldEvents(100 * 1024 * 1024);
// Note: We don't remove from events Map here as those are needed for return value
// The cleanup is just for tracking/monitoring purposes
}
// Stream event directly to onUpdate callback immediately
if (onUpdate && !resolved) {
try {
const filtered = filterEvents([event]);
const zapFiltered = filtered.filter(e => !client.shouldFilterZapReceipt(e));
if (zapFiltered.length > 0) {
onUpdate(zapFiltered);
}
} catch (error) {
// Ignore errors from onUpdate callback
}
}
} catch (error) {
// Silently handle errors - connection may be closed
}
},
oneose: () => {
try {
if (!resolved) finish();
if (resolved) return;
// If we have no events and haven't authenticated, try to authenticate if relay requires it
if (events.size === 0 && !hasAuthed && relayRequiresAuth) {
const session = sessionManager.getSession();
if (session && !authInProgress) {
authInProgress = true;
// Relay requires auth, attempting authentication
client.authenticateRelay(relay, relayUrl)
.then((authSuccess) => {
authInProgress = false;
if (authSuccess) {
hasAuthed = true;
// Successfully authenticated, retrying subscription
if (!resolved) {
startSub(); // Retry subscription after authentication
}
} else {
console.debug(`[nostr-client] Authentication failed for ${relayUrl} after 0-event EOSE`);
finish();
}
})
.catch((error) => {
authInProgress = false;
console.debug(`[nostr-client] Authentication error for ${relayUrl} after 0-event EOSE:`, error);
finish();
});
return; // Don't finish yet, wait for auth
}
}
// Simple finish on EOSE - no complex timeout chains
// EOSE received - subscription complete
finish();
} catch (error) {
// Silently handle errors - connection may be closed
}
@ -862,33 +1102,39 @@ class NostrClient { @@ -862,33 +1102,39 @@ class NostrClient {
if (reason.startsWith('auth-required') && !hasAuthed && !resolved) {
const session = sessionManager.getSession();
if (session) {
authInProgress = true;
// Relay requires authentication, authenticating
client.authenticateRelay(relay, relayUrl)
.then((authSuccess) => {
authInProgress = false;
if (authSuccess) {
hasAuthed = true;
// Retry subscription after authentication
// Successfully authenticated, retrying subscription
if (!resolved) {
startSub();
}
} else {
console.debug(`[nostr-client] Authentication failed for ${relayUrl}`);
finish();
}
})
.catch(() => {
.catch((error) => {
authInProgress = false;
console.debug(`[nostr-client] Authentication error for ${relayUrl}:`, error);
finish();
});
} else {
console.debug(`[nostr-client] Relay ${relayUrl} requires authentication but user is not logged in`);
finish();
}
} else if (!resolved) {
// If subscription closed for other reasons, finish
finish();
}
}
});
client.subscriptions.set(`${relayUrl}_${subId}`, { relay, sub });
timeoutId = setTimeout(() => {
if (!resolved) finish();
}, timeout);
client.subscriptions.set(`${relayUrl}_${subId}`, { relay, sub, lastActivity: Date.now() });
} catch (error) {
// Handle SendingOnClosedConnection and other errors
const errorMessage = error instanceof Error ? error.message : String(error);
@ -902,6 +1148,9 @@ class NostrClient { @@ -902,6 +1148,9 @@ class NostrClient {
};
startSub();
// Wait for the subscription to finish before returning
await finishPromise;
}
private processQueue(): void {
@ -921,62 +1170,128 @@ class NostrClient { @@ -921,62 +1170,128 @@ class NostrClient {
relays: string[],
options: FetchOptions = {}
): Promise<NostrEvent[]> {
const { useCache = true, cacheResults = true, onUpdate, timeout = 10000 } = options;
const { useCache = true, cacheResults = true, onUpdate, timeout = 10000, relayFirst = false } = options;
// Create a key for this fetch to prevent duplicates
const fetchKey = JSON.stringify({ filters, relays: relays.sort() });
const fetchKey = JSON.stringify({
filters,
relays: relays.sort()
});
const activeFetch = this.activeFetches.get(fetchKey);
if (activeFetch) {
return activeFetch;
}
// Query cache first
if (useCache) {
try {
const cachedEvents = await this.getCachedEvents(filters);
if (cachedEvents.length > 0) {
console.debug(`[nostr-client] Returning ${cachedEvents.length} cached events for filter:`, filters);
// Return cached immediately, fetch fresh in background with delay
// Don't pass onUpdate to background fetch to avoid interfering with cached results
if (cacheResults) {
// Prevent duplicate background refreshes for the same filter
if (!this.backgroundRefreshes.has(fetchKey)) {
this.backgroundRefreshes.add(fetchKey);
// Use a longer delay for background refresh to avoid interfering with initial load
setTimeout(() => {
// Only update cache, don't call onUpdate for background refresh
// This ensures cached events persist and are not cleared by background refresh
const bgPromise = this.fetchFromRelays(filters, relays, { cacheResults: true, onUpdate: undefined, timeout });
bgPromise.finally(() => {
// Remove from background refreshes set after a delay to allow re-refresh if needed
setTimeout(() => {
this.backgroundRefreshes.delete(fetchKey);
}, 60000); // Allow re-refresh after 60 seconds
}).catch((error) => {
// Log but don't throw - background refresh failures shouldn't affect cached results
console.debug('[nostr-client] Background refresh failed:', error);
this.backgroundRefreshes.delete(fetchKey);
});
}, 5000); // 5 second delay for background refresh to avoid interfering
// Always use relay-first mode: query relays first with timeout, then fill from cache if needed
{
// Fetching events from relays
const relayTimeout = timeout || 10000; // Default 10s timeout
const fetchPromise = (async () => {
// For single relay queries, ensure connection is established and authenticated first
if (relays.length === 1) {
const relayUrl = relays[0];
if (!this.relays.has(relayUrl)) {
// Try to connect first
await this.addRelay(relayUrl);
// Wait for connection to establish (up to 2 seconds)
let attempts = 0;
const maxAttempts = 4; // 2 seconds max
while (attempts < maxAttempts && !this.relays.has(relayUrl)) {
await new Promise(resolve => setTimeout(resolve, 500));
attempts++;
}
// If still not connected, try one more time
if (!this.relays.has(relayUrl)) {
await this.addRelay(relayUrl);
await new Promise(resolve => setTimeout(resolve, 500));
}
}
// Check if relay requires authentication from NIP-11 metadata
// If it does, authenticate proactively before starting the subscription
const relay = this.relays.get(relayUrl);
if (relay && !this.authenticatedRelays.has(relayUrl)) {
try {
const httpUrl = relayUrl.replace(/^wss?:\/\//, (match) => {
return match === 'wss://' ? 'https://' : 'http://';
});
const nip11Url = `${httpUrl}/.well-known/nostr.json`;
const response = await fetch(nip11Url, {
method: 'GET',
headers: { 'Accept': 'application/nostr+json' },
signal: AbortSignal.timeout(3000) // 3 second timeout for metadata fetch
});
if (response.ok) {
const metadata = await response.json();
if (metadata?.limitation?.auth_required) {
console.debug(`[nostr-client] Relay ${relayUrl} requires authentication (from NIP-11), authenticating before subscription...`);
const session = sessionManager.getSession();
if (session) {
// Try to authenticate proactively - wait for it to complete before starting subscription
try {
const authSuccess = await this.authenticateRelay(relay, relayUrl);
if (authSuccess) {
console.debug(`[nostr-client] Successfully authenticated with ${relayUrl} proactively, starting subscription`);
} else {
console.debug(`[nostr-client] Proactive auth not available for ${relayUrl}, will authenticate on challenge`);
}
} catch (error) {
// Auth might fail if relay doesn't send challenge - that's okay
console.debug(`[nostr-client] Proactive auth attempt for ${relayUrl} failed (will try on challenge):`, error);
}
} else {
console.debug(`[nostr-client] Relay ${relayUrl} requires authentication but user is not logged in`);
}
}
}
} catch (error) {
// Metadata fetch failed - that's okay, we'll proceed normally
console.debug(`[nostr-client] Could not fetch NIP-11 metadata for ${relayUrl}:`, error);
}
}
return cachedEvents;
}
// No cached events - this is expected and normal, so don't log it
} catch (error) {
console.error('[nostr-client] Error querying cache:', error);
// Continue to fetch from relays
}
// Query relays first with timeout
// Respect cacheResults option - don't cache if explicitly disabled
const relayEvents = await this.fetchFromRelays(filters, relays, {
cacheResults: cacheResults,
onUpdate,
timeout: relayTimeout
});
// If we got results from relays, return them immediately
if (relayEvents.length > 0) {
// Got events from relays
return relayEvents;
}
// If no results from relays, try to fill from cache (only if useCache is true)
// IMPORTANT: In single-relay mode, useCache should be false to avoid showing events from other relays
if (useCache) {
try {
const cachedEvents = await this.getCachedEvents(filters);
if (cachedEvents.length > 0) {
console.debug(`[nostr-client] Relay query returned 0 events, using ${cachedEvents.length} cached events`);
return cachedEvents;
}
} catch (error) {
console.error('[nostr-client] Error querying cache:', error);
}
} else {
console.debug(`[nostr-client] No events from relays, useCache=false, returning empty array`);
}
return relayEvents; // Return empty array if both failed
})();
this.activeFetches.set(fetchKey, fetchPromise);
fetchPromise.finally(() => {
this.activeFetches.delete(fetchKey);
});
return fetchPromise;
}
// Fetch from relays
const fetchPromise = this.fetchFromRelays(filters, relays, { cacheResults, onUpdate, timeout });
this.activeFetches.set(fetchKey, fetchPromise);
fetchPromise.finally(() => {
this.activeFetches.delete(fetchKey);
});
return fetchPromise;
}
private async fetchFromRelays(
@ -1016,9 +1331,26 @@ class NostrClient { @@ -1016,9 +1331,26 @@ class NostrClient {
await Promise.allSettled(
relaysToConnect.map(url => this.addRelay(url))
);
// For single relay, wait a bit longer for connection to establish
// For single relay, wait for connection to actually establish
if (relays.length === 1 && relaysToConnect.length > 0) {
await new Promise(resolve => setTimeout(resolve, 1000));
const relayUrl = relaysToConnect[0];
let attempts = 0;
const maxAttempts = 6; // Wait up to 3 seconds (6 * 500ms) for connection
while (attempts < maxAttempts) {
const relay = this.relays.get(relayUrl);
if (relay) {
try {
const ws = (relay as any).ws;
if (ws && ws.readyState === WebSocket.OPEN) {
break; // Connection is open, proceed
}
} catch {
// Ignore errors checking WebSocket
}
}
await new Promise(resolve => setTimeout(resolve, 500));
attempts++;
}
}
}
@ -1064,7 +1396,7 @@ class NostrClient { @@ -1064,7 +1396,7 @@ class NostrClient {
const events: Map<string, NostrEvent> = new Map();
for (const relayUrl of connectedRelays) {
await this.throttledRelayRequest(relayUrl, filters, events, timeout);
await this.throttledRelayRequest(relayUrl, filters, events, timeout, options.onUpdate);
// Small delay between relays
await new Promise(resolve => setTimeout(resolve, 100));
}
@ -1082,14 +1414,14 @@ class NostrClient { @@ -1082,14 +1414,14 @@ class NostrClient {
// Only call onUpdate if we got new events AND onUpdate is provided
// This prevents clearing the UI when background fetch returns fewer results
if (options.onUpdate && filtered.length > 0) {
console.log(`[nostr-client] Fetch returned ${filtered.length} events, calling onUpdate`);
// Calling onUpdate with fetched events
options.onUpdate(filtered);
} else if (options.onUpdate && filtered.length === 0) {
console.debug(`[nostr-client] Fetch returned 0 events, skipping onUpdate to preserve cached results`);
// No new events, skipping onUpdate
} else if (!options.onUpdate) {
// Only log background refreshes that return events, not empty results
if (filtered.length > 0) {
console.debug(`[nostr-client] Fetch returned ${filtered.length} events (background refresh, no onUpdate)`);
// Background refresh completed
}
}
@ -1122,16 +1454,35 @@ class NostrClient { @@ -1122,16 +1454,35 @@ class NostrClient {
}
/**
 * Shut the client down and release every resource it holds.
 *
 * Teardown order matters: the cleanup timer is stopped first so it cannot
 * fire mid-teardown, then subscriptions are closed before their underlying
 * relay connections, and finally all tracking state is cleared so the
 * client can be re-initialized later.
 */
close(): void {
// Stop the periodic subscription-cleanup timer so it cannot run
// against maps we are about to clear.
if (this.subscriptionCleanupInterval) {
clearInterval(this.subscriptionCleanupInterval);
this.subscriptionCleanupInterval = null;
}
// Close all subscriptions. A throwing close() on one subscription
// (e.g. already closed) must not prevent closing the rest.
for (const { sub } of this.subscriptions.values()) {
try {
sub.close();
} catch {
// Ignore errors from subscriptions that are already closed
}
}
this.subscriptions.clear();
// Close all relay connections, again isolating per-relay failures.
for (const relay of this.relays.values()) {
try {
relay.close();
} catch {
// Ignore errors from relays that are already disconnected
}
}
this.relays.clear();
// Clear memory tracking and mark the client as needing re-initialization.
memoryManager.clear();
this.initialized = false;
}
}

Loading…
Cancel
Save