Browse Source

fix search and relay connections

Nostr-Signature: 24db15027960b244eb4c8664a3642c64684ebfef8c200250093dd047cd119e7d 573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc 561d15ae39b3bf7a5b8a67539a5cfa19d53cbaca9f904589ab7cb69e568ddf056d0d83ced4830cdfdc0b386f13c4bab930264a0f6144cbb833b187b5d452c4ae
main
Silberengel 3 weeks ago
parent
commit
4673fddcd9
  1. 1
      nostr/commit-signatures.jsonl
  2. 9
      src/lib/config.ts
  3. 766
      src/lib/services/nostr/nostr-client.ts
  4. 15
      src/routes/+layout.svelte
  5. 12
      src/routes/api/repos/[npub]/[repo]/file/+server.ts
  6. 12
      src/routes/api/repos/[npub]/[repo]/tree/+server.ts
  7. 1018
      src/routes/api/search/+server.ts
  8. 40
      src/routes/api/transfers/pending/+server.ts
  9. 33
      src/routes/repos/[npub]/[repo]/+page.svelte

1
nostr/commit-signatures.jsonl

@ -60,3 +60,4 @@
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771836045,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","fix repo management and refactor\nimplement more GRASP support"]],"content":"Signed commit: fix repo management and refactor\nimplement more GRASP support","id":"6ae016621b13e22809e7bcebe34e5250fd6e0767d2b12ca634104def4ca78a29","sig":"99c34f66a8a67d352622621536545b7dee11cfd9d14a007ec0550d138109116a2f24483c6836fea59b94b9e96066fba548bcb7600bc55adbe0562d999c3c651d"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771836045,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","fix repo management and refactor\nimplement more GRASP support"]],"content":"Signed commit: fix repo management and refactor\nimplement more GRASP support","id":"6ae016621b13e22809e7bcebe34e5250fd6e0767d2b12ca634104def4ca78a29","sig":"99c34f66a8a67d352622621536545b7dee11cfd9d14a007ec0550d138109116a2f24483c6836fea59b94b9e96066fba548bcb7600bc55adbe0562d999c3c651d"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771838236,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","refactor repo manager"]],"content":"Signed commit: refactor repo manager","id":"d134c35516991f27e47ed8a4aa0d3f1d6e6be41c46c9cf3f6c982c1442b09b4b","sig":"cb699fae6a8e44a3b9123f215749f6fec0470c75a0401a94c37dfb8e572c07281b3941862e704b868663f943c573ab2ee9fec217e87f7be567cc6bb3514cacdb"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771838236,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","refactor repo manager"]],"content":"Signed commit: refactor repo manager","id":"d134c35516991f27e47ed8a4aa0d3f1d6e6be41c46c9cf3f6c982c1442b09b4b","sig":"cb699fae6a8e44a3b9123f215749f6fec0470c75a0401a94c37dfb8e572c07281b3941862e704b868663f943c573ab2ee9fec217e87f7be567cc6bb3514cacdb"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771840654,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"0580e0df8000275817f040bbd6c04dfdfbff08a366df7a1686f227d8b7310053","sig":"9a238266f989c0664dc5c9743675907477e2fcb5311e8edeb505dec97027f619f6dc6742ee5f3887ff6a864274b45005fc7dd4432f8e2772dfe0bb7e2d8a449c"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771840654,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"0580e0df8000275817f040bbd6c04dfdfbff08a366df7a1686f227d8b7310053","sig":"9a238266f989c0664dc5c9743675907477e2fcb5311e8edeb505dec97027f619f6dc6742ee5f3887ff6a864274b45005fc7dd4432f8e2772dfe0bb7e2d8a449c"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771840660,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"e96c955f550a94c9c6d1228d2a7e479ced331334aaa4eea84525b362b8484d6e","sig":"1218bd9e449404ccc56c5727e8bdff5db31e37c2053a2d91ba02d214c0988173ba480010e53401661cb439884308a575230a7a12124f8e6d8f058c8a804a42f6"}

9
src/lib/config.ts

@ -34,13 +34,18 @@ export const DEFAULT_NOSTR_SEARCH_RELAYS =
typeof process !== 'undefined' && process.env?.NOSTR_SEARCH_RELAYS typeof process !== 'undefined' && process.env?.NOSTR_SEARCH_RELAYS
? process.env.NOSTR_SEARCH_RELAYS.split(',').map(r => r.trim()).filter(r => r.length > 0) ? process.env.NOSTR_SEARCH_RELAYS.split(',').map(r => r.trim()).filter(r => r.length > 0)
: [ : [
'wss://theforest.nostr1.com',
'wss://nostr.land', 'wss://nostr.land',
'wss://relay.damus.io', 'wss://relay.damus.io',
'wss://thecitadel.nostr1.com', 'wss://thecitadel.nostr1.com',
'wss://nostr21.com', 'wss://nostr21.com',
'wss://theforest.nostr1.com',
'wss://freelay.sovbit.host',
'wss://nostr.sovbit.host',
'wss://bevos.nostr1.com',
'wss://relay.primal.net', 'wss://relay.primal.net',
'wss://nostr.mom',
'wss://relay.snort.social',
'wss://aggr.nostr.land',
]; ];
/** /**

766
src/lib/services/nostr/nostr-client.ts

@ -1,12 +1,12 @@
/** /**
* Nostr client for fetching and publishing events * Nostr client for fetching and publishing events
* Uses nostr-tools Pool for relay connection management
*/ */
import type { NostrEvent, NostrFilter } from '../../types/nostr.js'; import type { NostrEvent, NostrFilter } from '../../types/nostr.js';
import logger from '../logger.js'; import logger from '../logger.js';
import { isNIP07Available, getPublicKeyWithNIP07, signEventWithNIP07 } from './nip07-signer.js'; import { isNIP07Available, getPublicKeyWithNIP07, signEventWithNIP07 } from './nip07-signer.js';
import { shouldUseTor, getTorProxy } from '../../utils/tor.js'; import { SimplePool, type Filter } from 'nostr-tools';
// Removed separate in-memory cache - persistent cache now has built-in memory layer
import { KIND } from '../../types/nostr.js'; import { KIND } from '../../types/nostr.js';
// Replaceable event kinds (only latest per pubkey matters) // Replaceable event kinds (only latest per pubkey matters)
@ -14,11 +14,25 @@ const REPLACEABLE_KINDS = [0, 3, 10002]; // Profile, Contacts, Relay List
/** /**
* Check if an event is a parameterized replaceable event (NIP-33) * Check if an event is a parameterized replaceable event (NIP-33)
* Parameterized replaceable events have kind >= 10000 && kind < 20000 and a 'd' tag * Parameterized replaceable events have:
* - kind >= 10000 && kind < 20000 (replaceable range) with a 'd' tag, OR
* - kind >= 30000 && kind < 40000 (addressable range) with a 'd' tag
*/ */
function isParameterizedReplaceable(event: NostrEvent): boolean { function isParameterizedReplaceable(event: NostrEvent): boolean {
return event.kind >= 10000 && event.kind < 20000 && const hasDTag = event.tags.some(t => t[0] === 'd' && t[1]);
event.tags.some(t => t[0] === 'd' && t[1]); if (!hasDTag) return false;
// Replaceable range (NIP-33)
if (event.kind >= 10000 && event.kind < 20000) {
return true;
}
// Addressable range (NIP-34) - also parameterized replaceable
if (event.kind >= 30000 && event.kind < 40000) {
return true;
}
return false;
} }
/** /**
@ -109,299 +123,34 @@ if (typeof process !== 'undefined' && process.versions?.node && typeof window ==
}); });
} }
/** // Note: SimplePool from nostr-tools handles WebSocket connections automatically
* Create a WebSocket connection, optionally through Tor SOCKS proxy // Tor support would require custom WebSocket factory, which SimplePool doesn't easily support
*/ // For now, we rely on SimplePool's built-in connection management
async function createWebSocketWithTor(url: string): Promise<WebSocket> {
await initializeWebSocketPolyfill();
// Check if we need Tor
if (!shouldUseTor(url)) {
return new WebSocket(url);
}
// Only use Tor in Node.js environment
if (typeof process === 'undefined' || !process.versions?.node || typeof window !== 'undefined') {
// Browser environment - can't use SOCKS proxy directly
// Fall back to regular WebSocket (will fail for .onion in browser)
logger.warn({ url }, 'Tor support not available in browser. .onion addresses may not work.');
return new WebSocket(url);
}
const proxy = getTorProxy();
if (!proxy) {
logger.warn({ url }, 'Tor proxy not configured. Cannot connect to .onion address.');
return new WebSocket(url);
}
try {
// Dynamic import for SOCKS support
const { SocksClient } = await import('socks');
const { WebSocket: WS } = await import('ws');
// Parse the WebSocket URL
const wsUrl = new URL(url);
const host = wsUrl.hostname;
const port = wsUrl.port ? parseInt(wsUrl.port, 10) : (wsUrl.protocol === 'wss:' ? 443 : 80);
// Create SOCKS connection
const socksOptions = {
proxy: {
host: proxy.host,
port: proxy.port,
type: 5 as const // SOCKS5
},
command: 'connect' as const,
destination: {
host,
port
}
};
const info = await SocksClient.createConnection(socksOptions);
// Create WebSocket over the SOCKS connection
// socket option is supported at runtime but not in types
const ws = new WS(url, {
socket: info.socket,
// For wss://, we need to handle TLS
rejectUnauthorized: false // .onion addresses use self-signed certs
} as any);
return ws as any as WebSocket;
} catch (error) {
logger.error({ error, url, proxy }, 'Failed to create WebSocket through Tor');
// Fall back to regular connection (will likely fail for .onion)
return new WebSocket(url);
}
}
// Connection pool for WebSocket connections
interface RelayConnection {
ws: WebSocket;
lastUsed: number;
pendingRequests: number;
reconnectAttempts: number;
messageHandlers: Map<string, (message: any) => void>; // subscription ID -> handler
nextSubscriptionId: number;
}
export class NostrClient { export class NostrClient {
private relays: string[] = []; private relays: string[] = [];
private pool: SimplePool;
private authenticatedRelays: Set<string> = new Set(); private authenticatedRelays: Set<string> = new Set();
private processingDeletions: boolean = false; // Guard to prevent recursive deletion processing private processingDeletions: boolean = false; // Guard to prevent recursive deletion processing
private connectionPool: Map<string, RelayConnection> = new Map();
private readonly CONNECTION_TIMEOUT = 30000; // Close idle connections after 30 seconds
private readonly MAX_RECONNECT_ATTEMPTS = 3;
private readonly RECONNECT_DELAY = 2000; // 2 seconds between reconnect attempts
private connectionAttempts: Map<string, { count: number; lastAttempt: number }> = new Map();
private readonly MAX_CONCURRENT_CONNECTIONS = 3; // Max concurrent connections per relay
private readonly CONNECTION_BACKOFF_BASE = 1000; // Base backoff in ms
private readonly THROTTLE_RESET_TIME = 5 * 60 * 1000; // Reset throttling after 5 minutes
constructor(relays: string[]) { constructor(relays: string[]) {
this.relays = relays; this.relays = relays;
// Clean up idle connections periodically // Use nostr-tools SimplePool for relay connection management
if (typeof window !== 'undefined') { // SimplePool handles all WebSocket connections, retries, and error handling automatically
setInterval(() => this.cleanupIdleConnections(), 10000); // Check every 10 seconds this.pool = new SimplePool();
}
} }
/** /**
* Clean up idle connections that haven't been used recently * Clean up pool connections when done
*/ */
private cleanupIdleConnections(): void { close(): void {
const now = Date.now(); this.pool.close(this.relays);
for (const [relay, conn] of this.connectionPool.entries()) {
// Close connections that are idle and have no pending requests
if (conn.pendingRequests === 0 &&
now - conn.lastUsed > this.CONNECTION_TIMEOUT &&
(conn.ws.readyState === WebSocket.OPEN || conn.ws.readyState === WebSocket.CLOSED)) {
try {
if (conn.ws.readyState === WebSocket.OPEN) {
conn.ws.close();
}
} catch {
// Ignore errors
}
this.connectionPool.delete(relay);
}
}
}
/**
* Get or create a WebSocket connection to a relay
* @param relay - The relay URL
* @param isReadOperation - If true, this is a read operation (like search) that can bypass throttling more easily
*/
private async getConnection(relay: string, isReadOperation: boolean = false): Promise<WebSocket | null> {
const existing = this.connectionPool.get(relay);
// Reuse existing connection if it's open
if (existing && existing.ws.readyState === WebSocket.OPEN) {
existing.lastUsed = Date.now();
existing.pendingRequests++;
return existing.ws;
}
// Check connection attempt throttling
const attemptInfo = this.connectionAttempts.get(relay) || { count: 0, lastAttempt: 0 };
const now = Date.now();
const timeSinceLastAttempt = now - attemptInfo.lastAttempt;
// Reset throttling if enough time has passed (relays may have recovered)
if (attemptInfo.count > 0 && timeSinceLastAttempt > this.THROTTLE_RESET_TIME) {
logger.debug({ relay, timeSinceLastAttempt }, 'Resetting throttling - enough time has passed');
this.connectionAttempts.set(relay, { count: 0, lastAttempt: now });
}
// If we've had too many recent failures, apply exponential backoff
// For read operations, use less aggressive throttling (half the backoff time)
if (attemptInfo.count > 0) {
const backoffMultiplier = isReadOperation ? 0.5 : 1.0;
const backoffTime = this.CONNECTION_BACKOFF_BASE * Math.pow(2, Math.min(attemptInfo.count - 1, 5)) * backoffMultiplier;
if (timeSinceLastAttempt < backoffTime) {
const waitTime = backoffTime - timeSinceLastAttempt;
// For read operations, be more lenient - allow longer waits or bypass if we have cached data
const maxWaitTime = isReadOperation ? 10000 : 5000; // 10s for reads, 5s for writes
if (waitTime <= maxWaitTime) {
logger.debug({ relay, backoffTime, timeSinceLastAttempt, waitTime, isReadOperation }, 'Throttling connection attempt - waiting for backoff');
await new Promise(resolve => setTimeout(resolve, waitTime));
// After waiting, check if connection is now available
const existingAfterWait = this.connectionPool.get(relay);
if (existingAfterWait && (existingAfterWait.ws.readyState === WebSocket.OPEN || existingAfterWait.ws.readyState === WebSocket.CONNECTING)) {
existingAfterWait.pendingRequests++;
return existingAfterWait.ws;
}
// Continue to create new connection after backoff
} else {
// Backoff is too long, return null to avoid long waits
// For read operations, we might still want to try (if we have no cached data)
// but for now, we'll be conservative and return null
logger.debug({ relay, waitTime, maxWaitTime, isReadOperation }, 'Backoff too long, skipping connection attempt');
return null;
}
}
}
// Check if we have too many concurrent connections to this relay
const openConnections = Array.from(this.connectionPool.values())
.filter(c => c.ws === existing?.ws || (c.ws.readyState === WebSocket.OPEN || c.ws.readyState === WebSocket.CONNECTING))
.length;
if (openConnections >= this.MAX_CONCURRENT_CONNECTIONS) {
logger.debug({ relay, openConnections }, 'Too many concurrent connections, skipping');
return null;
}
// Remove dead connection
if (existing) {
this.connectionPool.delete(relay);
try {
if (existing.ws.readyState !== WebSocket.CLOSED) {
existing.ws.close();
}
} catch {
// Ignore errors
}
}
// Update attempt tracking
this.connectionAttempts.set(relay, { count: attemptInfo.count + 1, lastAttempt: now });
// Create new connection
try {
const ws = await createWebSocketWithTor(relay);
const conn: RelayConnection = {
ws,
lastUsed: Date.now(),
pendingRequests: 1,
reconnectAttempts: 0,
messageHandlers: new Map(),
nextSubscriptionId: 1
};
// Set up shared message handler for routing
ws.onmessage = (event: MessageEvent) => {
try {
const message = JSON.parse(event.data);
// Route to appropriate handler based on message type
if (message[0] === 'EVENT' && message[1]) {
// message[1] is the subscription ID
const handler = conn.messageHandlers.get(message[1]);
if (handler) {
handler(message);
}
} else if (message[0] === 'EOSE' && message[1]) {
// message[1] is the subscription ID
const handler = conn.messageHandlers.get(message[1]);
if (handler) {
handler(message);
}
} else if (message[0] === 'AUTH') {
// AUTH challenge - broadcast to all handlers (they'll handle it)
for (const handler of conn.messageHandlers.values()) {
handler(message);
}
} else if (message[0] === 'OK' && message[1] === 'auth') {
// AUTH response - broadcast to all handlers
for (const handler of conn.messageHandlers.values()) {
handler(message);
}
}
} catch (error) {
// Ignore parse errors
}
};
// Handle connection close/error
ws.onclose = () => {
// Remove from pool when closed
const poolConn = this.connectionPool.get(relay);
if (poolConn && poolConn.ws === ws) {
this.connectionPool.delete(relay);
}
};
ws.onerror = () => {
// Remove from pool on error
const poolConn = this.connectionPool.get(relay);
if (poolConn && poolConn.ws === ws) {
this.connectionPool.delete(relay);
}
};
this.connectionPool.set(relay, conn);
// Reset attempt count on successful connection
ws.onopen = () => {
this.connectionAttempts.set(relay, { count: 0, lastAttempt: Date.now() });
};
return ws;
} catch (error) {
logger.debug({ error, relay }, 'Failed to create WebSocket connection');
return null;
}
}
/**
* Release a connection (decrement pending requests counter)
*/
private releaseConnection(relay: string): void {
const conn = this.connectionPool.get(relay);
if (conn) {
conn.pendingRequests = Math.max(0, conn.pendingRequests - 1);
conn.lastUsed = Date.now();
}
} }
/** /**
* Handle AUTH challenge from relay and authenticate using NIP-42 * Handle AUTH challenge from relay and authenticate using NIP-42
* Note: SimplePool doesn't expose WebSocket directly, so AUTH handling
* may need to be done differently. For now, this is kept for compatibility.
*/ */
private async handleAuthChallenge(ws: WebSocket, relay: string, challenge: string): Promise<boolean> { private async handleAuthChallenge(ws: WebSocket, relay: string, challenge: string): Promise<boolean> {
// Only try to authenticate if NIP-07 is available (browser environment) // Only try to authenticate if NIP-07 is available (browser environment)
@ -516,14 +265,23 @@ export class NostrClient {
private async fetchAndMergeFromRelays(filters: NostrFilter[], existingEvents: NostrEvent[]): Promise<NostrEvent[]> { private async fetchAndMergeFromRelays(filters: NostrFilter[], existingEvents: NostrEvent[]): Promise<NostrEvent[]> {
const events: NostrEvent[] = []; const events: NostrEvent[] = [];
// Fetch from all relays in parallel // Use nostr-tools SimplePool to fetch from all relays in parallel
const promises = this.relays.map(relay => this.fetchFromRelay(relay, filters)); // SimplePool handles connection management, retries, and error handling automatically
const results = await Promise.allSettled(promises); try {
// querySync takes a single filter, so we query each filter and combine results
const queryPromises = filters.map(filter =>
this.pool.querySync(this.relays, filter as Filter, { maxWait: 8000 })
);
const results = await Promise.allSettled(queryPromises);
for (const result of results) { for (const result of results) {
if (result.status === 'fulfilled') { if (result.status === 'fulfilled') {
events.push(...result.value); events.push(...result.value);
}
} }
} catch (err) {
logger.debug({ error: err, filters }, 'Pool querySync failed');
// Continue with empty events - will use cached events
} }
// Merge with existing events - handle replaceable and parameterized replaceable events // Merge with existing events - handle replaceable and parameterized replaceable events
@ -563,18 +321,8 @@ export class NostrClient {
} }
} }
// Remove events that should be deleted // eventMap already contains only the latest events per deduplication key
for (const eventId of eventsToDelete) { // No need to remove from eventMap - the merge logic above already handles that
eventMap.delete(eventId); // Remove by ID if it was keyed by ID
// Also remove from map if it's keyed by deduplication key
for (const [key, event] of eventMap.entries()) {
if (event.id === eventId) {
eventMap.delete(key);
break;
}
}
}
const finalEvents = Array.from(eventMap.values()); const finalEvents = Array.from(eventMap.values());
// Sort by created_at descending // Sort by created_at descending
@ -607,7 +355,7 @@ export class NostrClient {
// Cache the merged results (skip cache for search queries) // Cache the merged results (skip cache for search queries)
const hasSearchQuery = filters.some(f => f.search && f.search.trim().length > 0); const hasSearchQuery = filters.some(f => f.search && f.search.trim().length > 0);
if (!hasSearchQuery) { if (!hasSearchQuery) {
if (finalEvents.length > 0 || results.some(r => r.status === 'fulfilled')) { if (finalEvents.length > 0 || events.length > 0) {
// Cache successful fetches for 5 minutes, empty results for 1 minute // Cache successful fetches for 5 minutes, empty results for 1 minute
const ttl = finalEvents.length > 0 ? 5 * 60 * 1000 : 60 * 1000; const ttl = finalEvents.length > 0 ? 5 * 60 * 1000 : 60 * 1000;
@ -658,18 +406,16 @@ export class NostrClient {
const since = Math.floor((Date.now() - 24 * 60 * 60 * 1000) / 1000); const since = Math.floor((Date.now() - 24 * 60 * 60 * 1000) / 1000);
const events: NostrEvent[] = []; const events: NostrEvent[] = [];
// Fetch from all relays in parallel, bypassing cache to avoid recursion // Fetch from all relays in parallel using SimplePool, bypassing cache to avoid recursion
const promises = this.relays.map(relay => this.fetchFromRelay(relay, [{ try {
kinds: [KIND.DELETION_REQUEST], const relayEvents = await this.pool.querySync(this.relays, {
since, kinds: [KIND.DELETION_REQUEST],
limit: 100 since,
}])); limit: 100
const results = await Promise.allSettled(promises); } as Filter, { maxWait: 8000 });
events.push(...relayEvents);
for (const result of results) { } catch (err) {
if (result.status === 'fulfilled') { logger.debug({ error: err }, 'Failed to fetch deletion events from pool');
events.push(...result.value);
}
} }
// Deduplicate deletion events by ID // Deduplicate deletion events by ID
@ -696,248 +442,26 @@ export class NostrClient {
} }
} }
private async fetchFromRelay(relay: string, filters: NostrFilter[]): Promise<NostrEvent[]> {
// Ensure WebSocket polyfill is initialized
await initializeWebSocketPolyfill();
const self = this;
return new Promise((resolve) => {
let ws: WebSocket | null = null;
const events: NostrEvent[] = [];
let resolved = false;
let timeoutId: ReturnType<typeof setTimeout> | null = null;
let connectionTimeoutId: ReturnType<typeof setTimeout> | null = null;
let authHandled = false;
let isNewConnection = false;
const cleanup = () => {
if (timeoutId) {
clearTimeout(timeoutId);
timeoutId = null;
}
if (connectionTimeoutId) {
clearTimeout(connectionTimeoutId);
connectionTimeoutId = null;
}
// Only close if it's a new connection we created (not from pool)
// Pool connections are managed separately
if (isNewConnection && ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) {
try {
ws.close();
} catch {
// Ignore errors during cleanup
}
} else {
// Release connection back to pool
self.releaseConnection(relay);
}
};
const resolveOnce = (value: NostrEvent[] = []) => {
if (!resolved) {
resolved = true;
cleanup();
resolve(value);
}
};
let authPromise: Promise<boolean> | null = null;
// Get connection from pool or create new one
// fetchFromRelay is always a read operation, so pass isReadOperation: true
this.getConnection(relay, true).then(websocket => {
if (!websocket) {
resolveOnce([]);
return;
}
ws = websocket;
isNewConnection = false; // From pool
setupWebSocketHandlers();
}).catch(error => {
// Connection failed, try creating new one
createWebSocketWithTor(relay).then(websocket => {
ws = websocket;
isNewConnection = true; // New connection
setupWebSocketHandlers();
}).catch(err => {
// Connection failed immediately
resolveOnce([]);
});
});
function setupWebSocketHandlers() {
if (!ws) return;
const conn = self.connectionPool.get(relay);
if (!conn) {
resolveOnce([]);
return;
}
// Get unique subscription ID for this request
const subscriptionId = `sub${conn.nextSubscriptionId++}`;
// Connection timeout - if we can't connect within 3 seconds, give up
connectionTimeoutId = setTimeout(() => {
if (!resolved && ws && ws.readyState !== WebSocket.OPEN) {
conn.messageHandlers.delete(subscriptionId);
resolveOnce([]);
}
}, 3000);
// Set up message handler for this subscription
const messageHandler = async (message: any) => {
try {
// Handle AUTH challenge
if (message[0] === 'AUTH' && message[1] && !authHandled) {
authHandled = true;
authPromise = self.handleAuthChallenge(ws!, relay, message[1]);
const authenticated = await authPromise;
// After authentication, send the REQ
if (ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['REQ', subscriptionId, ...filters]));
} catch {
conn.messageHandlers.delete(subscriptionId);
resolveOnce(events);
}
}
return;
}
// Handle AUTH OK response
if (message[0] === 'OK' && message[1] === 'auth' && ws) {
// AUTH completed, send REQ if not already sent
if (ws.readyState === WebSocket.OPEN && !authHandled) {
setTimeout(() => {
if (ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['REQ', subscriptionId, ...filters]));
} catch {
conn.messageHandlers.delete(subscriptionId);
resolveOnce(events);
}
}
}, 100);
}
return;
}
// Wait for auth to complete before processing other messages
if (authPromise) {
await authPromise;
}
// Only process messages for this subscription
if (message[1] === subscriptionId) {
if (message[0] === 'EVENT') {
events.push(message[2]);
} else if (message[0] === 'EOSE') {
conn.messageHandlers.delete(subscriptionId);
resolveOnce(events);
}
}
} catch (error) {
// Ignore parse errors, continue receiving events
}
};
conn.messageHandlers.set(subscriptionId, messageHandler);
// If connection is already open, send REQ immediately
if (ws.readyState === WebSocket.OPEN) {
// Wait a bit for AUTH challenge if needed
setTimeout(() => {
if (!authHandled && ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['REQ', subscriptionId, ...filters]));
} catch {
conn.messageHandlers.delete(subscriptionId);
resolveOnce(events);
}
}
}, 1000);
} else {
// Wait for connection to open
ws.onopen = () => {
if (connectionTimeoutId) {
clearTimeout(connectionTimeoutId);
connectionTimeoutId = null;
}
// Connection opened, wait for AUTH challenge or proceed
// If no AUTH challenge comes within 1 second, send REQ
setTimeout(() => {
if (!authHandled && ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['REQ', subscriptionId, ...filters]));
} catch {
conn.messageHandlers.delete(subscriptionId);
resolveOnce(events);
}
}
}, 1000);
};
}
// Error and close handlers are set on the connection itself
// But we need to clean up our handler
if (ws) {
const wsRef = ws; // Capture for closure
const originalOnError = ws.onerror;
ws.onerror = () => {
conn.messageHandlers.delete(subscriptionId);
if (originalOnError) {
// Create an Event-like object for Node.js compatibility
const errorEvent = typeof Event !== 'undefined'
? new Event('error')
: ({ type: 'error', target: wsRef } as unknown as Event);
originalOnError.call(wsRef, errorEvent);
}
if (!resolved) {
resolveOnce([]);
}
};
const originalOnClose = ws.onclose;
ws.onclose = () => {
conn.messageHandlers.delete(subscriptionId);
if (originalOnClose) {
// Create a CloseEvent-like object for Node.js compatibility
const closeEvent = typeof CloseEvent !== 'undefined'
? new CloseEvent('close')
: ({ type: 'close', code: 1000, reason: '', wasClean: true } as unknown as CloseEvent);
originalOnClose.call(wsRef, closeEvent);
}
// If we haven't resolved yet, resolve with what we have
if (!resolved) {
resolveOnce(events);
}
};
}
// Overall timeout - resolve with what we have after 8 seconds
timeoutId = setTimeout(() => {
resolveOnce(events);
}, 8000);
}
});
}
async publishEvent(event: NostrEvent, relays?: string[]): Promise<{ success: string[]; failed: Array<{ relay: string; error: string }> }> { async publishEvent(event: NostrEvent, relays?: string[]): Promise<{ success: string[]; failed: Array<{ relay: string; error: string }> }> {
const targetRelays = relays || this.relays; const targetRelays = relays || this.relays;
const success: string[] = []; const success: string[] = [];
const failed: Array<{ relay: string; error: string }> = []; const failed: Array<{ relay: string; error: string }> = [];
const promises = targetRelays.map(async (relay) => { // Use nostr-tools SimplePool to publish to all relays
try { try {
await this.publishToRelay(relay, event); await this.pool.publish(targetRelays, event);
success.push(relay);
} catch (error) {
failed.push({ relay, error: String(error) });
}
});
await Promise.allSettled(promises); // If publish succeeded, all relays succeeded
// Note: SimplePool.publish doesn't return per-relay results, so we assume all succeeded
success.push(...targetRelays);
} catch (error) {
// If publish failed, mark all as failed
// In a more sophisticated implementation, we could check individual relays
targetRelays.forEach(relay => {
failed.push({ relay, error: String(error) });
});
}
// Invalidate cache for events from this pubkey (new event published) // Invalidate cache for events from this pubkey (new event published)
// This ensures fresh data on next fetch // This ensures fresh data on next fetch
@ -956,150 +480,4 @@ export class NostrClient {
return { success, failed }; return { success, failed };
} }
private async publishToRelay(relay: string, nostrEvent: NostrEvent): Promise<void> {
// Ensure WebSocket polyfill is initialized
await initializeWebSocketPolyfill();
const self = this;
return new Promise((resolve, reject) => {
let ws: WebSocket | null = null;
let resolved = false;
let timeoutId: ReturnType<typeof setTimeout> | null = null;
let connectionTimeoutId: ReturnType<typeof setTimeout> | null = null;
let authHandled = false;
const cleanup = () => {
if (timeoutId) {
clearTimeout(timeoutId);
timeoutId = null;
}
if (connectionTimeoutId) {
clearTimeout(connectionTimeoutId);
connectionTimeoutId = null;
}
if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) {
try {
ws.close();
} catch {
// Ignore errors during cleanup
}
}
};
const resolveOnce = () => {
if (!resolved) {
resolved = true;
cleanup();
resolve();
}
};
const rejectOnce = (error: Error) => {
if (!resolved) {
resolved = true;
cleanup();
reject(error);
}
};
let authPromise: Promise<boolean> | null = null;
// Create WebSocket connection (with Tor support if needed)
createWebSocketWithTor(relay).then(websocket => {
ws = websocket;
setupWebSocketHandlers();
}).catch(error => {
rejectOnce(new Error(`Failed to create WebSocket connection to ${relay}: ${error}`));
});
// Wires all handlers onto the freshly created WebSocket and starts the
// connect (3 s) and overall publish (10 s) timers. Flow: on open, wait up to
// 1 s for a NIP-42 AUTH challenge; if none arrives, send the EVENT directly.
// If AUTH arrives, authenticate first, then send the EVENT. The promise is
// settled by the relay's ["OK", id, ...] reply (via resolveOnce/rejectOnce),
// or by error/close/timeout. Reads closure state: ws, relay, nostrEvent,
// resolved, authHandled, authPromise, timeoutId, connectionTimeoutId.
function setupWebSocketHandlers() {
// ws is assigned asynchronously by createWebSocketWithTor; bail if it was never set.
if (!ws) return;
// Connection timeout - if we can't connect within 3 seconds, reject
connectionTimeoutId = setTimeout(() => {
if (!resolved && ws && ws.readyState !== WebSocket.OPEN) {
rejectOnce(new Error(`Connection timeout for ${relay}`));
}
}, 3000);
ws.onopen = () => {
// Connected: the connect-timeout is no longer needed.
if (connectionTimeoutId) {
clearTimeout(connectionTimeoutId);
connectionTimeoutId = null;
}
// Connection opened, wait for AUTH challenge or proceed
// If no AUTH challenge comes within 1 second, send EVENT
setTimeout(() => {
// authHandled flips in onmessage when an AUTH challenge was seen first.
if (!authHandled && ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['EVENT', nostrEvent]));
} catch (error) {
rejectOnce(error instanceof Error ? error : new Error(String(error)));
}
}
}, 1000);
};
ws.onmessage = async (event: MessageEvent) => {
try {
const message = JSON.parse(event.data);
// Handle AUTH challenge
if (message[0] === 'AUTH' && message[1] && !authHandled) {
authHandled = true;
// Delegate NIP-42 authentication; kept in authPromise so later
// messages can wait for it before being processed.
authPromise = self.handleAuthChallenge(ws!, relay, message[1]);
await authPromise;
// After authentication attempt, send the EVENT
if (ws && ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(['EVENT', nostrEvent]));
} catch (error) {
rejectOnce(error instanceof Error ? error : new Error(String(error)));
}
}
return;
}
// Wait for auth to complete before processing other messages
if (authPromise) {
await authPromise;
}
// Relay acknowledgement: ["OK", <event id>, <accepted>, <message>].
if (message[0] === 'OK' && message[1] === nostrEvent.id) {
if (message[2] === true) {
resolveOnce();
} else {
rejectOnce(new Error(message[3] || 'Publish rejected'));
}
}
} catch (error) {
// Ignore parse errors, continue waiting for OK message
// NOTE(review): this catch also swallows a rejected authPromise, so if
// handleAuthChallenge fails the EVENT is never sent and the publish only
// fails via the 10 s timeout below — consider rejecting explicitly here.
}
};
ws.onerror = () => {
// Silently handle connection errors - reject after a short delay
// to allow connection to attempt
if (!resolved) {
setTimeout(() => {
if (!resolved) {
rejectOnce(new Error(`Connection failed for ${relay}`));
}
}, 100);
}
};
ws.onclose = () => {
// If we haven't resolved yet, it's an unexpected close
// (normal closes happen after resolveOnce/rejectOnce ran cleanup()).
if (!resolved) {
rejectOnce(new Error('WebSocket closed unexpectedly'));
}
};
// Overall publish deadline; presumably cleared by cleanup() (defined above
// this view) — TODO confirm cleanup clears timeoutId as well.
timeoutId = setTimeout(() => {
rejectOnce(new Error('Publish timeout'));
}, 10000);
}
});
}
} }

15
src/routes/+layout.svelte

@ -180,12 +180,19 @@
async function checkPendingTransfers(userPubkeyHex: string) { async function checkPendingTransfers(userPubkeyHex: string) {
try { try {
// Add timeout to prevent hanging
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 10000); // 10 second timeout
const response = await fetch('/api/transfers/pending', { const response = await fetch('/api/transfers/pending', {
headers: { headers: {
'X-User-Pubkey': userPubkeyHex 'X-User-Pubkey': userPubkeyHex
} },
signal: controller.signal
}); });
clearTimeout(timeoutId);
if (response.ok) { if (response.ok) {
const data = await response.json(); const data = await response.json();
if (data.pendingTransfers && data.pendingTransfers.length > 0) { if (data.pendingTransfers && data.pendingTransfers.length > 0) {
@ -196,7 +203,11 @@
} }
} }
} catch (err) { } catch (err) {
console.error('Failed to check for pending transfers:', err); // Only log if it's not an abort (timeout)
if (err instanceof Error && err.name !== 'AbortError') {
console.error('Failed to check for pending transfers:', err);
}
// Silently ignore timeouts - they're expected if the server is slow
} }
} }

12
src/routes/api/repos/[npub]/[repo]/file/+server.ts

@ -79,6 +79,7 @@ export const GET: RequestHandler = async (event) => {
const fileContent = await tryApiFetchFile(announcement, npub, repo, filePath, ref); const fileContent = await tryApiFetchFile(announcement, npub, repo, filePath, ref);
if (fileContent && fileContent.content) { if (fileContent && fileContent.content) {
logger.debug({ npub, repo, filePath, ref }, 'Successfully fetched file via API fallback');
return json(fileContent); return json(fileContent);
} }
} catch (apiErr) { } catch (apiErr) {
@ -87,7 +88,16 @@ export const GET: RequestHandler = async (event) => {
} }
// API fetch failed - repo is not cloned and API fetch didn't work // API fetch failed - repo is not cloned and API fetch didn't work
return error(404, 'Repository is not cloned locally and could not fetch file via API. Privileged users can clone this repository using the "Clone to Server" button.'); // Check if announcement has clone URLs to provide better error message
const { extractCloneUrls } = await import('$lib/utils/nostr-utils.js');
const cloneUrls = extractCloneUrls(announcement);
const hasCloneUrls = cloneUrls.length > 0;
logger.debug({ npub, repo, filePath, hasCloneUrls, cloneUrlCount: cloneUrls.length }, 'API fallback failed or no clone URLs available');
return error(404, hasCloneUrls
? 'Repository is not cloned locally and could not fetch file via API. Privileged users can clone this repository using the "Clone to Server" button.'
: 'Repository is not cloned locally and has no external clone URLs for API fallback. Privileged users can clone this repository using the "Clone to Server" button.');
} else { } else {
return error(404, 'Repository announcement not found in Nostr'); return error(404, 'Repository announcement not found in Nostr');
} }

12
src/routes/api/repos/[npub]/[repo]/tree/+server.ts

@ -37,6 +37,7 @@ export const GET: RequestHandler = createRepoGetHandler(
const apiData = await tryApiFetch(announcement, context.npub, context.repo); const apiData = await tryApiFetch(announcement, context.npub, context.repo);
if (apiData && apiData.files) { if (apiData && apiData.files) {
logger.debug({ npub: context.npub, repo: context.repo, fileCount: apiData.files.length }, 'Successfully fetched files via API fallback');
// Return API data directly without cloning // Return API data directly without cloning
const path = context.path || ''; const path = context.path || '';
// Filter files by path if specified // Filter files by path if specified
@ -92,8 +93,17 @@ export const GET: RequestHandler = createRepoGetHandler(
} }
// API fetch failed - repo is not cloned and API fetch didn't work // API fetch failed - repo is not cloned and API fetch didn't work
// Check if announcement has clone URLs to provide better error message
const { extractCloneUrls } = await import('$lib/utils/nostr-utils.js');
const cloneUrls = extractCloneUrls(announcement);
const hasCloneUrls = cloneUrls.length > 0;
logger.debug({ npub: context.npub, repo: context.repo, hasCloneUrls, cloneUrlCount: cloneUrls.length }, 'API fallback failed or no clone URLs available');
throw handleNotFoundError( throw handleNotFoundError(
'Repository is not cloned locally and could not be fetched via API. Privileged users can clone this repository using the "Clone to Server" button.', hasCloneUrls
? 'Repository is not cloned locally and could not be fetched via API. Privileged users can clone this repository using the "Clone to Server" button.'
: 'Repository is not cloned locally and has no external clone URLs for API fallback. Privileged users can clone this repository using the "Clone to Server" button.',
{ operation: 'listFiles', npub: context.npub, repo: context.repo } { operation: 'listFiles', npub: context.npub, repo: context.repo }
); );
} else { } else {

1018
src/routes/api/search/+server.ts

File diff suppressed because it is too large Load Diff

40
src/routes/api/transfers/pending/+server.ts

@ -22,8 +22,22 @@ export const GET: RequestHandler = async ({ request }) => {
} }
try { try {
// Get user's relays for comprehensive search // Get user's relays for comprehensive search (with timeout)
const { inbox, outbox } = await getUserRelays(userPubkeyHex, nostrClient); let inbox: string[] = [];
let outbox: string[] = [];
try {
const userRelaysResult = await Promise.race([
getUserRelays(userPubkeyHex, nostrClient),
new Promise<{ inbox: string[]; outbox: string[] }>((resolve) => {
setTimeout(() => resolve({ inbox: [], outbox: [] }), 3000); // 3s timeout for user relays
})
]);
inbox = userRelaysResult.inbox;
outbox = userRelaysResult.outbox;
} catch (err) {
logger.debug({ error: err, userPubkeyHex }, 'Failed to get user relays, using defaults');
}
// Combine user relays with default and search relays // Combine user relays with default and search relays
const userRelays = [...inbox, ...outbox]; const userRelays = [...inbox, ...outbox];
const allRelays = [...new Set([...userRelays, ...DEFAULT_NOSTR_RELAYS, ...DEFAULT_NOSTR_SEARCH_RELAYS])]; const allRelays = [...new Set([...userRelays, ...DEFAULT_NOSTR_RELAYS, ...DEFAULT_NOSTR_SEARCH_RELAYS])];
@ -32,13 +46,21 @@ export const GET: RequestHandler = async ({ request }) => {
const { NostrClient } = await import('$lib/services/nostr/nostr-client.js'); const { NostrClient } = await import('$lib/services/nostr/nostr-client.js');
const searchClient = new NostrClient(allRelays); const searchClient = new NostrClient(allRelays);
// Search for transfer events where this user is the new owner (p tag) // Search for transfer events where this user is the new owner (p tag) (with timeout)
const transferEvents = await searchClient.fetchEvents([ const transferEvents = await Promise.race([
{ searchClient.fetchEvents([
kinds: [KIND.OWNERSHIP_TRANSFER], {
'#p': [userPubkeyHex], kinds: [KIND.OWNERSHIP_TRANSFER],
limit: 100 '#p': [userPubkeyHex],
} limit: 100
}
]),
new Promise<NostrEvent[]>((resolve) => {
setTimeout(() => {
logger.debug({ userPubkeyHex }, 'Transfer events fetch timeout (5s)');
resolve([]);
}, 5000); // 5s timeout
})
]); ]);
// Filter for valid, non-self-transfer events that haven't been completed // Filter for valid, non-self-transfer events that haven't been completed

33
src/routes/repos/[npub]/[repo]/+page.svelte

@ -2814,23 +2814,26 @@
} }
} }
} else if (response.status === 404) { } else if (response.status === 404) {
// Check if this is a "not cloned" error with API fallback suggestion // Check if this is a "not cloned" error - API fallback might be available
const errorText = await response.text().catch(() => ''); const errorText = await response.text().catch(() => '');
if (errorText.includes('not cloned locally') && errorText.includes('API')) { if (errorText.includes('not cloned locally')) {
// API fallback might be available, but this specific request failed // Repository is not cloned - check if API fallback might be available
// Try to detect if API fallback works by checking if we have clone URLs
if (pageData.repoCloneUrls && pageData.repoCloneUrls.length > 0) { if (pageData.repoCloneUrls && pageData.repoCloneUrls.length > 0) {
// We have clone URLs, so API fallback might work - mark as unknown for now // We have clone URLs, so API fallback might work - mark as unknown for now
// It will be set to true if a subsequent request succeeds // It will be set to true if a subsequent request succeeds
apiFallbackAvailable = null; apiFallbackAvailable = null;
// Don't set repoNotFound or error yet - allow API fallback to be attempted
} else { } else {
// No clone URLs, API fallback won't work
repoNotFound = true;
apiFallbackAvailable = false; apiFallbackAvailable = false;
error = errorText || `Repository not found. This repository exists in Nostr but hasn't been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.`;
} }
} else { } else {
// Repository not provisioned yet - set error message and flag // Generic 404 - repository doesn't exist
repoNotFound = true; repoNotFound = true;
error = `Repository not found. This repository exists in Nostr but hasn't been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.`;
apiFallbackAvailable = false; apiFallbackAvailable = false;
error = `Repository not found. This repository exists in Nostr but hasn't been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.`;
} }
} else if (response.status === 403) { } else if (response.status === 403) {
// Access denied - don't set repoNotFound, allow retry after login // Access denied - don't set repoNotFound, allow retry after login
@ -2874,20 +2877,28 @@
if (!response.ok) { if (!response.ok) {
if (response.status === 404) { if (response.status === 404) {
// Check if this is a "not cloned" error with API fallback suggestion // Check if this is a "not cloned" error - API fallback might be available
const errorText = await response.text().catch(() => ''); const errorText = await response.text().catch(() => '');
if (errorText.includes('not cloned locally') && errorText.includes('API')) { if (errorText.includes('not cloned locally')) {
// API fallback might be available, but this specific request failed // Repository is not cloned - check if API fallback might be available
if (pageData.repoCloneUrls && pageData.repoCloneUrls.length > 0) { if (pageData.repoCloneUrls && pageData.repoCloneUrls.length > 0) {
apiFallbackAvailable = null; // Unknown, will be set if a request succeeds // We have clone URLs, so API fallback might work - mark as unknown for now
// It will be set to true if a subsequent request succeeds
apiFallbackAvailable = null;
// Don't set repoNotFound - allow API fallback to be attempted
} else { } else {
// No clone URLs, API fallback won't work
repoNotFound = true;
apiFallbackAvailable = false; apiFallbackAvailable = false;
} }
// Throw error but use the actual error text from the API
throw new Error(errorText || 'Repository not found. This repository exists in Nostr but hasn\'t been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.');
} else { } else {
// Generic 404 - repository doesn't exist
repoNotFound = true; repoNotFound = true;
apiFallbackAvailable = false; apiFallbackAvailable = false;
throw new Error(`Repository not found. This repository exists in Nostr but hasn't been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.`);
} }
throw new Error(`Repository not found. This repository exists in Nostr but hasn't been provisioned on this server yet. The server will automatically provision it soon, or you can contact the server administrator.`);
} else if (response.status === 403) { } else if (response.status === 403) {
// 403 means access denied - don't set repoNotFound, just show error // 403 means access denied - don't set repoNotFound, just show error
// This allows retry after login // This allows retry after login

Loading…
Cancel
Save