@ -24,6 +24,7 @@ interface FetchOptions {
@@ -24,6 +24,7 @@ interface FetchOptions {
onUpdate ? : ( events : NostrEvent [ ] ) = > void ;
timeout? : number ;
relayFirst? : boolean ; // If true, query relays first with timeout, then fill from cache
priority ? : 'high' | 'medium' | 'low' ; // Priority level: high for critical UI (comments), low for background (reactions, profiles)
}
class NostrClient {
@ -60,6 +61,55 @@ class NostrClient {
@@ -60,6 +61,55 @@ class NostrClient {
// Cache NIP-11 metadata to avoid repeated HTTP requests
private nip11MetadataCache : Map < string , { requiresAuth : boolean ; cachedAt : number } > = new Map ( ) ;
private readonly NIP11_CACHE_TTL = 300000 ; // 5 minutes
// Track fetch patterns to identify repeated fetches
private fetchPatterns : Map < string , { count : number ; lastFetch : number ; totalEvents : number } > = new Map ( ) ;
// Cache empty results to prevent repeated fetches of non-existent data
// Also track pending fetches to prevent concurrent duplicate fetches
private emptyResultCache : Map < string , { cachedAt : number ; pending ? : boolean } > = new Map ( ) ;
private readonly EMPTY_RESULT_CACHE_TTL = 30000 ; // 30 seconds - cache empty results briefly
private readonly PENDING_FETCH_TTL = 5000 ; // 5 seconds - how long to wait for a pending fetch
/ * *
* Check if a relay requires authentication ( using cached NIP - 11 metadata )
* Returns null if unknown , true if requires auth , false if doesn ' t require auth
* /
/**
 * Check whether a relay requires authentication, using cached NIP-11 metadata.
 *
 * Per NIP-11, a relay's information document is served from the relay's own
 * HTTP(S) URL when requested with `Accept: application/nostr+json` — NOT from
 * `/.well-known/nostr.json` (that path belongs to NIP-05 identifier lookup).
 *
 * @param relayUrl - The relay's websocket URL (ws:// or wss://).
 * @returns true if the relay advertises `limitation.auth_required`,
 *          false if the document says auth is not required,
 *          null if the metadata could not be determined (network error,
 *          timeout, or non-OK response).
 */
private async checkRelayRequiresAuth ( relayUrl : string ) : Promise < boolean | null > {
  // Serve from cache while the entry is fresh to avoid repeated HTTP probes.
  const cached = this . nip11MetadataCache . get ( relayUrl ) ;
  if ( cached && ( Date . now ( ) - cached . cachedAt ) < this . NIP11_CACHE_TTL ) {
    return cached . requiresAuth ;
  }
  try {
    // Map the websocket scheme to the matching HTTP scheme.
    const httpUrl = relayUrl . replace ( /^wss?:\/\// , ( match ) => {
      return match === 'wss://' ? 'https://' : 'http://' ;
    } ) ;
    // NIP-11: the information document lives at the relay URL itself,
    // selected via the Accept header (no /.well-known path).
    const response = await fetch ( httpUrl , {
      method : 'GET' ,
      headers : { 'Accept' : 'application/nostr+json' } ,
      signal : AbortSignal . timeout ( 2000 ) // keep the probe short; "unknown" is acceptable
    } ) ;
    if ( response . ok ) {
      const metadata = await response . json ( ) ;
      const requiresAuth = metadata ?. limitation ?. auth_required === true ;
      // Cache the result so subsequent checks hit memory for NIP11_CACHE_TTL.
      this . nip11MetadataCache . set ( relayUrl , {
        requiresAuth ,
        cachedAt : Date . now ( )
      } ) ;
      return requiresAuth ;
    }
  } catch ( error ) {
    // Fetch failed (offline relay, timeout, bad JSON) — fall through to "unknown".
  }
  return null ; // Unknown
}
async initialize ( ) : Promise < void > {
if ( this . initialized ) return ;
@ -83,8 +133,27 @@ class NostrClient {
@@ -83,8 +133,27 @@ class NostrClient {
// Start subscription cleanup interval
this . startSubscriptionCleanup ( ) ;
// Start periodic memory cleanup
this . startMemoryCleanup ( ) ;
this . initialized = true ;
}
/ * *
* Start periodic memory cleanup to prevent gradual memory growth
* /
private startMemoryCleanup ( ) : void {
// Clean up memory every 30 seconds
setInterval ( ( ) = > {
const stats = memoryManager . getStats ( ) ;
if ( stats . totalSizeMB > 50 ) {
const cleanedIds = memoryManager . cleanupOldEvents ( 25 * 1024 * 1024 ) ;
if ( cleanedIds . length > 0 ) {
console . debug ( ` [nostr-client] Periodic cleanup: removed ${ cleanedIds . length } tracked events ( ${ stats . totalSizeMB . toFixed ( 2 ) } MB -> target 25MB) ` ) ;
}
}
} , 30000 ) ; // Every 30 seconds
}
/ * *
* Start periodic cleanup of inactive subscriptions
@ -567,9 +636,9 @@ class NostrClient {
@@ -567,9 +636,9 @@ class NostrClient {
const limited = sorted . slice ( 0 , Math . min ( limit , MAX_EVENTS ) ) ;
const filtered = filterEvents ( limited ) ;
// Only log cache queries at debug level to reduce console noise
// Only log if we got multiple events or if it's an interesting query
if ( filtered . length > 5 || ( filtered . length > 0 && limited . length > filtered . length * 2 ) ) {
// Only log cache queries when significant filtering happens or large result sets
// This reduces noise from background cache enhancement queries
if ( filtered . length > 10 || ( filtered . length > 0 && limited . length > filtered . length * 3 ) ) {
console . debug ( ` [nostr-client] Cache query: ${ limited . length } events before filter, ${ filtered . length } after filter ` ) ;
}
@ -866,7 +935,8 @@ class NostrClient {
@@ -866,7 +935,8 @@ class NostrClient {
filters : Filter [ ] ,
events : Map < string , NostrEvent > ,
timeout : number ,
onUpdate ? : ( events : NostrEvent [ ] ) = > void
onUpdate ? : ( events : NostrEvent [ ] ) = > void ,
priority : 'high' | 'medium' | 'low' = 'medium'
) : Promise < void > {
return new Promise ( ( resolve ) = > {
const makeRequest = ( ) = > {
@ -874,11 +944,17 @@ class NostrClient {
@@ -874,11 +944,17 @@ class NostrClient {
const lastRequest = this . lastRequestTime . get ( relayUrl ) || 0 ;
const timeSinceLastRequest = now - lastRequest ;
const activeForRelay = this . activeRequestsPerRelay . get ( relayUrl ) || 0 ;
// High priority requests get higher concurrent limits and can bypass some throttling
const maxConcurrentForPriority = priority === 'high' ? 2 : this.MAX_CONCURRENT_PER_RELAY ;
const maxTotalForPriority = priority === 'high' ? 5 : this.MAX_CONCURRENT_TOTAL ;
const minIntervalForPriority = priority === 'high' ? 50 : this.MIN_REQUEST_INTERVAL ;
// Check if we can make the request now
if ( timeSinceLastRequest >= this . MIN_REQUEST_INTERVAL &&
activeForRelay < this . MAX_CONCURRENT_PER_RELAY &&
this . totalActiveRequests < this . MAX_CONCURRENT_TOTAL ) {
// High priority requests can bypass some throttling
if ( timeSinceLastRequest >= minIntervalForPriority &&
activeForRelay < maxConcurrentForPriority &&
this . totalActiveRequests < maxTotalForPriority ) {
// Update tracking
this . lastRequestTime . set ( relayUrl , now ) ;
@ -900,10 +976,10 @@ class NostrClient {
@@ -900,10 +976,10 @@ class NostrClient {
} ) ;
} else {
// Wait and retry
const waitTime = Math . max (
this . MIN_REQUEST_INTERVAL - timeSinceLastRequest ,
100
) ;
// High priority requests wait less
const waitTime = priority === 'high'
? Math . max ( minIntervalForPriority - timeSinceLastRequest , 10 )
: Math . max ( this . MIN_REQUEST_INTERVAL - timeSinceLastRequest , 100 ) ;
setTimeout ( makeRequest , waitTime ) ;
}
} ;
@ -1032,13 +1108,29 @@ class NostrClient {
@@ -1032,13 +1108,29 @@ class NostrClient {
// Cache the event
client . addToCache ( event ) ;
// Check memory usage and cleanup if needed (soft limits)
// Check memory usage and cleanup if needed (aggressive limits)
const stats = memoryManager . getStats ( ) ;
if ( stats . totalSizeMB > 200 ) {
// If over 200MB, cleanup oldest events to get back to 100MB
const cleanedIds = memoryManager . cleanupOldEvents ( 100 * 1024 * 1024 ) ;
// Note: We don't remove from events Map here as those are needed for return value
// The cleanup is just for tracking/monitoring purposes
if ( stats . totalSizeMB > 50 ) {
// If over 50MB, cleanup oldest events to get back to 25MB
const cleanedIds = memoryManager . cleanupOldEvents ( 25 * 1024 * 1024 ) ;
// Actually remove cleaned events from the Map to free memory
if ( cleanedIds . length > 0 ) {
for ( const id of cleanedIds ) {
events . delete ( id ) ;
}
console . warn ( ` [nostr-client] Memory cleanup: removed ${ cleanedIds . length } events from Map ( ${ stats . totalSizeMB . toFixed ( 2 ) } MB -> target 25MB) ` ) ;
}
}
// Also limit Map size per-request to prevent unbounded growth
if ( events . size > 500 ) {
const sorted = Array . from ( events . entries ( ) )
. sort ( ( a , b ) = > a [ 1 ] . created_at - b [ 1 ] . created_at ) ; // Oldest first
const toRemove = sorted . slice ( 0 , events . size - 500 ) ;
for ( const [ id ] of toRemove ) {
events . delete ( id ) ;
memoryManager . untrackEvent ( id ) ;
}
}
// Stream event directly to onUpdate callback immediately
@ -1165,23 +1257,120 @@ class NostrClient {
@@ -1165,23 +1257,120 @@ class NostrClient {
this . processingQueue = false ;
}
/ * *
* Create a human - readable description of a filter for logging
* /
/**
 * Build a short human-readable summary of a filter for log messages.
 * Kinds are listed inline; ids, authors, #e and #p tags are summarized
 * by count; the limit is appended when present.
 */
private describeFilter ( filter : Filter ) : string {
  const parts : string [ ] = [ ] ;
  const kinds = filter . kinds ;
  if ( kinds ?. length ) {
    parts . push ( `kind${ kinds . length > 1 ? 's' : '' }: ${ kinds . join ( ',' ) }` ) ;
  }
  const ids = filter . ids ;
  if ( ids ?. length ) {
    parts . push ( `id${ ids . length > 1 ? 's' : '' }: ${ ids . length }` ) ;
  }
  const authors = filter . authors ;
  if ( authors ?. length ) {
    parts . push ( `author${ authors . length > 1 ? 's' : '' }: ${ authors . length }` ) ;
  }
  const eTags = filter [ '#e' ] ;
  if ( eTags ?. length ) {
    parts . push ( `#e: ${ eTags . length }` ) ;
  }
  const pTags = filter [ '#p' ] ;
  if ( pTags ?. length ) {
    parts . push ( `#p: ${ pTags . length }` ) ;
  }
  if ( filter . limit ) {
    parts . push ( `limit: ${ filter . limit }` ) ;
  }
  return parts . length ? parts . join ( ' ' ) : 'empty filter' ;
}
async fetchEvents (
filters : Filter [ ] ,
relays : string [ ] ,
options : FetchOptions = { }
) : Promise < NostrEvent [ ] > {
const { useCache = true , cacheResults = true , onUpdate , timeout = 10000 , relayFirst = false } = options ;
const { useCache = true , cacheResults = true , onUpdate , timeout = 10000 , relayFirst = false , priority = 'medium' } = options ;
// Create a key for this fetch to prevent duplicates
const fetchKey = JSON . stringify ( {
filters ,
relays : relays.sort ( )
} ) ;
// Create filter description for logging
const filterDesc = filters . length === 1
? this . describeFilter ( filters [ 0 ] )
: filters . map ( f = > this . describeFilter ( f ) ) . join ( ' | ' ) ;
// Create relay description for logging (show count and first few)
const relayDesc = relays . length <= 3
? relays . join ( ', ' )
: ` ${ relays . length } relays ( ${ relays . slice ( 0 , 2 ) . join ( ', ' ) } , ...) ` ;
const activeFetch = this . activeFetches . get ( fetchKey ) ;
if ( activeFetch ) {
console . debug ( ` [nostr-client] Deduplicating fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - already in progress ` ) ;
return activeFetch ;
}
// Check if we recently got an empty result for this exact fetch
// This prevents repeated fetches of non-existent data
// Use a key based on filters (with actual IDs) but not relays, so different relay sets can share cache
// Create a stable key from filters that includes actual event IDs, not just counts
const filterKey = JSON . stringify ( filters . map ( f = > ( {
kinds : f.kinds ,
ids : f.ids ,
authors : f.authors ,
'#e' : f [ '#e' ] ,
'#p' : f [ '#p' ] ,
limit : f.limit
} ) ) ) ;
const emptyCacheKey = filterKey ;
// Check and set pending flag atomically to prevent race conditions
const emptyCacheEntry = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( emptyCacheEntry ) {
const age = Date . now ( ) - emptyCacheEntry . cachedAt ;
if ( emptyCacheEntry . pending && age < this . PENDING_FETCH_TTL ) {
// Another fetch for this is in progress, wait for it
console . log ( ` [nostr-client] Waiting for pending fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - another fetch in progress ` ) ;
// Wait and check multiple times (up to 2 seconds)
for ( let i = 0 ; i < 4 ; i ++ ) {
await new Promise ( resolve = > setTimeout ( resolve , 500 ) ) ;
const updatedEntry = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( updatedEntry && ! updatedEntry . pending ) {
if ( ( Date . now ( ) - updatedEntry . cachedAt ) < this . EMPTY_RESULT_CACHE_TTL ) {
const finalAge = Math . round ( ( Date . now ( ) - updatedEntry . cachedAt ) / 1000 ) ;
console . log ( ` [nostr-client] Skipping fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - empty result cached ${ finalAge } s ago (waited for pending) ` ) ;
return [ ] ;
}
break ; // No longer pending, but result expired or had data
}
if ( ! updatedEntry || ! updatedEntry . pending ) {
break ; // No longer pending
}
}
// If still pending after waiting, proceed (might be a slow fetch)
} else if ( ! emptyCacheEntry . pending && age < this . EMPTY_RESULT_CACHE_TTL ) {
const ageSeconds = Math . round ( age / 1000 ) ;
console . log ( ` [nostr-client] Skipping fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - empty result cached ${ ageSeconds } s ago ` ) ;
return Promise . resolve ( [ ] ) ;
}
}
// Atomically check and set pending flag - if another fetch just set it, wait
const existingEntry = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( existingEntry ? . pending && ( Date . now ( ) - existingEntry . cachedAt ) < 1000 ) {
// Very recent pending entry, wait a bit
await new Promise ( resolve = > setTimeout ( resolve , 200 ) ) ;
const recheck = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( recheck && ! recheck . pending && ( Date . now ( ) - recheck . cachedAt ) < this . EMPTY_RESULT_CACHE_TTL ) {
const finalAge = Math . round ( ( Date . now ( ) - recheck . cachedAt ) / 1000 ) ;
console . log ( ` [nostr-client] Skipping fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - empty result cached ${ finalAge } s ago (waited for concurrent) ` ) ;
return [ ] ;
}
}
// Mark this fetch as pending
this . emptyResultCache . set ( emptyCacheKey , { cachedAt : Date.now ( ) , pending : true } ) ;
// Always use relay-first mode: query relays first with timeout, then fill from cache if needed
{
@ -1225,7 +1414,14 @@ class NostrClient {
@@ -1225,7 +1414,14 @@ class NostrClient {
if ( response . ok ) {
const metadata = await response . json ( ) ;
if ( metadata ? . limitation ? . auth_required ) {
const requiresAuth = metadata ? . limitation ? . auth_required === true ;
// Cache the metadata
this . nip11MetadataCache . set ( relayUrl , {
requiresAuth ,
cachedAt : Date.now ( )
} ) ;
if ( requiresAuth ) {
console . debug ( ` [nostr-client] Relay ${ relayUrl } requires authentication (from NIP-11), authenticating before subscription... ` ) ;
const session = sessionManager . getSession ( ) ;
if ( session ) {
@ -1242,7 +1438,7 @@ class NostrClient {
@@ -1242,7 +1438,7 @@ class NostrClient {
console . debug ( ` [nostr-client] Proactive auth attempt for ${ relayUrl } failed (will try on challenge): ` , error ) ;
}
} else {
console . debug ( ` [nostr-client] Relay ${ relayUrl } requires authentication but user is not logged in ` ) ;
// Don't log this - it's expected when not logged in and we'll skip these relays now
}
}
}
@ -1253,17 +1449,72 @@ class NostrClient {
@@ -1253,17 +1449,72 @@ class NostrClient {
}
}
// Check empty result cache again (in case another concurrent fetch already completed)
const emptyCacheEntry2 = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( emptyCacheEntry2 && ! emptyCacheEntry2 . pending && ( Date . now ( ) - emptyCacheEntry2 . cachedAt ) < this . EMPTY_RESULT_CACHE_TTL ) {
const age = Math . round ( ( Date . now ( ) - emptyCacheEntry2 . cachedAt ) / 1000 ) ;
console . log ( ` [nostr-client] Skipping fetch [ ${ filterDesc } ] from [ ${ relayDesc } ] - empty result cached ${ age } s ago (checked during fetch) ` ) ;
// Clear pending flag if it was set
this . emptyResultCache . delete ( emptyCacheKey ) ;
return [ ] ;
}
// Query relays first with timeout
// Respect cacheResults option - don't cache if explicitly disabled
const relayEvents = await this . fetchFromRelays ( filters , relays , {
cacheResults : cacheResults ,
onUpdate ,
timeout : relayTimeout
timeout : relayTimeout ,
priority : options.priority
} ) ;
// If we got results from relays, return them immediately
// Track results for composite log
let finalEvents = relayEvents ;
let cacheEnhancementCount = 0 ;
let usedCacheFallback = false ;
// If we got results from relays, return them immediately for fast display
// Then enhance with cache delta and log summary
if ( relayEvents . length > 0 ) {
// Got events from relays
// Start cache enhancement in background, but wait for it before logging
const cacheEnhancementPromise = ( async ( ) = > {
if ( useCache && onUpdate ) {
try {
const cachedEvents = await this . getCachedEvents ( filters ) ;
if ( cachedEvents . length > 0 ) {
// Find events in cache that aren't in relay results (delta)
const relayEventIds = new Set ( relayEvents . map ( e = > e . id ) ) ;
const cacheDelta = cachedEvents . filter ( e = > ! relayEventIds . has ( e . id ) ) ;
if ( cacheDelta . length > 0 ) {
cacheEnhancementCount = cacheDelta . length ;
// Enhance results with cache delta via onUpdate callback
onUpdate ( cacheDelta ) ;
}
}
} catch ( error ) {
// Silently fail - cache enhancement is optional
}
}
return cacheEnhancementCount ;
} ) ( ) ;
// Wait for cache enhancement to complete, then log composite summary
cacheEnhancementPromise . then ( ( enhancementCount ) = > {
// Track fetch patterns
const patternKey = ` ${ filterDesc } ` ;
const pattern = this . fetchPatterns . get ( patternKey ) || { count : 0 , lastFetch : 0 , totalEvents : 0 } ;
pattern . count ++ ;
pattern . lastFetch = Date . now ( ) ;
pattern . totalEvents += relayEvents . length + enhancementCount ;
this . fetchPatterns . set ( patternKey , pattern ) ;
const summary = enhancementCount > 0
? ` [nostr-client] Fetch complete: ${ relayEvents . length } from relays, ${ enhancementCount } from cache (enhanced) [ ${ filterDesc } ] from [ ${ relayDesc } ] `
: ` [nostr-client] Fetch complete: ${ relayEvents . length } events from relays [ ${ filterDesc } ] from [ ${ relayDesc } ] ` ;
console . log ( summary ) ;
} ) ;
return relayEvents ;
}
@ -1273,17 +1524,54 @@ class NostrClient {
@@ -1273,17 +1524,54 @@ class NostrClient {
try {
const cachedEvents = await this . getCachedEvents ( filters ) ;
if ( cachedEvents . length > 0 ) {
console . debug ( ` [nostr-client] Relay query returned 0 events, using ${ cachedEvents . length } cached events ` ) ;
return cachedEvents ;
usedCacheFallback = true ;
finalEvents = cachedEvents ;
console . debug ( ` [nostr-client] Using ${ cachedEvents . length } cached events (relays returned 0) ` ) ;
} else {
console . debug ( ` [nostr-client] No cached events available, returning empty result ` ) ;
}
} catch ( error ) {
console . error ( '[nostr-client] Error querying cache:' , error ) ;
}
}
// Track fetch patterns for analysis
const patternKey = ` ${ filterDesc } ` ;
const pattern = this . fetchPatterns . get ( patternKey ) || { count : 0 , lastFetch : 0 , totalEvents : 0 } ;
pattern . count ++ ;
pattern . lastFetch = Date . now ( ) ;
pattern . totalEvents += finalEvents . length ;
this . fetchPatterns . set ( patternKey , pattern ) ;
// Cache empty results to prevent repeated fetches of non-existent data
if ( finalEvents . length === 0 && ! usedCacheFallback ) {
const wasAlreadyCached = this . emptyResultCache . has ( emptyCacheKey ) && ! this . emptyResultCache . get ( emptyCacheKey ) ? . pending ;
this . emptyResultCache . set ( emptyCacheKey , { cachedAt : Date.now ( ) , pending : false } ) ;
// Only log if this is a new cache entry (not updating an existing one)
if ( ! wasAlreadyCached ) {
console . log ( ` [nostr-client] Cached empty result for [ ${ filterDesc } ] - will skip similar fetches for 30s ` ) ;
}
} else {
console . debug ( ` [nostr-client] No events from relays, useCache=false, returning empty array ` ) ;
// Clear pending flag if we got results
const currentEntry = this . emptyResultCache . get ( emptyCacheKey ) ;
if ( currentEntry ? . pending ) {
this . emptyResultCache . delete ( emptyCacheKey ) ;
}
}
// Log composite summary
if ( usedCacheFallback ) {
console . log ( ` [nostr-client] Fetch complete: 0 from relays, ${ finalEvents . length } from cache (fallback) [ ${ filterDesc } ] from [ ${ relayDesc } ] ` ) ;
} else if ( finalEvents . length === 0 ) {
// Only log 0-event fetches if they're repeated many times (likely a problem)
if ( pattern . count > 5 && pattern . totalEvents === 0 ) {
console . warn ( ` [nostr-client] Repeated empty fetch ( ${ pattern . count } x): [ ${ filterDesc } ] from [ ${ relayDesc } ] - consider caching or skipping ` ) ;
} else {
console . log ( ` [nostr-client] Fetch complete: 0 events (relays returned 0, no cache available) [ ${ filterDesc } ] from [ ${ relayDesc } ] ` ) ;
}
}
return relayEvents ; // Return empty array if both failed
return finalEvents ;
} ) ( ) ;
this . activeFetches . set ( fetchKey , fetchPromise ) ;
@ -1297,42 +1585,62 @@ class NostrClient {
@@ -1297,42 +1585,62 @@ class NostrClient {
private async fetchFromRelays (
filters : Filter [ ] ,
relays : string [ ] ,
options : { cacheResults : boolean ; onUpdate ? : ( events : NostrEvent [ ] ) = > void ; timeout : number }
options : { cacheResults : boolean ; onUpdate ? : ( events : NostrEvent [ ] ) = > void ; timeout : number ; priority ? : 'high' | 'medium' | 'low' }
) : Promise < NostrEvent [ ] > {
const timeout = options . timeout || config . relayTimeout ;
// Check if user is logged in
const session = sessionManager . getSession ( ) ;
const isLoggedIn = ! ! session ;
// Filter out relays that have failed recently or permanently
// Also filter out auth-required relays if user is not logged in
const now = Date . now ( ) ;
const availableRelays = relays . filter ( url = > {
if ( this . relays . has ( url ) ) return true ; // Already connected
const failureInfo = this . failedRelays . get ( url ) ;
if ( failureInfo ) {
// Skip permanently failed relays
if ( failureInfo . failureCount >= this . PERMANENT_FAILURE_THRESHOLD ) {
return false ; // Skip this relay, it has failed too many times
const availableRelays = await Promise . all (
relays . map ( async ( url ) = > {
if ( this . relays . has ( url ) ) return { url , available : true } ; // Already connected
const failureInfo = this . failedRelays . get ( url ) ;
if ( failureInfo ) {
// Skip permanently failed relays
if ( failureInfo . failureCount >= this . PERMANENT_FAILURE_THRESHOLD ) {
return { url , available : false } ;
}
// Skip relays that failed recently (still in backoff period)
const timeSinceFailure = now - failureInfo . lastFailure ;
if ( timeSinceFailure < failureInfo . retryAfter ) {
return { url , available : false } ;
}
}
// Skip relays that failed recently (still in backoff period)
const timeSinceFailure = now - failureInfo . lastFailure ;
if ( timeSinceFailure < failureInfo . retryAfter ) {
return false ; // Skip this relay, it failed recently
// If not logged in, check if relay requires auth and skip it
if ( ! isLoggedIn ) {
const requiresAuth = await this . checkRelayRequiresAuth ( url ) ;
if ( requiresAuth === true ) {
return { url , available : false } ; // Skip auth-required relay
}
}
}
return true ; // Can try to connect
} ) ;
return { url , available : true } ;
} )
) ;
const filteredRelays = availableRelays
. filter ( r = > r . available )
. map ( r = > r . url ) ;
// Try to connect to relays that aren't already connected
// Like jumble, we gracefully handle failures - addRelay doesn't throw, it just doesn't add failed relays
const relaysToConnect = availableRelays . filter ( url = > ! this . relays . has ( url ) ) ;
const relaysToConnect = filteredRelays . filter ( url = > ! this . relays . has ( url ) ) ;
if ( relaysToConnect . length > 0 ) {
if ( relays . length === 1 ) {
if ( filteredRelays . length === 1 ) {
console . log ( ` [nostr-client] Attempting to connect to relay ${ relaysToConnect [ 0 ] } ... ` ) ;
}
await Promise . allSettled (
relaysToConnect . map ( url = > this . addRelay ( url ) )
) ;
// For single relay, wait for connection to actually establish
if ( relays . length === 1 && relaysToConnect . length > 0 ) {
if ( filteredRelays . length === 1 && relaysToConnect . length > 0 ) {
const relayUrl = relaysToConnect [ 0 ] ;
let attempts = 0 ;
const maxAttempts = 6 ; // Wait up to 3 seconds (6 * 500ms) for connection
@ -1355,15 +1663,17 @@ class NostrClient {
@@ -1355,15 +1663,17 @@ class NostrClient {
}
// Get list of actually connected relays
const connectedRelays = availableRelays . filter ( url = > this . relays . has ( url ) ) ;
const connectedRelays = filteredRelays . filter ( url = > this . relays . has ( url ) ) ;
if ( connectedRelays . length === 0 ) {
// Log at warn level for single relay queries (more important to know about failures)
const logLevel = relays . length === 1 ? 'warn' : 'debug' ;
const message = ` [nostr-client] No connected relays available for fetch ( ${ relays . length } requested: ${ relays . join ( ', ' ) } , all failed or unavailable) ` ;
const logLevel = filteredRelays . length === 1 ? 'warn' : 'debug' ;
const skippedCount = relays . length - filteredRelays . length ;
const skipReason = ! isLoggedIn && skippedCount > 0 ? ` ( ${ skippedCount } skipped: auth required) ` : '' ;
const message = ` [nostr-client] No connected relays available for fetch ( ${ relays . length } requested, ${ filteredRelays . length } available ${ skipReason } , all failed or unavailable) ` ;
if ( logLevel === 'warn' ) {
console . warn ( message ) ;
// For single relay, also log which relays were attempted and failure info
if ( relays . length === 1 ) {
if ( filteredRelays . length === 1 ) {
const failureInfo = this . failedRelays . get ( relays [ 0 ] ) ;
if ( failureInfo ) {
console . warn ( ` [nostr-client] Relay ${ relays [ 0 ] } failure info: ` , failureInfo ) ;
@ -1371,7 +1681,7 @@ class NostrClient {
@@ -1371,7 +1681,7 @@ class NostrClient {
if ( relaysToConnect . length > 0 ) {
console . warn ( ` [nostr-client] Attempted to connect to: ${ relaysToConnect . join ( ', ' ) } ` ) ;
}
console . warn ( ` [nostr-client] Available relays (after filtering): ${ availableRelays . join ( ', ' ) } ` ) ;
console . warn ( ` [nostr-client] Available relays (after filtering): ${ filteredRelays . join ( ', ' ) } ` ) ;
console . warn ( ` [nostr-client] Currently connected relays: ${ Array . from ( this . relays . keys ( ) ) . join ( ', ' ) } ` ) ;
}
} else {
@ -1383,8 +1693,10 @@ class NostrClient {
@@ -1383,8 +1693,10 @@ class NostrClient {
// Log connection status for single relay queries
if ( relays . length === 1 && connectedRelays . length === 1 ) {
console . log ( ` [nostr-client] Successfully connected to relay ${ relays [ 0 ] } , fetching events... ` ) ;
} else if ( connectedRelays . length < relays . length * 0.5 ) {
console . debug ( ` [nostr-client] Fetching from ${ connectedRelays . length } connected relay(s) out of ${ relays . length } requested ` ) ;
} else if ( connectedRelays . length < filteredRelays . length * 0.5 || filteredRelays . length < relays . length ) {
const skippedCount = relays . length - filteredRelays . length ;
const skipReason = ! isLoggedIn && skippedCount > 0 ? ` ( ${ skippedCount } skipped: auth required) ` : '' ;
console . debug ( ` [nostr-client] Fetching from ${ connectedRelays . length } connected relay(s) out of ${ filteredRelays . length } available ${ skipReason } ( ${ relays . length } requested) ` ) ;
}
// Log connection status for single relay queries
@ -1393,17 +1705,38 @@ class NostrClient {
@@ -1393,17 +1705,38 @@ class NostrClient {
}
// Process relays sequentially with throttling to avoid overload
// High priority requests get processed faster
const events : Map < string , NostrEvent > = new Map ( ) ;
const priority = options . priority || 'medium' ;
const delayBetweenRelays = priority === 'high' ? 10 : 100 ;
// Limit events Map size to prevent memory bloat (keep only most recent 1000 events)
const MAX_EVENTS_IN_MAP = 1000 ;
for ( const relayUrl of connectedRelays ) {
await this . throttledRelayRequest ( relayUrl , filters , events , timeout , options . onUpdate ) ;
// Small delay between relays
await new Promise ( resolve = > setTimeout ( resolve , 100 ) ) ;
await this . throttledRelayRequest ( relayUrl , filters , events , timeout , options . onUpdate , priority ) ;
// Limit Map size - remove oldest if over limit
if ( events . size > MAX_EVENTS_IN_MAP ) {
const sorted = Array . from ( events . entries ( ) )
. sort ( ( a , b ) = > a [ 1 ] . created_at - b [ 1 ] . created_at ) ; // Oldest first
const toRemove = sorted . slice ( 0 , events . size - MAX_EVENTS_IN_MAP ) ;
for ( const [ id ] of toRemove ) {
events . delete ( id ) ;
memoryManager . untrackEvent ( id ) ;
}
}
// Small delay between relays (shorter for high priority)
await new Promise ( resolve = > setTimeout ( resolve , delayBetweenRelays ) ) ;
}
const eventArray = Array . from ( events . values ( ) ) ;
const filtered = filterEvents ( eventArray ) ;
const zapFiltered = filtered . filter ( event = > ! this . shouldFilterZapReceipt ( event ) ) ;
// Clear events Map after processing to free memory
events . clear ( ) ;
if ( options . cacheResults && zapFiltered . length > 0 ) {
cacheEvents ( zapFiltered ) . catch ( ( ) = > {