Browse Source

bug-fixes

imwald
Silberengel 1 month ago
parent
commit
9d802444dd
  1. 1
      nip66-cron/index.mjs
  2. 1
      src/constants.ts
  3. 23
      src/hooks/useFetchProfile.tsx
  4. 46
      src/pages/primary/SpellsPage/index.tsx
  5. 42
      src/services/client-query.service.ts
  6. 27
      src/services/client.service.ts

1
nip66-cron/index.mjs

@@ -64,7 +64,6 @@ const DEFAULT_RELAYS_TO_MONITOR = [
'wss://nostrelites.org',
'wss://relay.nsec.app',
'wss://bucket.coracle.social',
'wss://relay.nostr.bg',
'wss://spatia-arcana.com',
'wss://sendit.nosflare.com',
'wss://nostr-pub.wellorder.net',

1
src/constants.ts

@@ -161,7 +161,6 @@ export const SEARCHABLE_RELAY_URLS = [
'wss://nostrelites.org',
'wss://relay.nsec.app',
'wss://bucket.coracle.social',
'wss://relay.nostr.bg',
'wss://spatia-arcana.com',
'wss://sendit.nosflare.com',
'wss://nostr-pub.wellorder.net',

23
src/hooks/useFetchProfile.tsx

@@ -10,6 +10,8 @@ import logger from '@/lib/logger'
// This prevents multiple components from fetching the same profile simultaneously
const globalFetchPromises = new Map<string, Promise<TProfile | null>>()
const globalFetchingPubkeys = new Set<string>()
// Cooldown period after timeout to prevent cascade of duplicate fetches
const globalFetchCooldowns = new Map<string, number>() // pubkey -> timestamp when cooldown expires
export function useFetchProfile(id?: string, skipCache = false) {
// CRITICAL: Reduce logging to prevent performance issues during infinite loops
@@ -47,6 +49,20 @@ export function useFetchProfile(id?: string, skipCache = false) {
return null
}
// CRITICAL: Check cooldown period first to prevent cascade of duplicate fetches after timeout
const cooldownExpiry = globalFetchCooldowns.get(pubkey)
if (cooldownExpiry && Date.now() < cooldownExpiry) {
logger.debug('[useFetchProfile] In cooldown period after timeout, skipping fetch', {
pubkey: pubkey.substring(0, 8),
remainingMs: cooldownExpiry - Date.now()
})
return null
}
// Clean up expired cooldowns
if (cooldownExpiry && Date.now() >= cooldownExpiry) {
globalFetchCooldowns.delete(pubkey)
}
// CRITICAL: Check if another hook instance is already fetching this pubkey
// If so, wait for that fetch to complete instead of starting a new one
// Add timeout protection to prevent infinite waits
@@ -71,9 +87,12 @@ export function useFetchProfile(id?: string, skipCache = false) {
// If timeout won: do NOT start a new fetch (avoids pile-up of parallel fetches for same pubkey).
// Return null so caller can show fallback; the original fetch may still complete and update cache.
// Set a cooldown period to prevent immediate retries from other components
if (existingProfile === null && !cancelled.current) {
globalFetchPromises.delete(pubkey)
globalFetchingPubkeys.delete(pubkey)
// Set cooldown for 10 seconds to prevent cascade of duplicate fetches
globalFetchCooldowns.set(pubkey, Date.now() + 10000)
return null
}
if (existingProfile) {
@@ -126,6 +145,8 @@ export function useFetchProfile(id?: string, skipCache = false) {
if (retryProfile === null && !cancelled.current) {
globalFetchPromises.delete(pubkey)
globalFetchingPubkeys.delete(pubkey)
// Set cooldown for 10 seconds to prevent cascade of duplicate fetches
globalFetchCooldowns.set(pubkey, Date.now() + 10000)
return null
}
if (retryProfile) {
@@ -219,6 +240,8 @@ export function useFetchProfile(id?: string, skipCache = false) {
pubkey: pubkey.substring(0, 8),
error: err.message
})
// Set cooldown period after timeout to prevent cascade of duplicate fetches
globalFetchCooldowns.set(pubkey, Date.now() + 10000) // 10 second cooldown
// Return null on timeout instead of throwing - allows UI to show fallback
return null
}

46
src/pages/primary/SpellsPage/index.tsx

@@ -156,7 +156,6 @@ const SpellsPage = forwardRef<TPageRef>(function SpellsPage(_, ref) {
const [spellToEdit, setSpellToEdit] = useState<Event | null>(null)
const [spellToClone, setSpellToClone] = useState<Event | null>(null)
const [definitionSpell, setDefinitionSpell] = useState<Event | null>(null)
const [subRequests, setSubRequests] = useState<TFeedSubRequest[]>([])
const [contacts, setContacts] = useState<string[]>([])
/** True while fetching kind 777 authored by the user from write relays into IndexedDB */
const [spellsCatalogSyncing, setSpellsCatalogSyncing] = useState(false)
@@ -290,9 +289,33 @@ const SpellsPage = forwardRef<TPageRef>(function SpellsPage(_, ref) {
client.fetchFollowings(pubkey).then(setContacts).catch(() => setContacts([]))
}, [pubkey])
// Memoize subRequests to prevent NoteList from re-subscribing when array reference changes
// This ensures the array reference only changes when the actual content changes
const subRequests = useMemo<TFeedSubRequest[]>(() => {
if (!selectedSpell) {
return []
}
if (spellIsCount(selectedSpell)) {
return []
}
const relayListWrite = relayList?.write ?? []
const ctx = {
pubkey,
contacts
}
const filter = spellEventToFilter(selectedSpell, ctx)
if (!filter) {
return []
}
const relays = getRelaysForSpell(selectedSpell, { relayListWrite })
if (!relays.length) {
return []
}
return [{ urls: relays, filter }]
}, [selectedSpell, pubkey, contacts, relayList?.write])
useEffect(() => {
if (!selectedSpell) {
setSubRequests([])
setSpellCount({
loading: false,
rows: [],
@@ -304,7 +327,6 @@ const SpellsPage = forwardRef<TPageRef>(function SpellsPage(_, ref) {
return
}
if (spellIsCount(selectedSpell)) {
setSubRequests([])
return
}
setSpellCount({
@@ -315,23 +337,7 @@ const SpellsPage = forwardRef<TPageRef>(function SpellsPage(_, ref) {
mayHitLimit: false,
usedExplicitRelays: false
})
const relayListWrite = relayList?.write ?? []
const ctx = {
pubkey,
contacts
}
const filter = spellEventToFilter(selectedSpell, ctx)
if (!filter) {
setSubRequests([])
return
}
const relays = getRelaysForSpell(selectedSpell, { relayListWrite })
if (!relays.length) {
setSubRequests([])
return
}
setSubRequests([{ urls: relays, filter }])
}, [selectedSpell, pubkey, contacts, relayList?.write])
}, [selectedSpell])
useEffect(() => {
if (!selectedSpell || !spellIsCount(selectedSpell)) {

42
src/services/client-query.service.ts

@@ -17,8 +17,10 @@ function filterForRelay(f: Filter, relaySupportsSearch: boolean): Filter {
export interface QueryOptions {
eoseTimeout?: number
globalTimeout?: number
/** For replaceable events: race strategy - wait 2s after first result, then return best */
/** For replaceable events: race strategy - wait after first result, then return best (per author when batching) */
replaceableRace?: boolean
/** Ms to wait after the first event when replaceableRace is true (lets other relays return a newer version) */
replaceableRaceWaitMs?: number
/** For non-replaceable single events: return immediately on first match */
immediateReturn?: boolean
}
@@ -113,6 +115,7 @@ export class QueryService {
const eoseTimeout = options?.eoseTimeout ?? 500
const globalTimeout = options?.globalTimeout ?? 10000
const replaceableRace = options?.replaceableRace ?? false
const replaceableRaceWaitMs = options?.replaceableRaceWaitMs ?? 2000
const immediateReturn = options?.immediateReturn ?? false
const isExternalSearch = eoseTimeout > 1000
@@ -129,7 +132,6 @@ export class QueryService {
}
const FIRST_RESULT_GRACE_MS = 1200
const REPLACEABLE_RACE_WAIT_MS = 1000 // Reduced from 2000ms for faster profile loading in feeds
return await new Promise<NEvent[]>((resolve) => {
const events: NEvent[] = []
@@ -142,6 +144,35 @@ export class QueryService {
let firstResultTime: number | null = null
let globalTimeoutId: ReturnType<typeof setTimeout> | null = null
const resolveReplaceableRaceEvents = (): NEvent[] => {
if (events.length === 0) return events
const filters = Array.isArray(filter) ? filter : [filter]
const authorSet = new Set<string>()
for (const f of filters) {
if (f.authors) {
for (const a of f.authors) {
if (a) authorSet.add(a)
}
}
}
// Batch profile / replaceable fetch: keep the newest event per pubkey (not one global "winner")
if (authorSet.size > 1) {
const byPk = new Map<string, NEvent>()
for (const e of events) {
if (!authorSet.has(e.pubkey)) continue
const prev = byPk.get(e.pubkey)
if (!prev || e.created_at > prev.created_at) {
byPk.set(e.pubkey, e)
}
}
return Array.from(byPk.values())
}
const bestEvent = events.reduce((best, current) =>
current.created_at > best.created_at ? current : best
)
return [bestEvent]
}
const resolveWithEvents = () => {
if (resolved) return
resolved = true
@@ -153,10 +184,7 @@ export class QueryService {
sub.close()
if (replaceableRace && events.length > 0) {
const bestEvent = events.reduce((best, current) =>
current.created_at > best.created_at ? current : best
)
resolve([bestEvent])
resolve(resolveReplaceableRaceEvents())
} else {
resolve(events)
}
@@ -189,7 +217,7 @@ export class QueryService {
replaceableRaceTimeoutId = setTimeout(() => {
replaceableRaceTimeoutId = null
resolveWithEvents()
}, REPLACEABLE_RACE_WAIT_MS)
}, replaceableRaceWaitMs)
}
if (!replaceableRace && !immediateReturn && isSingleEventFetch && events.length === 1 && !firstResultGraceTimeoutId) {

27
src/services/client.service.ts

@@ -1239,20 +1239,22 @@ class ClientService extends EventTarget {
let eosedAt: number | null = null
let initialBatchScheduled = false
let lastDeliveredCount = 0
// CRITICAL FIX: Faster progressive loading - show results as soon as we have them
// Reduced delays to improve perceived performance
const PROGRESSIVE_DELAY_MS = 0 // Show first batch immediately
const PROGRESSIVE_INTERVAL_MS = 100 // Check for new events every 100ms (reduced from 200ms)
const MIN_NEW_EVENTS = 5 // Deliver when we have at least 5 new events
// Progressive loading: show the first event(s) as soon as they arrive (not only after 5+ events)
const PROGRESSIVE_INTERVAL_MS = 100 // Poll for more events while relays are still streaming
const MIN_NEW_EVENTS_AFTER_FIRST = 5 // After first paint, batch updates to limit re-renders
let progressiveIntervalId: ReturnType<typeof setInterval> | null = null
const deliverProgressive = () => {
if (eosedAt || events.length === 0) return
const sortedEvents = [...events].sort((a, b) => b.created_at - a.created_at).slice(0, filter.limit)
const newEventCount = sortedEvents.length - lastDeliveredCount
// Only deliver if we have significantly more events than last time
// This reduces unnecessary re-renders while still showing progress quickly
if (newEventCount >= MIN_NEW_EVENTS || sortedEvents.length >= filter.limit * 0.5) {
const isFirstPaint = lastDeliveredCount === 0
const shouldDeliver =
isFirstPaint
? sortedEvents.length >= 1
: newEventCount >= MIN_NEW_EVENTS_AFTER_FIRST || sortedEvents.length >= filter.limit * 0.5
if (shouldDeliver) {
lastDeliveredCount = sortedEvents.length
const snap = sortedEvents
// Only include cached events if caching is enabled
@@ -1266,13 +1268,10 @@ class ClientService extends EventTarget {
// not eosed yet, push to events
if (!eosedAt) {
events.push(evt)
// Deliver first batch quickly so UI doesn't wait for all relays to EOSE
// CRITICAL FIX: Show results immediately when we have enough events
if (needSort && events.length >= MIN_NEW_EVENTS && !initialBatchScheduled) {
// Deliver as soon as we have any event while waiting for EOSE (then batch further updates)
if (needSort && events.length >= 1 && !initialBatchScheduled) {
initialBatchScheduled = true
// Deliver immediately for better perceived performance
deliverProgressive()
// Then continue checking for more events
if (!progressiveIntervalId) {
progressiveIntervalId = setInterval(deliverProgressive, PROGRESSIVE_INTERVAL_MS)
}

Loading…
Cancel
Save