Browse Source

make relay timeouts more efficient

suppress diff on initial commit

Nostr-Signature: 5fbc2dfb13acab011df5a394a022267e69bbe908e696c4389e0c07ba83d58a0d 573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc daf46d563c413e2481be2cbd2b00d3015cf601e19fe0a191ffbb18c2c07508b17e34ebda5c903a1391914f991cecd7a7a4e809fcba45e1f14ebab674117eb53c
main
Silberengel 2 weeks ago
parent
commit
ada4c25047
  1. 1
      nostr/commit-signatures.jsonl
  2. 221
      src/lib/services/nostr/nostr-client.ts
  3. 2
      src/lib/services/nostr/relay-write-proof.ts
  4. 22
      src/routes/repos/[npub]/[repo]/services/commit-operations.ts

1
nostr/commit-signatures.jsonl

@@ -124,3 +124,4 @@
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772293551,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","remove polling"]],"content":"Signed commit: remove polling","id":"40f01e84f96661bb7fea13aa63c7da428118061b0a1470a11890d4f9cd6d685b","sig":"dbb6947defac6c7f92a3cf6f72352a94ffe2c4b33e65f8410518a40406c93f1f5a3e13e81f2f04f676d826e6cf03ec802328f5228300f80a8114fa3fd26eaeff"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772293551,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","remove polling"]],"content":"Signed commit: remove polling","id":"40f01e84f96661bb7fea13aa63c7da428118061b0a1470a11890d4f9cd6d685b","sig":"dbb6947defac6c7f92a3cf6f72352a94ffe2c4b33e65f8410518a40406c93f1f5a3e13e81f2f04f676d826e6cf03ec802328f5228300f80a8114fa3fd26eaeff"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772296288,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","administer the repos"]],"content":"Signed commit: administer the repos","id":"8825fb9bd01e099c1369f0c9ea1429dedd0a0116d103b4a640752c0a830fbc61","sig":"676f0817f817204ad910a70540399f71743a54453ae209535dcb30356d042b049138d9cfdeec08c4b7da03bb6bb51c71477bbf8d2f58bd4b602b9f69af4b3405"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772296288,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","administer the repos"]],"content":"Signed commit: administer the repos","id":"8825fb9bd01e099c1369f0c9ea1429dedd0a0116d103b4a640752c0a830fbc61","sig":"676f0817f817204ad910a70540399f71743a54453ae209535dcb30356d042b049138d9cfdeec08c4b7da03bb6bb51c71477bbf8d2f58bd4b602b9f69af4b3405"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772298906,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"6aa4dcd1b3d8a933710a6eb43321aa4faaba56598c735a634069c882c83b4f03","sig":"80ce253e890e8e84c8138e004bc2aaea402379d9aa67f62793ac7a4b344de6a7223f46fc733b240215a983a3a9b574ea8d0858a184f06df58ee66212ba58ee53"} {"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772298906,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"6aa4dcd1b3d8a933710a6eb43321aa4faaba56598c735a634069c882c83b4f03","sig":"80ce253e890e8e84c8138e004bc2aaea402379d9aa67f62793ac7a4b344de6a7223f46fc733b240215a983a3a9b574ea8d0858a184f06df58ee66212ba58ee53"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772299137,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","more-muted replyt-to"]],"content":"Signed commit: more-muted replyt-to","id":"fc0a91b526083b640d8116592fcac064fcf3cec9625b48dbd41c3877b2fe5444","sig":"998273d70d827ffbb939b4c149ff88e11c9f3aae3c5ddee78d860710f7fbff42c5ceed9433367b530bdc2869f9d382eb449537f813cf745c49f1a87a36926502"}

221
src/lib/services/nostr/nostr-client.ts

@@ -188,7 +188,7 @@ export class NostrClient {
} }
} }
async fetchEvents(filters: NostrFilter[]): Promise<NostrEvent[]> { async fetchEvents(filters: NostrFilter[], isWriteVerification: boolean = false): Promise<NostrEvent[]> {
// Strategy: Check persistent cache first, return immediately if available // Strategy: Check persistent cache first, return immediately if available
// Then fetch from relays in background and merge results // Then fetch from relays in background and merge results
@@ -206,7 +206,8 @@ export class NostrClient {
logger.debug({ filters, cachedCount: memoryCached.length }, 'Returning cached events from memory'); logger.debug({ filters, cachedCount: memoryCached.length }, 'Returning cached events from memory');
// Return cached events immediately, but also fetch from relays in background to update cache // Return cached events immediately, but also fetch from relays in background to update cache
this.fetchAndMergeFromRelays(filters, memoryCached).catch(err => { // Background fetches are always normal (not write verification)
this.fetchAndMergeFromRelays(filters, memoryCached, false).catch(err => {
logger.debug({ error: err, filters }, 'Background fetch failed, using cached events'); logger.debug({ error: err, filters }, 'Background fetch failed, using cached events');
}); });
@@ -219,7 +220,8 @@ export class NostrClient {
logger.debug({ filters, cachedCount: cachedEvents.length }, 'Returning cached events from IndexedDB'); logger.debug({ filters, cachedCount: cachedEvents.length }, 'Returning cached events from IndexedDB');
// Return cached events immediately, but also fetch from relays in background to update cache // Return cached events immediately, but also fetch from relays in background to update cache
this.fetchAndMergeFromRelays(filters, cachedEvents).catch(err => { // Background fetches are always normal (not write verification)
this.fetchAndMergeFromRelays(filters, cachedEvents, false).catch(err => {
logger.debug({ error: err, filters }, 'Background fetch failed, using cached events'); logger.debug({ error: err, filters }, 'Background fetch failed, using cached events');
}); });
@@ -234,7 +236,7 @@ export class NostrClient {
} }
// 3. No cache available (or search query), fetch from relays // 3. No cache available (or search query), fetch from relays
return this.fetchAndMergeFromRelays(filters, []); return this.fetchAndMergeFromRelays(filters, [], isWriteVerification);
} }
/** /**
@@ -312,8 +314,11 @@ export class NostrClient {
* Fetch events from relays and merge with existing events * Fetch events from relays and merge with existing events
* Never deletes valid events, only appends/integrates new ones * Never deletes valid events, only appends/integrates new ones
* Automatically falls back to fallback relays if primary relays fail * Automatically falls back to fallback relays if primary relays fail
* @param filters - Filters to query
* @param existingEvents - Existing events to merge with
* @param isWriteVerification - If true, uses full timeout (8s). If false, uses dynamic timeout (2s after first response)
*/ */
private async fetchAndMergeFromRelays(filters: NostrFilter[], existingEvents: NostrEvent[]): Promise<NostrEvent[]> { private async fetchAndMergeFromRelays(filters: NostrFilter[], existingEvents: NostrEvent[], isWriteVerification: boolean = false): Promise<NostrEvent[]> {
const events: NostrEvent[] = []; const events: NostrEvent[] = [];
// Sanitize all filters before sending to relays // Sanitize all filters before sending to relays
@@ -322,53 +327,177 @@ export class NostrClient {
// Use nostr-tools SimplePool to fetch from all relays in parallel // Use nostr-tools SimplePool to fetch from all relays in parallel
// SimplePool handles connection management, retries, and error handling automatically // SimplePool handles connection management, retries, and error handling automatically
try { try {
// querySync takes a single filter, so we query each filter and combine results // For write verification, use full timeout. For normal fetches, use dynamic timeout
// Wrap each query individually to catch errors from individual relays if (isWriteVerification) {
const queryPromises = sanitizedFilters.map(filter => // Write verification: use full 8 second timeout
this.pool.querySync(this.relays, filter, { maxWait: 8000 }) const queryPromises = sanitizedFilters.map(filter =>
.catch(err => { this.pool.querySync(this.relays, filter, { maxWait: 8000 })
// Log individual relay errors but don't fail the entire request .catch(err => {
logger.debug({ error: err, filter, relays: this.relays }, 'Primary relay query failed, trying fallback'); logger.debug({ error: err, filter, relays: this.relays }, 'Primary relay query failed, trying fallback');
return []; // Return empty array for failed queries return []; // Return empty array for failed queries
}) })
); );
const results = await Promise.allSettled(queryPromises); const results = await Promise.allSettled(queryPromises);
let hasResults = false; let hasResults = false;
for (const result of results) { for (const result of results) {
if (result.status === 'fulfilled' && result.value.length > 0) { if (result.status === 'fulfilled' && result.value.length > 0) {
events.push(...result.value); events.push(...result.value);
hasResults = true; hasResults = true;
} else if (result.status === 'rejected') { } else if (result.status === 'rejected') {
// Log rejected promises (shouldn't happen since we catch above, but just in case) logger.debug({ error: result.reason }, 'Query promise rejected');
logger.debug({ error: result.reason }, 'Query promise rejected'); }
} }
}
// If no results from primary relays and we have fallback relays, try them // If no results from primary relays and we have fallback relays, try them
if (!hasResults && events.length === 0 && FALLBACK_NOSTR_RELAYS.length > 0) { if (!hasResults && events.length === 0 && FALLBACK_NOSTR_RELAYS.length > 0) {
logger.debug({ primaryRelays: this.relays, fallbackRelays: FALLBACK_NOSTR_RELAYS }, 'No results from primary relays, trying fallback relays'); logger.debug({ primaryRelays: this.relays, fallbackRelays: FALLBACK_NOSTR_RELAYS }, 'No results from primary relays, trying fallback relays');
try { try {
const fallbackPromises = sanitizedFilters.map(filter => const fallbackPromises = sanitizedFilters.map(filter =>
this.pool.querySync(FALLBACK_NOSTR_RELAYS, filter, { maxWait: 8000 }) this.pool.querySync(FALLBACK_NOSTR_RELAYS, filter, { maxWait: 8000 })
.catch(err => { .catch(err => {
logger.debug({ error: err, filter }, 'Fallback relay query failed'); logger.debug({ error: err, filter }, 'Fallback relay query failed');
return []; return [];
}) })
); );
const fallbackResults = await Promise.allSettled(fallbackPromises); const fallbackResults = await Promise.allSettled(fallbackPromises);
for (const result of fallbackResults) { for (const result of fallbackResults) {
if (result.status === 'fulfilled') { if (result.status === 'fulfilled') {
events.push(...result.value); events.push(...result.value);
}
}
if (events.length > 0) {
logger.info({ fallbackRelays: FALLBACK_NOSTR_RELAYS, eventCount: events.length }, 'Successfully fetched events from fallback relays');
} }
} catch (fallbackErr) {
logger.debug({ error: fallbackErr }, 'Fallback relay query failed completely');
} }
}
} else {
// Normal fetches: dynamic timeout - 2 seconds after first relay responds
let firstResponseTime: number | null = null;
const DYNAMIC_TIMEOUT_MS = 2000; // 2 seconds after first response
// Create queries for all filters
const baseQueryPromises = sanitizedFilters.map(filter =>
this.pool.querySync(this.relays, filter, { maxWait: 8000 })
.catch(err => {
logger.debug({ error: err, filter, relays: this.relays }, 'Primary relay query failed');
return []; // Return empty array for failed queries
})
);
if (events.length > 0) { // Wrap each query to track first response and apply dynamic timeout
logger.info({ fallbackRelays: FALLBACK_NOSTR_RELAYS, eventCount: events.length }, 'Successfully fetched events from fallback relays'); const queryPromises = baseQueryPromises.map((queryPromise, index) => {
return Promise.race([
queryPromise.then((results) => {
// Track when first response arrives (across all queries)
const now = Date.now();
if (firstResponseTime === null) {
firstResponseTime = now;
logger.debug({ filterIndex: index, firstResponseTime: now }, 'First relay responded, starting 2s timeout for other relays');
}
return results;
}),
// Dynamic timeout: if first response has arrived, timeout after 2 seconds from that point
new Promise<NostrEvent[]>((resolve) => {
const checkTimeout = () => {
if (firstResponseTime !== null) {
const elapsed = Date.now() - firstResponseTime;
if (elapsed >= DYNAMIC_TIMEOUT_MS) {
// Timeout reached - return empty array (query from faster relay already got results)
resolve([]);
} else {
// Check again after remaining time
setTimeout(checkTimeout, DYNAMIC_TIMEOUT_MS - elapsed);
}
} else {
// First response hasn't arrived yet, check again in 100ms
setTimeout(checkTimeout, 100);
}
};
checkTimeout();
// Maximum timeout of 8 seconds to prevent hanging forever
setTimeout(() => resolve([]), 8000);
})
]);
});
const results = await Promise.allSettled(queryPromises);
let hasResults = false;
for (const result of results) {
if (result.status === 'fulfilled' && result.value.length > 0) {
events.push(...result.value);
hasResults = true;
} else if (result.status === 'rejected') {
logger.debug({ error: result.reason }, 'Query promise rejected');
}
}
// If no results from primary relays and we have fallback relays, try them (with dynamic timeout too)
if (!hasResults && events.length === 0 && FALLBACK_NOSTR_RELAYS.length > 0) {
logger.debug({ primaryRelays: this.relays, fallbackRelays: FALLBACK_NOSTR_RELAYS }, 'No results from primary relays, trying fallback relays');
try {
// Reset first response time for fallback relays
firstResponseTime = null;
// Create queries for all filters on fallback relays
const fallbackBaseQueryPromises = sanitizedFilters.map(filter =>
this.pool.querySync(FALLBACK_NOSTR_RELAYS, filter, { maxWait: 8000 })
.catch(err => {
logger.debug({ error: err, filter }, 'Fallback relay query failed');
return [];
})
);
// Wrap each query to track first response and apply dynamic timeout
const fallbackQueryPromises = fallbackBaseQueryPromises.map((queryPromise, index) => {
return Promise.race([
queryPromise.then((results) => {
const now = Date.now();
if (firstResponseTime === null) {
firstResponseTime = now;
logger.debug({ filterIndex: index, firstResponseTime: now }, 'First fallback relay responded, starting 2s timeout');
}
return results;
}),
new Promise<NostrEvent[]>((resolve) => {
const checkTimeout = () => {
if (firstResponseTime !== null) {
const elapsed = Date.now() - firstResponseTime;
if (elapsed >= DYNAMIC_TIMEOUT_MS) {
resolve([]);
} else {
setTimeout(checkTimeout, DYNAMIC_TIMEOUT_MS - elapsed);
}
} else {
setTimeout(checkTimeout, 100);
}
};
checkTimeout();
setTimeout(() => resolve([]), 8000);
})
]);
});
const fallbackResults = await Promise.allSettled(fallbackQueryPromises);
for (const result of fallbackResults) {
if (result.status === 'fulfilled') {
events.push(...result.value);
}
}
if (events.length > 0) {
logger.info({ fallbackRelays: FALLBACK_NOSTR_RELAYS, eventCount: events.length }, 'Successfully fetched events from fallback relays');
}
} catch (fallbackErr) {
logger.debug({ error: fallbackErr }, 'Fallback relay query failed completely');
} }
} catch (fallbackErr) {
logger.debug({ error: fallbackErr }, 'Fallback relay query failed completely');
} }
} }
} catch (err) { } catch (err) {

2
src/lib/services/nostr/relay-write-proof.ts

@@ -121,7 +121,7 @@ export async function verifyRelayWriteProof(
authors: [userPubkey], authors: [userPubkey],
limit: 1 limit: 1
} }
]); ], true); // Pass true for isWriteVerification to use full timeout
if (events.length > 0) { if (events.length > 0) {
break; // Found the event, no need to retry break; // Found the event, no need to retry

22
src/routes/repos/[npub]/[repo]/services/commit-operations.ts

@@ -158,9 +158,25 @@ export async function viewDiff(
// Normalize commit hash (handle both 'hash' and 'sha' properties) // Normalize commit hash (handle both 'hash' and 'sha' properties)
const getCommitHash = (c: any) => c.hash || c.sha || ''; const getCommitHash = (c: any) => c.hash || c.sha || '';
const commitIndex = state.git.commits.findIndex(c => getCommitHash(c) === commitHash); const commitIndex = state.git.commits.findIndex(c => getCommitHash(c) === commitHash);
const parentHash = commitIndex >= 0
? (state.git.commits[commitIndex + 1] ? getCommitHash(state.git.commits[commitIndex + 1]) : `${commitHash}^`) // Determine parent hash: if this is the last commit (initial commit), use empty tree
: `${commitHash}^`; // Otherwise, use the next commit in the list or the parent commit
let parentHash: string;
if (commitIndex >= 0) {
// Check if this is the last commit (initial commit with no parent)
if (commitIndex === state.git.commits.length - 1) {
// This is the initial commit - use empty tree hash
// Git's empty tree hash: 4b825dc642cb6eb9a060e54bf8d69288fbee4904
parentHash = '4b825dc642cb6eb9a060e54bf8d69288fbee4904';
} else {
// Use the next commit (which is the parent in reverse chronological order)
parentHash = getCommitHash(state.git.commits[commitIndex + 1]);
}
} else {
// Commit not found in list, try to use parent (but this might fail for initial commit)
// We'll let the API handle the error
parentHash = `${commitHash}^`;
}
const diffData = await apiRequest<Array<{ const diffData = await apiRequest<Array<{
file: string; file: string;

Loading…
Cancel
Save