
bug-fixes

Nostr-Signature: 0580e0df8000275817f040bbd6c04dfdfbff08a366df7a1686f227d8b7310053 573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc 9a238266f989c0664dc5c9743675907477e2fcb5311e8edeb505dec97027f619f6dc6742ee5f3887ff6a864274b45005fc7dd4432f8e2772dfe0bb7e2d8a449c
main
Silberengel 3 weeks ago
parent
commit
091778ba20
  1. nostr/commit-signatures.jsonl (1)
  2. src/lib/services/nostr/nostr-client.ts (43)
  3. src/routes/api/search/+server.ts (190)

nostr/commit-signatures.jsonl (1)

@@ -58,3 +58,4 @@
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771755811,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","fix creating new branch"]],"content":"Signed commit: fix creating new branch","id":"bc6c623532064f9b2db08fa41bbc6c5ff42419415ca7e1ecb1162a884face2eb","sig":"ad1152e2848755e1afa7d9350716fa6bb709698a5036e21efa61b3ac755d334155f02a0622ad49f6dc060d523f4f886eb2acc8c80356a426b0d8ba454fdcb8ee"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771829031,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","fix file management and refactor"]],"content":"Signed commit: fix file management and refactor","id":"626196cdbf9eab28b44990706281878083d66983b503e8a81df7421054ed6caf","sig":"516c0001a800083411a1e04340e82116a82c975f38b984e92ebe021b61271ba7d6f645466ddba3594320c228193e708675a5d7a144b2f3d5e9bfbc65c4c7372b"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771836045,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","fix repo management and refactor\nimplement more GRASP support"]],"content":"Signed commit: fix repo management and refactor\nimplement more GRASP support","id":"6ae016621b13e22809e7bcebe34e5250fd6e0767d2b12ca634104def4ca78a29","sig":"99c34f66a8a67d352622621536545b7dee11cfd9d14a007ec0550d138109116a2f24483c6836fea59b94b9e96066fba548bcb7600bc55adbe0562d999c3c651d"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1771838236,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","refactor repo manager"]],"content":"Signed commit: refactor repo manager","id":"d134c35516991f27e47ed8a4aa0d3f1d6e6be41c46c9cf3f6c982c1442b09b4b","sig":"cb699fae6a8e44a3b9123f215749f6fec0470c75a0401a94c37dfb8e572c07281b3941862e704b868663f943c573ab2ee9fec217e87f7be567cc6bb3514cacdb"}

src/lib/services/nostr/nostr-client.ts (43)

@@ -197,6 +197,7 @@ export class NostrClient {
private connectionAttempts: Map<string, { count: number; lastAttempt: number }> = new Map();
private readonly MAX_CONCURRENT_CONNECTIONS = 3; // Max concurrent connections per relay
private readonly CONNECTION_BACKOFF_BASE = 1000; // Base backoff in ms
private readonly THROTTLE_RESET_TIME = 5 * 60 * 1000; // Reset throttling after 5 minutes
constructor(relays: string[]) {
this.relays = relays;
@@ -230,8 +231,10 @@ export class NostrClient {
/**
* Get or create a WebSocket connection to a relay
* @param relay - The relay URL
* @param isReadOperation - If true, this is a read operation (like search) that can bypass throttling more easily
*/
private async getConnection(relay: string): Promise<WebSocket | null> {
private async getConnection(relay: string, isReadOperation: boolean = false): Promise<WebSocket | null> {
const existing = this.connectionPool.get(relay);
// Reuse existing connection if it's open
@@ -246,12 +249,41 @@ export class NostrClient {
const now = Date.now();
const timeSinceLastAttempt = now - attemptInfo.lastAttempt;
// Reset throttling if enough time has passed (relays may have recovered)
if (attemptInfo.count > 0 && timeSinceLastAttempt > this.THROTTLE_RESET_TIME) {
logger.debug({ relay, timeSinceLastAttempt }, 'Resetting throttling - enough time has passed');
this.connectionAttempts.set(relay, { count: 0, lastAttempt: now });
}
// If we've had too many recent failures, apply exponential backoff
// For read operations, use less aggressive throttling (half the backoff time)
if (attemptInfo.count > 0) {
const backoffTime = this.CONNECTION_BACKOFF_BASE * Math.pow(2, Math.min(attemptInfo.count - 1, 5));
const backoffMultiplier = isReadOperation ? 0.5 : 1.0;
const backoffTime = this.CONNECTION_BACKOFF_BASE * Math.pow(2, Math.min(attemptInfo.count - 1, 5)) * backoffMultiplier;
if (timeSinceLastAttempt < backoffTime) {
logger.debug({ relay, backoffTime, timeSinceLastAttempt }, 'Throttling connection attempt');
return null; // Don't attempt connection yet
const waitTime = backoffTime - timeSinceLastAttempt;
// For read operations, be more lenient - allow longer waits or bypass if we have cached data
const maxWaitTime = isReadOperation ? 10000 : 5000; // 10s for reads, 5s for writes
if (waitTime <= maxWaitTime) {
logger.debug({ relay, backoffTime, timeSinceLastAttempt, waitTime, isReadOperation }, 'Throttling connection attempt - waiting for backoff');
await new Promise(resolve => setTimeout(resolve, waitTime));
// After waiting, check if connection is now available
const existingAfterWait = this.connectionPool.get(relay);
if (existingAfterWait && (existingAfterWait.ws.readyState === WebSocket.OPEN || existingAfterWait.ws.readyState === WebSocket.CONNECTING)) {
existingAfterWait.pendingRequests++;
return existingAfterWait.ws;
}
// Continue to create new connection after backoff
} else {
// Backoff is too long, return null to avoid long waits
// For read operations, we might still want to try (if we have no cached data)
// but for now, we'll be conservative and return null
logger.debug({ relay, waitTime, maxWaitTime, isReadOperation }, 'Backoff too long, skipping connection attempt');
return null;
}
}
}
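Distilled, the throttling policy in this hunk is a pure function of failure count, elapsed time, and operation type. A minimal sketch using the diff's own constants (the `decideBackoff` framing and parameter names are mine):

```typescript
// Constants mirror the diff above.
const CONNECTION_BACKOFF_BASE = 1000;      // base backoff in ms
const THROTTLE_RESET_TIME = 5 * 60 * 1000; // reset throttling after 5 minutes

interface BackoffDecision {
  action: 'proceed' | 'wait' | 'skip';
  waitMs: number;
}

function decideBackoff(
  failureCount: number,
  msSinceLastAttempt: number,
  isReadOperation: boolean
): BackoffDecision {
  // No recent failures, or the relay has had time to recover: connect now.
  if (failureCount === 0 || msSinceLastAttempt > THROTTLE_RESET_TIME) {
    return { action: 'proceed', waitMs: 0 };
  }
  // Exponential backoff with the exponent capped at 5 (32s base);
  // read operations back off half as long.
  const multiplier = isReadOperation ? 0.5 : 1.0;
  const backoffMs =
    CONNECTION_BACKOFF_BASE * Math.pow(2, Math.min(failureCount - 1, 5)) * multiplier;
  if (msSinceLastAttempt >= backoffMs) {
    return { action: 'proceed', waitMs: 0 };
  }
  const waitMs = backoffMs - msSinceLastAttempt;
  const maxWaitMs = isReadOperation ? 10_000 : 5_000; // reads tolerate longer waits
  return waitMs <= maxWaitMs ? { action: 'wait', waitMs } : { action: 'skip', waitMs };
}
```

Keeping the decision pure makes the wait/skip thresholds easy to unit-test without opening real sockets.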
@@ -712,7 +744,8 @@ export class NostrClient {
let authPromise: Promise<boolean> | null = null;
// Get connection from pool or create new one
this.getConnection(relay).then(websocket => {
// fetchFromRelay is always a read operation, so pass isReadOperation: true
this.getConnection(relay, true).then(websocket => {
if (!websocket) {
resolveOnce([]);
return;

src/routes/api/search/+server.ts (190)

@@ -57,7 +57,12 @@ export const GET: RequestHandler = async (event) => {
}
const relays = Array.from(allRelays);
logger.debug({ relayCount: relays.length }, 'Using relays for search');
logger.info({
relayCount: relays.length,
relays: relays.slice(0, 5), // Log first 5 relays
query: query.trim().substring(0, 50), // Log first 50 chars of query
hasUserPubkey: !!userPubkey
}, 'Starting search with relays');
// Create client with all available relays
const nostrClient = new NostrClient(relays);
@@ -105,20 +110,25 @@ export const GET: RequestHandler = async (event) => {
// Return cached events immediately, fetch from relays in background
nostrClient.fetchEvents(filters).then(freshEvents => {
// Merge fresh events with cached ones (deduplicate by event ID)
const eventMap = new Map<string, NostrEvent>();
cachedEvents.forEach(e => eventMap.set(e.id, e));
freshEvents.forEach(e => {
const existing = eventMap.get(e.id);
if (!existing || e.created_at > existing.created_at) {
eventMap.set(e.id, e);
}
});
// Only update cache if we got results (don't replace with empty results from failed fetches)
if (freshEvents.length > 0) {
// Merge fresh events with cached ones (deduplicate by event ID)
const eventMap = new Map<string, NostrEvent>();
cachedEvents.forEach(e => eventMap.set(e.id, e));
freshEvents.forEach(e => {
const existing = eventMap.get(e.id);
if (!existing || e.created_at > existing.created_at) {
eventMap.set(e.id, e);
}
});
const mergedEvents = Array.from(eventMap.values());
// Update cache with merged results
eventCache.set(filters, mergedEvents);
logger.debug({ filters, mergedCount: mergedEvents.length }, 'Updated cache with fresh events');
const mergedEvents = Array.from(eventMap.values());
// Update cache with merged results
eventCache.set(filters, mergedEvents);
logger.debug({ filters, mergedCount: mergedEvents.length }, 'Updated cache with fresh events');
} else {
logger.debug({ filters }, 'Background fetch returned no events (relays may be throttled), keeping cached data');
}
}).catch(err => {
logger.debug({ error: err, filters }, 'Background fetch failed, using cached events');
});
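The merge-and-dedupe step here reappears later in the file for the text-search cache; factored out, it could look like this sketch (`mergeEvents` is a hypothetical helper, and the minimal `NostrEvent` shape stands in for the type the route imports):

```typescript
// Minimal event shape for the sketch; the real type carries all NIP-01 fields.
interface NostrEvent {
  id: string;
  created_at: number;
  tags: string[][];
}

// Merge fresh events into cached ones, deduplicating by event id and
// keeping the newer created_at when both sides carry the same id.
function mergeEvents(cached: NostrEvent[], fresh: NostrEvent[]): NostrEvent[] {
  const byId = new Map<string, NostrEvent>();
  for (const e of cached) byId.set(e.id, e);
  for (const e of fresh) {
    const existing = byId.get(e.id);
    if (!existing || e.created_at > existing.created_at) byId.set(e.id, e);
  }
  return Array.from(byId.values());
}
```

The guard on `freshEvents.length > 0` is what keeps a throttled relay round from replacing good cached data with an empty result.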
@@ -126,13 +136,30 @@ export const GET: RequestHandler = async (event) => {
return cachedEvents;
}
// No cache, fetch from relays
const freshEvents = await nostrClient.fetchEvents(filters);
// Cache the results
if (freshEvents.length > 0) {
eventCache.set(filters, freshEvents);
// No cache, fetch from relays with timeout
try {
const freshEvents = await Promise.race([
nostrClient.fetchEvents(filters),
new Promise<NostrEvent[]>((resolve) => {
setTimeout(() => {
logger.warn({ filters, relayCount: relays.length }, 'Fetch timeout - relays may be throttled or slow');
resolve([]); // Return empty array on timeout
}, 10000); // 10 second timeout for search
})
]);
// Cache the results only if we got some
if (freshEvents.length > 0) {
eventCache.set(filters, freshEvents);
logger.debug({ filters, fetchedCount: freshEvents.length }, 'Fetched and cached events from relays');
} else {
logger.warn({ filters, relayCount: relays.length }, 'No events fetched from relays - may be throttled or unavailable');
}
return freshEvents;
} catch (err) {
logger.warn({ error: err, filters, relayCount: relays.length }, 'Failed to fetch events from relays');
return []; // Return empty array on error
}
return freshEvents;
}
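The timeout guard in this hunk is the standard Promise.race pattern; pulled out as a reusable sketch (the `withTimeout` helper is hypothetical, not part of the route):

```typescript
// Race a promise against a timer and resolve to a fallback value instead
// of hanging when relays are throttled or slow (10s for search above).
function withTimeout<T>(work: Promise<T>, ms: number, fallback: T): Promise<T> {
  return Promise.race([
    work,
    new Promise<T>(resolve => setTimeout(() => resolve(fallback), ms)),
  ]);
}

// Usage matching the route: resolve to [] rather than rejecting, so the
// handler can tell "no events" apart from a thrown transport error.
// const freshEvents = await withTimeout(nostrClient.fetchEvents(filters), 10_000, []);
```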
let events: NostrEvent[] = [];
@@ -158,12 +185,23 @@ export const GET: RequestHandler = async (event) => {
const normalizedQuery = normalizeUrl(query.trim());
// Fetch all repos with cache-first strategy
const allRepos = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
limit: 1000 // Get more to find URL matches
let allRepos: NostrEvent[] = [];
try {
allRepos = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
limit: 1000 // Get more to find URL matches
}
]);
// If we got no results and cache was empty, log a warning
if (allRepos.length === 0) {
logger.warn({ query: query.trim(), relayCount: relays.length }, 'No repos found for URL search - relays may be throttled or unavailable');
}
]);
} catch (err) {
logger.warn({ error: err, query: query.trim() }, 'Failed to fetch repos for URL search');
allRepos = [];
}
// Filter for repos that have a matching clone URL
events = allRepos.filter(event => {
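The filter body is truncated in this hunk. For context, clone-URL matching on NIP-34 repo announcements typically scans the `clone` tags, roughly as in this sketch (the tag layout is an assumption here, and `normalizeUrl` is the route's own helper, behavior assumed):

```typescript
// Reuses the NostrEvent shape from the mergeEvents sketch above;
// normalizeUrl is the route's own helper (behavior assumed).
declare function normalizeUrl(url: string): string;

// Hypothetical sketch: a repo announcement matches when any URL in a
// "clone" tag normalizes to the same string as the query (NIP-34 events
// may list several clone URLs in one tag).
function matchesCloneUrl(event: NostrEvent, normalizedQuery: string): boolean {
  return event.tags.some(
    tag => tag[0] === 'clone' &&
           tag.slice(1).some(url => normalizeUrl(url) === normalizedQuery)
  );
}
```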
@@ -191,22 +229,39 @@ export const GET: RequestHandler = async (event) => {
logger.debug({ query: query.trim(), resolvedPubkey }, 'Searching for repos by pubkey');
// Fetch repos where this pubkey is the owner (cache-first)
const ownerEvents = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
authors: [resolvedPubkey],
limit: limit * 2
}
]);
let ownerEvents: NostrEvent[] = [];
try {
ownerEvents = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
authors: [resolvedPubkey],
limit: limit * 2
}
]);
} catch (err) {
logger.warn({ error: err, resolvedPubkey }, 'Failed to fetch owner repos for pubkey search');
ownerEvents = [];
}
// Fetch repos where this pubkey is a maintainer (cache-first)
// We need to fetch all repos and filter by maintainer tags
const allRepos = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
limit: 1000 // Get more to find maintainer matches
let allRepos: NostrEvent[] = [];
try {
allRepos = await fetchEventsWithCache([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
limit: 1000 // Get more to find maintainer matches
}
]);
// If we got no results, log a warning
if (allRepos.length === 0) {
logger.warn({ resolvedPubkey, relayCount: relays.length }, 'No repos found for maintainer search - relays may be throttled or unavailable');
}
]);
} catch (err) {
logger.warn({ error: err, resolvedPubkey }, 'Failed to fetch repos for maintainer search');
allRepos = [];
}
// Filter for repos where resolvedPubkey is in maintainers tag
const maintainerEvents = allRepos.filter(event => {
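Likewise truncated; maintainer matching on NIP-34 announcements is the same tag scan against the `maintainers` tag, sketched here under the same assumptions:

```typescript
// Hypothetical sketch: resolvedPubkey counts as a maintainer when the
// repo's "maintainers" tag lists it (tag layout assumed, per NIP-34).
function isMaintainer(event: NostrEvent, pubkey: string): boolean {
  return event.tags.some(
    tag => tag[0] === 'maintainers' && tag.slice(1).includes(pubkey)
  );
}
```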
@@ -256,30 +311,44 @@ export const GET: RequestHandler = async (event) => {
logger.debug({ cachedCount: cachedAllRepos.length }, 'Using cached repos for text search');
allReposForTextSearch = cachedAllRepos;
// Fetch fresh data in background
// Fetch fresh data in background (don't wait, use cached data immediately)
nostrClient.fetchEvents([{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }]).then(freshRepos => {
// Merge and update cache
const eventMap = new Map<string, NostrEvent>();
cachedAllRepos.forEach(e => eventMap.set(e.id, e));
freshRepos.forEach(e => {
const existing = eventMap.get(e.id);
if (!existing || e.created_at > existing.created_at) {
eventMap.set(e.id, e);
}
});
const merged = Array.from(eventMap.values());
eventCache.set([{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }], merged);
// Only update cache if we got results (don't replace with empty results)
if (freshRepos.length > 0) {
// Merge and update cache
const eventMap = new Map<string, NostrEvent>();
cachedAllRepos.forEach(e => eventMap.set(e.id, e));
freshRepos.forEach(e => {
const existing = eventMap.get(e.id);
if (!existing || e.created_at > existing.created_at) {
eventMap.set(e.id, e);
}
});
const merged = Array.from(eventMap.values());
eventCache.set([{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }], merged);
logger.debug({ mergedCount: merged.length, freshCount: freshRepos.length }, 'Updated cache with fresh repos');
}
}).catch(err => {
logger.debug({ error: err }, 'Background fetch failed for text search');
logger.debug({ error: err }, 'Background fetch failed for text search, using cached data');
});
} else {
// No cache, fetch all repos
allReposForTextSearch = await nostrClient.fetchEvents([
{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }
]);
// Cache the results
if (allReposForTextSearch.length > 0) {
eventCache.set([{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }], allReposForTextSearch);
// No cache, try to fetch all repos
logger.debug({ relayCount: relays.length }, 'No cache available, fetching repos from relays');
try {
allReposForTextSearch = await nostrClient.fetchEvents([
{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }
]);
// Only cache if we got results
if (allReposForTextSearch.length > 0) {
eventCache.set([{ kinds: [KIND.REPO_ANNOUNCEMENT], limit: 1000 }], allReposForTextSearch);
logger.debug({ fetchedCount: allReposForTextSearch.length }, 'Fetched and cached repos from relays');
} else {
logger.warn({ relayCount: relays.length }, 'No repos fetched from relays - all relays may be throttled or unavailable');
}
} catch (err) {
logger.warn({ error: err, relayCount: relays.length }, 'Failed to fetch repos from relays');
allReposForTextSearch = []; // Empty array if fetch fails
}
}
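Taken together, both branches of this hunk implement a cache-first, refresh-in-background (stale-while-revalidate) flow. A condensed sketch of the control flow, reusing the `mergeEvents` helper sketched earlier (the `cacheFirst` shape is mine, not the route's API):

```typescript
// Hypothetical cache interface; eventCache in the route plays this role.
interface EventCacheLike {
  get(): NostrEvent[] | null;
  set(events: NostrEvent[]): void;
}

// Condensed control flow of the branches above: serve cached data at once,
// refresh in the background, and only hit the network synchronously on a
// cache miss. mergeEvents is the dedupe helper sketched earlier.
async function cacheFirst(
  cache: EventCacheLike,
  fetchFresh: () => Promise<NostrEvent[]>
): Promise<NostrEvent[]> {
  const cached = cache.get();
  if (cached && cached.length > 0) {
    fetchFresh()
      .then(fresh => {
        // Never let an empty or failed round clobber good cached data.
        if (fresh.length > 0) cache.set(mergeEvents(cached, fresh));
      })
      .catch(() => { /* keep serving cached data */ });
    return cached;
  }
  const fresh = await fetchFresh().catch(() => [] as NostrEvent[]);
  if (fresh.length > 0) cache.set(fresh); // cache only non-empty results
  return fresh;
}
```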
@@ -609,6 +678,13 @@ export const GET: RequestHandler = async (event) => {
results.repos = results.repos.slice(0, limit);
logger.info({
query: query.trim().substring(0, 50),
resultCount: results.repos.length,
total: results.repos.length,
relayCount: relays.length
}, 'Search completed');
return json({
query,
results,
