Browse Source

refactor 12

Nostr-Signature: 73671ae6535309f9eae164f7a3ec403b1bc818ef811b9692fd0122d0b72c2774 573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc 0df56b009f5afb77de334225ab30cff55586ac0cf48f5ee435428201a1e72dc357a0fb5e80ef196f5bd76d6d448056d25f0feab0b1bcbe45f9af1a2a0d5453ca
main
Silberengel 2 weeks ago
parent
commit
8155665d3f
  1. 1
      nostr/commit-signatures.jsonl
  2. 56
      src/hooks.server.ts
  3. 2398
      src/lib/services/git/file-manager.ts.backup
  4. 24
      src/lib/services/nostr/nostr-client.ts
  5. 46
      src/lib/services/nostr/repo-polling.ts
  6. 53
      src/lib/utils/git-process.ts
  7. 35
      src/lib/utils/nostr-event-utils.ts
  8. 147
      src/lib/utils/process-cleanup.ts
  9. 192
      src/routes/api/git/[...path]/+server.ts
  10. 25
      src/routes/api/repos/local/+server.ts
  11. 24
      src/routes/api/search/+server.ts
  12. 6619
      src/routes/repos/[npub]/[repo]/+page.svelte.backup
  13. 33
      src/routes/repos/[npub]/[repo]/utils/discussion-utils.ts

1
nostr/commit-signatures.jsonl

@@ -106,3 +106,4 @@
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772136696,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","refactor 10"]],"content":"Signed commit: refactor 10","id":"7fb8d54e26ab59486f3b56d97e225ed02f893140025c03ccb95a991e523e6182","sig":"f4bb5a037c48d06854d9346ebf96aa9f65f11d3f96e23d08b7d38d0ebea9bab242ffa917239aa432d83a55f369586d66603f439f40eac8156aeaaf80737b81a1"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772141183,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","bug-fixes"]],"content":"Signed commit: bug-fixes","id":"b92b203686c0629409fef055e7f3189cf9f26be5cca0253ab00cf7e8498e1115","sig":"06a13aac9d2f794e52b0416044db6ebf9dd248d254d2166d7e7f3fefd2b7d37d1a85072c3e92316898c31068e25cf37bc5afd2fcd8ae2050d0a30b1bc1973678"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772142448,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","refactor 11"]],"content":"Signed commit: refactor 11","id":"bb9d5c56a291e48221df96868fb925e309cb560aa350c2cf5f9c4ddd5e5c4a6b","sig":"75662c916bf4d8bb3d70cdae4e4882382692c6f1ca67598a69abe3dc96069ef6f2bda5a1b8f91b724aa43b3cb3c6b8ad6cbce286b5d165377a34a881e7275d2a"}
{"kind":1640,"pubkey":"573634b648634cbad10f2451776089ea21090d9407f715e83c577b4611ae6edc","created_at":1772142558,"tags":[["author","Silberengel","silberengel7@protonmail.com"],["message","remove redundancy"]],"content":"Signed commit: remove redundancy","id":"11ac91151bebd4dd49b91bcdef7b0b7157f0afd8ce710f7231be4860fb073d08","sig":"a7efcafa5ea83a0c37eae4562a84a7581c3d5c5dd1416f8f3e2bd2633d8523ae0eb7cc56dc4292c127ea16fb2dd5bc639483cb096263a850956b47312ed7ff6f"}

56
src/hooks.server.ts

@ -29,8 +29,60 @@ if (typeof process !== 'undefined') { @@ -29,8 +29,60 @@ if (typeof process !== 'undefined') {
});
pollingService = new RepoPollingService(DEFAULT_NOSTR_RELAYS, repoRoot, domain);
pollingService.start();
logger.info({ service: 'repo-polling', relays: DEFAULT_NOSTR_RELAYS.length }, 'Started repo polling service');
// Start polling - the initial poll will complete asynchronously
// The local repos endpoint will skip cache for the first 10 seconds after startup
pollingService.start().then(() => {
logger.info({ service: 'repo-polling', relays: DEFAULT_NOSTR_RELAYS.length }, 'Repo polling service ready (initial poll completed)');
}).catch((err) => {
logger.error({ error: err, service: 'repo-polling' }, 'Initial repo poll failed, but continuing');
});
logger.info({ service: 'repo-polling', relays: DEFAULT_NOSTR_RELAYS.length }, 'Started repo polling service (initial poll in progress)');
// Cleanup on server shutdown
const cleanup = (signal: string) => {
logger.info({ signal }, 'Received shutdown signal, cleaning up...');
if (pollingService) {
logger.info('Stopping repo polling service...');
pollingService.stop();
pollingService = null;
}
// Give a moment for cleanup, then exit
setTimeout(() => {
process.exit(0);
}, 1000);
};
process.on('SIGTERM', () => cleanup('SIGTERM'));
process.on('SIGINT', () => {
// SIGINT (Ctrl-C) - exit immediately after cleanup
cleanup('SIGINT');
// Force exit after 2 seconds if cleanup takes too long
setTimeout(() => {
logger.warn('Forcing exit after SIGINT');
process.exit(0);
}, 2000);
});
// Also cleanup on process exit (last resort)
process.on('exit', () => {
if (pollingService) {
pollingService.stop();
}
});
// Periodic zombie process cleanup check
// This helps catch any processes that weren't properly cleaned up
if (typeof setInterval !== 'undefined') {
setInterval(() => {
// Check for zombie processes by attempting to reap them
// Node.js handles this automatically via 'close' events, but this is a safety net
// We can't directly check for zombies, but we can ensure our cleanup is working
// The real cleanup happens in process handlers, this is just monitoring
logger.debug('Zombie cleanup check (process handlers should prevent zombies)');
}, 60000); // Check every minute
}
}
export const handle: Handle = async ({ event, resolve }) => {

2398
src/lib/services/git/file-manager.ts.backup

File diff suppressed because it is too large Load Diff

24
src/lib/services/nostr/nostr-client.ts

@ -8,33 +8,11 @@ import logger from '../logger.js'; @@ -8,33 +8,11 @@ import logger from '../logger.js';
import { isNIP07Available, getPublicKeyWithNIP07, signEventWithNIP07 } from './nip07-signer.js';
import { SimplePool, type Filter } from 'nostr-tools';
import { KIND } from '../../types/nostr.js';
import { isParameterizedReplaceable } from '../../utils/nostr-event-utils.js';
// Replaceable event kinds (only latest per pubkey matters)
const REPLACEABLE_KINDS = [0, 3, 10002]; // Profile, Contacts, Relay List
/**
* Check if an event is a parameterized replaceable event (NIP-33)
* Parameterized replaceable events have:
* - kind >= 10000 && kind < 20000 (replaceable range) with a 'd' tag, OR
* - kind >= 30000 && kind < 40000 (addressable range) with a 'd' tag
*/
function isParameterizedReplaceable(event: NostrEvent): boolean {
const hasDTag = event.tags.some(t => t[0] === 'd' && t[1]);
if (!hasDTag) return false;
// Replaceable range (NIP-33)
if (event.kind >= 10000 && event.kind < 20000) {
return true;
}
// Addressable range (NIP-34) - also parameterized replaceable
if (event.kind >= 30000 && event.kind < 40000) {
return true;
}
return false;
}
/**
* Get the deduplication key for an event
* For replaceable events: kind:pubkey

46
src/lib/services/nostr/repo-polling.ts

@ -18,6 +18,8 @@ export class RepoPollingService { @@ -18,6 +18,8 @@ export class RepoPollingService {
private intervalId: NodeJS.Timeout | null = null;
private domain: string;
private relays: string[];
private initialPollPromise: Promise<void> | null = null;
private isInitialPollComplete: boolean = false;
constructor(
relays: string[],
@ -34,29 +36,52 @@ export class RepoPollingService { @@ -34,29 +36,52 @@ export class RepoPollingService {
/**
* Start polling for repo announcements
* Returns a promise that resolves when the initial poll completes
*/
start(): void {
start(): Promise<void> {
if (this.intervalId) {
this.stop();
}
// Poll immediately
this.poll();
// Poll immediately and wait for it to complete
this.initialPollPromise = this.poll();
// Then poll at intervals
this.intervalId = setInterval(() => {
this.poll();
}, this.pollingInterval);
return this.initialPollPromise;
}
/**
* Stop polling
* Wait for initial poll to complete (useful for server startup)
*/
async waitForInitialPoll(): Promise<void> {
if (this.initialPollPromise) {
await this.initialPollPromise;
}
}
/**
* Check if initial poll has completed
*/
isReady(): boolean {
return this.isInitialPollComplete;
}
/**
* Stop polling and cleanup resources
*/
stop(): void {
if (this.intervalId) {
clearInterval(this.intervalId);
this.intervalId = null;
}
// Close Nostr client connections
if (this.nostrClient) {
this.nostrClient.close();
}
}
/**
@ -64,6 +89,7 @@ export class RepoPollingService { @@ -64,6 +89,7 @@ export class RepoPollingService {
*/
private async poll(): Promise<void> {
try {
logger.debug('Starting repo poll...');
const events = await this.nostrClient.fetchEvents([
{
kinds: [KIND.REPO_ANNOUNCEMENT],
@ -186,8 +212,20 @@ export class RepoPollingService { @@ -186,8 +212,20 @@ export class RepoPollingService {
logger.error({ error, eventId: event.id }, 'Failed to provision repo from announcement');
}
}
// Mark initial poll as complete
if (!this.isInitialPollComplete) {
this.isInitialPollComplete = true;
logger.info('Initial repo poll completed');
}
} catch (error) {
logger.error({ error }, 'Error polling for repo announcements');
// Still mark as complete even on error (to prevent blocking)
if (!this.isInitialPollComplete) {
this.isInitialPollComplete = true;
logger.warn('Initial repo poll completed with errors');
}
}
}

53
src/lib/utils/git-process.ts

@ -5,6 +5,7 @@ @@ -5,6 +5,7 @@
import { spawn, type ChildProcess } from 'child_process';
import logger from '../services/logger.js';
import { killProcessGroup, forceKillProcessGroup, cleanupProcess, closeProcessStreams } from './process-cleanup.js';
export interface GitProcessOptions {
cwd?: string;
@ -50,33 +51,21 @@ export function spawnGitProcess( @@ -50,33 +51,21 @@ export function spawnGitProcess(
let stderr = '';
let resolved = false;
// Set timeout to prevent hanging processes
// Set timeout to prevent hanging processes with aggressive cleanup
let forceKillTimeout: NodeJS.Timeout | null = null;
const timeoutId = timeoutMs > 0 ? setTimeout(() => {
if (!resolved && !gitProcess.killed) {
resolved = true;
logger.warn({ args, timeoutMs }, 'Git process timeout, killing process');
logger.warn({ args, timeoutMs }, 'Git process timeout, killing process group');
// Kill the process tree to prevent zombies
try {
gitProcess.kill('SIGTERM');
// Force kill after grace period
const forceKillTimeout = setTimeout(() => {
if (gitProcess.pid && !gitProcess.killed) {
try {
gitProcess.kill('SIGKILL');
} catch (err) {
logger.warn({ err, pid: gitProcess.pid }, 'Failed to force kill git process');
}
}
}, 5000);
// Clear force kill timeout if process terminates
gitProcess.once('close', () => {
clearTimeout(forceKillTimeout);
});
} catch (err) {
logger.warn({ err }, 'Error killing timed out git process');
}
// Kill entire process group to prevent zombies
killProcessGroup(gitProcess, 'SIGTERM');
// Force kill after grace period
forceKillTimeout = forceKillProcessGroup(gitProcess, 5000);
// Ensure streams are closed
closeProcessStreams(gitProcess);
reject(new Error(`Git command timeout after ${timeoutMs}ms: ${args.join(' ')}`));
}
@ -98,21 +87,12 @@ export function spawnGitProcess( @@ -98,21 +87,12 @@ export function spawnGitProcess(
// Handle process close (main cleanup point)
gitProcess.on('close', (code, signal) => {
if (timeoutId) clearTimeout(timeoutId);
// Aggressive cleanup: clear timeouts and ensure streams are closed
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout]);
if (resolved) return;
resolved = true;
// Ensure process is fully cleaned up
if (gitProcess.pid) {
try {
// Check if process still exists (this helps ensure cleanup)
process.kill(gitProcess.pid, 0);
} catch {
// Process already dead, that's fine
}
}
resolve({
stdout,
stderr,
@ -121,9 +101,10 @@ export function spawnGitProcess( @@ -121,9 +101,10 @@ export function spawnGitProcess(
});
});
// Handle process errors
// Handle process errors with aggressive cleanup
gitProcess.on('error', (err) => {
if (timeoutId) clearTimeout(timeoutId);
// Aggressive cleanup on error
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout], 'SIGTERM');
if (resolved) return;
resolved = true;

35
src/lib/utils/nostr-event-utils.ts

@@ -0,0 +1,35 @@
/**
* Shared utilities for Nostr event handling
* Consolidates duplicate functions used across the codebase
*
* Based on NIP-01: https://github.com/nostr-protocol/nips/blob/master/01.md
*/
import type { NostrEvent } from '../types/nostr.js';
/**
 * Shared helper: decide whether a Nostr event is addressable
 * ("parameterized replaceable") per NIP-01.
 *
 * NIP-01 defines two related categories:
 * - Replaceable events (kinds 0, 3, and 10000-19999): identified by
 *   kind + pubkey alone; no d-tag is involved.
 * - Addressable events (kinds 30000-39999): identified by
 *   kind + pubkey + the value of their 'd' tag.
 *
 * Only the second category is "parameterized", so this returns true
 * exclusively for kinds in [30000, 40000) carrying a non-empty d-tag.
 *
 * @param event - The Nostr event to check
 * @returns true if the event is an addressable event (30000-39999) with a d-tag
 */
export function isParameterizedReplaceable(event: NostrEvent): boolean {
  // Outside the addressable range nothing is parameterized replaceable,
  // no matter which tags are present.
  const inAddressableRange = event.kind >= 30000 && event.kind < 40000;
  if (!inAddressableRange) {
    return false;
  }
  // Per NIP-01: "for kind n such that 30000 <= n < 40000, events are
  // addressable by their kind, pubkey and d tag value" — so require a
  // d-tag with a non-empty value.
  return event.tags.some(([tagName, tagValue]) => tagName === 'd' && Boolean(tagValue));
}

147
src/lib/utils/process-cleanup.ts

@@ -0,0 +1,147 @@
/**
* Aggressive process cleanup utilities to prevent zombie processes
* Implements process group killing and explicit reaping
*/
import { spawn, type ChildProcess } from 'child_process';
import logger from '../services/logger.js';
/**
 * Kill a process and attempt to kill its process group to prevent zombies.
 *
 * On Unix this additionally signals the process group via a negative PID,
 * which only succeeds when the child is a group leader (e.g. spawned
 * detached); the failure case is expected and harmless because the process
 * itself has already been signaled directly.
 *
 * Bug fixed: the previous guard `!proc.killed` skipped the direct kill once
 * ANY signal had been delivered — `ChildProcess.killed` means "a signal was
 * sent", not "the process has exited" — so escalating from SIGTERM to
 * SIGKILL on a live process was silently dropped. We now check the exit
 * status, which is only populated after the process actually terminates.
 *
 * @param proc - the child process to signal
 * @param signal - signal to deliver (defaults to SIGTERM)
 */
export function killProcessGroup(proc: ChildProcess, signal: NodeJS.Signals = 'SIGTERM'): void {
  const pid = proc.pid;
  if (!pid) {
    // Spawn failed before a PID was assigned; nothing to clean up.
    return;
  }
  // exitCode/signalCode stay null until the process terminates, so this is
  // true for a live process even when a previous signal was already sent.
  const stillRunning = proc.exitCode === null && proc.signalCode === null;
  try {
    if (stillRunning) {
      proc.kill(signal);
      logger.debug({ pid, signal }, 'Killed process');
    }
  } catch (err) {
    logger.debug({ pid, error: err }, 'Error killing process directly');
  }
  // On Unix, try to signal the whole process group with a negative PID.
  // This throws ESRCH when the child is not a group leader — expected and
  // deliberately ignored, since the main process was handled above.
  if (process.platform !== 'win32') {
    try {
      process.kill(-pid, signal);
      logger.debug({ pid, signal }, 'Killed process group');
    } catch (err) {
      logger.debug({ pid }, 'Process group kill not applicable (process not in own group)');
    }
  }
}
/**
 * Schedule a SIGKILL escalation for a process group after a grace period.
 *
 * Bug fixed: the SIGKILL used to be skipped whenever `proc.killed` was true.
 * `ChildProcess.killed` only records that *a* signal was successfully sent
 * (e.g. the earlier SIGTERM) — not that the process exited — so a process
 * that ignored SIGTERM was never force-killed. We now check the exit status
 * (`exitCode`/`signalCode`), which is only set once the process has actually
 * terminated.
 *
 * @param proc - the child process (group) to escalate on
 * @param gracePeriodMs - how long to wait before sending SIGKILL
 * @returns the timer handle so callers can cancel it once the process exits
 */
export function forceKillProcessGroup(
  proc: ChildProcess,
  gracePeriodMs: number = 5000
): NodeJS.Timeout {
  return setTimeout(() => {
    // exitCode/signalCode are both null until the process terminates, so
    // this correctly detects "still running" even after a prior SIGTERM.
    const stillRunning =
      proc.pid !== undefined && proc.exitCode === null && proc.signalCode === null;
    if (stillRunning) {
      try {
        killProcessGroup(proc, 'SIGKILL');
        logger.warn({ pid: proc.pid }, 'Force killed process group with SIGKILL');
      } catch (err) {
        logger.warn({ pid: proc.pid, error: err }, 'Failed to force kill process group');
      }
    }
  }, gracePeriodMs);
}
/**
 * Ensure all stdio streams of a child process are closed so that no file
 * descriptors or buffers leak after the process is done.
 *
 * @param proc - the child process whose stdin/stdout/stderr should be torn down
 */
export function closeProcessStreams(proc: ChildProcess): void {
  try {
    // Destroy each stdio stream that exists and is not already torn down;
    // order matches stdin, stdout, stderr.
    for (const stream of [proc.stdin, proc.stdout, proc.stderr]) {
      if (stream && !stream.destroyed) {
        stream.destroy();
      }
    }
  } catch (err) {
    logger.debug({ error: err }, 'Error closing process streams');
  }
}
/**
 * Comprehensive cleanup: kill process group, close streams, and clear timeouts.
 *
 * @param proc - the child process to tear down
 * @param timeouts - pending timers tied to this process (null entries ignored)
 * @param signal - signal used for the kill step (defaults to SIGTERM)
 */
export function cleanupProcess(
  proc: ChildProcess,
  timeouts: Array<NodeJS.Timeout | null>,
  signal: NodeJS.Signals = 'SIGTERM'
): void {
  // 1. Cancel every pending timer so no stale callback fires later.
  timeouts.forEach((pending) => {
    if (pending !== null) {
      clearTimeout(pending);
    }
  });
  // 2. Release the stdio resources.
  closeProcessStreams(proc);
  // 3. Finally, signal the process (and its group) if it is still around.
  const needsKill = Boolean(proc.pid) && !proc.killed;
  if (needsKill) {
    killProcessGroup(proc, signal);
  }
}
/**
 * Spawn a process with process group isolation to enable group killing.
 * This is critical for preventing zombies when the process spawns children.
 *
 * Bug fixed: this previously hardcoded `detached: false`, so the child never
 * became a process-group leader and `killProcessGroup()`'s negative-PID kill
 * could never match a group. On POSIX, `detached: true` runs the child in a
 * new session (setsid), making it the leader of its own process group so the
 * whole tree can be signaled with `process.kill(-pid, …)`. On Windows,
 * `detached` instead spawns a new console, so it stays off there.
 *
 * @param command - executable to run
 * @param args - argument vector
 * @param options - extra spawn options (detached is controlled here)
 * @returns the spawned ChildProcess
 */
export function spawnWithProcessGroup(
  command: string,
  args: string[],
  options: Parameters<typeof spawn>[2] = {}
): ChildProcess {
  // New process group on POSIX only; Windows semantics for detached differ.
  const useNewGroup = process.platform !== 'win32';
  const proc = spawn(command, args, {
    ...options,
    detached: useNewGroup,
  });
  if (proc.pid) {
    logger.debug({ pid: proc.pid, command, args: args.slice(0, 3) }, 'Spawned process with group cleanup support');
  }
  return proc;
}
/**
 * Periodic safety-net hook for zombie-process monitoring.
 *
 * Node.js reaps exited children automatically when their 'close' events fire,
 * and there is no public API (waitpid) to reap arbitrary zombies by hand, so
 * this emits a debug log and nothing else — the real prevention lives in the
 * per-process cleanup handlers. Kept as an extension point for a future
 * implementation.
 */
export function reapZombies(): void {
  logger.debug('Zombie reaping check (process handlers should prevent zombies)');
}

192
src/routes/api/git/[...path]/+server.ts

@ -19,6 +19,7 @@ import { isValidBranchName, sanitizeError, validateRepoPath } from '$lib/utils/s @@ -19,6 +19,7 @@ import { isValidBranchName, sanitizeError, validateRepoPath } from '$lib/utils/s
import { extractCloneUrls, fetchRepoAnnouncementsWithCache, findRepoAnnouncement } from '$lib/utils/nostr-utils.js';
import { eventCache } from '$lib/services/nostr/event-cache.js';
import { repoManager, maintainerService, ownershipTransferService, branchProtectionService, nostrClient } from '$lib/services/service-registry.js';
import { killProcessGroup, forceKillProcessGroup, cleanupProcess, closeProcessStreams } from '$lib/utils/process-cleanup.js';
// Resolve GIT_REPO_ROOT to absolute path (handles both relative and absolute paths)
const repoRootEnv = process.env.GIT_REPO_ROOT || '/repos';
@ -353,6 +354,8 @@ export const GET: RequestHandler = async ({ params, url, request }) => { @@ -353,6 +354,8 @@ export const GET: RequestHandler = async ({ params, url, request }) => {
// Security: Set timeout for git operations
const timeoutMs = GIT_OPERATION_TIMEOUT_MS;
let timeoutId: NodeJS.Timeout;
let forceKillTimeout: NodeJS.Timeout | null = null;
let resolved = false;
const gitProcess = spawn(gitHttpBackend, [], {
env: envVars,
@ -361,19 +364,16 @@ export const GET: RequestHandler = async ({ params, url, request }) => { @@ -361,19 +364,16 @@ export const GET: RequestHandler = async ({ params, url, request }) => {
shell: false
});
timeoutId = setTimeout(() => {
gitProcess.kill('SIGTERM');
// Force kill after grace period if process doesn't terminate
const forceKillTimeout = setTimeout(() => {
if (!gitProcess.killed) {
gitProcess.kill('SIGKILL');
}
}, 5000); // 5 second grace period
const chunks: Buffer[] = [];
let errorOutput = '';
// Set up error handlers BEFORE writing to stdin to prevent race conditions
gitProcess.on('error', (err) => {
// Aggressive cleanup on error
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout], 'SIGTERM');
// Clear force kill timeout if process terminates
gitProcess.on('close', () => {
clearTimeout(forceKillTimeout);
});
if (resolved) return;
resolved = true;
auditLogger.logRepoAccess(
originalOwnerPubkey,
@ -381,13 +381,11 @@ export const GET: RequestHandler = async ({ params, url, request }) => { @@ -381,13 +381,11 @@ export const GET: RequestHandler = async ({ params, url, request }) => {
operation,
`${npub}/${repoName}`,
'failure',
'Operation timeout'
`Process error: ${err.message}`
);
resolve(error(504, 'Git operation timeout'));
}, timeoutMs);
const chunks: Buffer[] = [];
let errorOutput = '';
const sanitizedError = sanitizeError(err);
resolve(error(500, `Failed to execute git-http-backend: ${sanitizedError}`));
});
gitProcess.stdout.on('data', (chunk: Buffer) => {
chunks.push(chunk);
@ -397,8 +395,48 @@ export const GET: RequestHandler = async ({ params, url, request }) => { @@ -397,8 +395,48 @@ export const GET: RequestHandler = async ({ params, url, request }) => {
errorOutput += chunk.toString();
});
// Set up timeout handler with aggressive cleanup
timeoutId = setTimeout(() => {
if (resolved) return;
// Kill entire process group (prevents zombies from child processes)
killProcessGroup(gitProcess, 'SIGTERM');
// Force kill after grace period if process doesn't terminate
forceKillTimeout = forceKillProcessGroup(gitProcess, 5000);
// Ensure streams are closed
closeProcessStreams(gitProcess);
auditLogger.logRepoAccess(
originalOwnerPubkey,
clientIp,
operation,
`${npub}/${repoName}`,
'failure',
'Operation timeout'
);
if (!resolved) {
resolved = true;
resolve(error(504, 'Git operation timeout'));
}
}, timeoutMs);
// Add exit handler as backup cleanup (in case close doesn't fire)
gitProcess.on('exit', (code, signal) => {
// Ensure cleanup happens even if close event doesn't fire
if (!resolved) {
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout]);
}
});
gitProcess.on('close', (code) => {
clearTimeout(timeoutId);
// Aggressive cleanup: clear timeouts and ensure streams are closed
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout]);
if (resolved) return;
resolved = true;
// Log audit entry after operation completes
if (code === 0) {
@ -892,6 +930,8 @@ export const POST: RequestHandler = async ({ params, url, request }) => { @@ -892,6 +930,8 @@ export const POST: RequestHandler = async ({ params, url, request }) => {
// Security: Set timeout for git operations
const timeoutMs = GIT_OPERATION_TIMEOUT_MS;
let timeoutId: NodeJS.Timeout;
let forceKillTimeout: NodeJS.Timeout | null = null;
let resolved = false;
const gitProcess = spawn(gitHttpBackend, [], {
env: envVars,
@ -900,19 +940,16 @@ export const POST: RequestHandler = async ({ params, url, request }) => { @@ -900,19 +940,16 @@ export const POST: RequestHandler = async ({ params, url, request }) => {
shell: false
});
timeoutId = setTimeout(() => {
gitProcess.kill('SIGTERM');
// Force kill after grace period if process doesn't terminate
const forceKillTimeout = setTimeout(() => {
if (!gitProcess.killed) {
gitProcess.kill('SIGKILL');
}
}, 5000); // 5 second grace period
const chunks: Buffer[] = [];
let errorOutput = '';
// Set up error handlers BEFORE writing to stdin to prevent race conditions
gitProcess.on('error', (err) => {
// Aggressive cleanup on error
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout], 'SIGTERM');
// Clear force kill timeout if process terminates
gitProcess.on('close', () => {
clearTimeout(forceKillTimeout);
});
if (resolved) return;
resolved = true;
auditLogger.logRepoAccess(
currentOwnerPubkey,
@ -920,17 +957,31 @@ export const POST: RequestHandler = async ({ params, url, request }) => { @@ -920,17 +957,31 @@ export const POST: RequestHandler = async ({ params, url, request }) => {
operation,
`${npub}/${repoName}`,
'failure',
'Operation timeout'
`Process error: ${err.message}`
);
resolve(error(504, 'Git operation timeout'));
}, timeoutMs);
const chunks: Buffer[] = [];
let errorOutput = '';
const sanitizedError = sanitizeError(err);
resolve(error(500, `Failed to execute git-http-backend: ${sanitizedError}`));
});
// Write request body to git-http-backend stdin
gitProcess.stdin.write(bodyBuffer);
gitProcess.stdin.end();
// Handle stdin errors
gitProcess.stdin.on('error', (err) => {
// Aggressive cleanup on stdin error
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout], 'SIGTERM');
if (resolved) return;
resolved = true;
auditLogger.logRepoAccess(
currentOwnerPubkey,
clientIp,
operation,
`${npub}/${repoName}`,
'failure',
`Stdin error: ${err.message}`
);
const sanitizedError = sanitizeError(err);
resolve(error(500, `Failed to write to git-http-backend: ${sanitizedError}`));
});
gitProcess.stdout.on('data', (chunk: Buffer) => {
chunks.push(chunk);
@ -940,8 +991,52 @@ export const POST: RequestHandler = async ({ params, url, request }) => { @@ -940,8 +991,52 @@ export const POST: RequestHandler = async ({ params, url, request }) => {
errorOutput += chunk.toString();
});
// Set up timeout handler with aggressive cleanup
timeoutId = setTimeout(() => {
if (resolved) return;
// Kill entire process group (prevents zombies from child processes)
killProcessGroup(gitProcess, 'SIGTERM');
// Force kill after grace period if process doesn't terminate
forceKillTimeout = forceKillProcessGroup(gitProcess, 5000);
// Ensure streams are closed
closeProcessStreams(gitProcess);
auditLogger.logRepoAccess(
currentOwnerPubkey,
clientIp,
operation,
`${npub}/${repoName}`,
'failure',
'Operation timeout'
);
if (!resolved) {
resolved = true;
resolve(error(504, 'Git operation timeout'));
}
}, timeoutMs);
// Add exit handler as backup cleanup (in case close doesn't fire)
gitProcess.on('exit', (code, signal) => {
// Ensure cleanup happens even if close event doesn't fire
if (!resolved) {
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout]);
}
});
// Write request body to git-http-backend stdin AFTER error handlers are set up
gitProcess.stdin.write(bodyBuffer);
gitProcess.stdin.end();
gitProcess.on('close', async (code) => {
clearTimeout(timeoutId);
// Aggressive cleanup: clear timeouts and ensure streams are closed
cleanupProcess(gitProcess, [timeoutId, forceKillTimeout]);
if (resolved) return;
resolved = true;
// Log audit entry after operation completes
if (code === 0) {
@ -1040,20 +1135,5 @@ export const POST: RequestHandler = async ({ params, url, request }) => { @@ -1040,20 +1135,5 @@ export const POST: RequestHandler = async ({ params, url, request }) => {
}
}));
});
gitProcess.on('error', (err) => {
clearTimeout(timeoutId);
// Log audit entry for process error
auditLogger.logRepoAccess(
currentOwnerPubkey,
clientIp,
operation,
`${npub}/${repoName}`,
'failure',
`Process error: ${err.message}`
);
const sanitizedError = sanitizeError(err);
resolve(error(500, `Failed to execute git-http-backend: ${sanitizedError}`));
});
});
};

25
src/routes/api/repos/local/+server.ts

@ -33,6 +33,19 @@ interface CacheEntry { @@ -33,6 +33,19 @@ interface CacheEntry {
const CACHE_TTL = 5 * 60 * 1000; // 5 minutes
let cache: CacheEntry | null = null;
// Track server startup time to invalidate cache on first request after startup
let serverStartTime = Date.now();
const STARTUP_GRACE_PERIOD = 10000; // 10 seconds - allow time for initial poll
/**
* Invalidate cache (internal use only - not exported to avoid SvelteKit build errors)
*/
function invalidateLocalReposCache(): void {
cache = null;
serverStartTime = Date.now();
logger.debug('Local repos cache invalidated');
}
interface LocalRepoItem {
npub: string;
repoName: string;
@ -207,11 +220,19 @@ export const GET: RequestHandler = async (event) => { @@ -207,11 +220,19 @@ export const GET: RequestHandler = async (event) => {
const gitDomain = event.url.searchParams.get('domain') || GIT_DOMAIN;
const forceRefresh = event.url.searchParams.get('refresh') === 'true';
// Check cache
if (!forceRefresh && cache && (Date.now() - cache.timestamp) < CACHE_TTL) {
// If server just started, always refresh to ensure we get latest repos
const timeSinceStartup = Date.now() - serverStartTime;
const isRecentStartup = timeSinceStartup < STARTUP_GRACE_PERIOD;
// Check cache (but skip if recent startup or force refresh)
if (!forceRefresh && !isRecentStartup && cache && (Date.now() - cache.timestamp) < CACHE_TTL) {
return json(cache.repos);
}
if (isRecentStartup) {
logger.debug({ timeSinceStartup }, 'Skipping cache due to recent server startup');
}
// Scan filesystem
const localRepos = await scanLocalRepos();

24
src/routes/api/search/+server.ts

@ -18,33 +18,11 @@ import { getUserRelays } from '$lib/services/nostr/user-relays.js'; @@ -18,33 +18,11 @@ import { getUserRelays } from '$lib/services/nostr/user-relays.js';
import { eventCache } from '$lib/services/nostr/event-cache.js';
import { decodeNostrAddress } from '$lib/services/nostr/nip19-utils.js';
import logger from '$lib/services/logger.js';
import { isParameterizedReplaceable } from '$lib/utils/nostr-event-utils.js';
// Replaceable event kinds (only latest per pubkey matters)
const REPLACEABLE_KINDS = [0, 3, 10002]; // Profile, Contacts, Relay List
/**
* Check if an event is a parameterized replaceable event (NIP-33)
* Parameterized replaceable events have:
* - kind >= 10000 && kind < 20000 (replaceable range) with a 'd' tag, OR
* - kind >= 30000 && kind < 40000 (addressable range) with a 'd' tag
*/
function isParameterizedReplaceable(event: NostrEvent): boolean {
const hasDTag = event.tags.some(t => t[0] === 'd' && t[1]);
if (!hasDTag) return false;
// Replaceable range (NIP-33)
if (event.kind >= 10000 && event.kind < 20000) {
return true;
}
// Addressable range (NIP-34) - also parameterized replaceable
if (event.kind >= 30000 && event.kind < 40000) {
return true;
}
return false;
}
/**
* Get the deduplication key for an event
* For replaceable events: kind:pubkey

6619
src/routes/repos/[npub]/[repo]/+page.svelte.backup

File diff suppressed because it is too large Load Diff

33
src/routes/repos/[npub]/[repo]/utils/discussion-utils.ts

@ -5,6 +5,7 @@ @@ -5,6 +5,7 @@
import type { NostrEvent } from '$lib/types/nostr.js';
import { KIND } from '$lib/types/nostr.js';
import { getReferencedEventFromDiscussion as getReferencedEventFromDiscussionUtil } from '$lib/utils/nostr-links.js';
/**
* Format discussion timestamp
@ -34,41 +35,13 @@ export function getDiscussionEvent(eventId: string, events: Map<string, NostrEve @@ -34,41 +35,13 @@ export function getDiscussionEvent(eventId: string, events: Map<string, NostrEve
/**
* Get referenced event from discussion (checks e-tag, a-tag, and q-tag)
* Re-exports the shared utility function for convenience
*/
export function getReferencedEventFromDiscussion(
event: NostrEvent,
events: Map<string, NostrEvent>
): NostrEvent | undefined {
// Check e-tag
const eTag = event.tags.find(t => t[0] === 'e' && t[1])?.[1];
if (eTag) {
const referenced = events.get(eTag);
if (referenced) return referenced;
}
// Check a-tag
const aTag = event.tags.find(t => t[0] === 'a' && t[1])?.[1];
if (aTag) {
const parts = aTag.split(':');
if (parts.length === 3) {
const kind = parseInt(parts[0]);
const pubkey = parts[1];
const dTag = parts[2];
return Array.from(events.values()).find(e =>
e.kind === kind &&
e.pubkey === pubkey &&
e.tags.find(t => t[0] === 'd' && t[1] === dTag)
);
}
}
// Check q-tag
const qTag = event.tags.find(t => t[0] === 'q' && t[1])?.[1];
if (qTag) {
return events.get(qTag);
}
return undefined;
return getReferencedEventFromDiscussionUtil(event, events);
}
/**

Loading…
Cancel
Save