diff --git a/src/lib/components/content/MarkdownRenderer.svelte b/src/lib/components/content/MarkdownRenderer.svelte index a4aa061..7d27be4 100644 --- a/src/lib/components/content/MarkdownRenderer.svelte +++ b/src/lib/components/content/MarkdownRenderer.svelte @@ -194,10 +194,87 @@ emojiUrls = resolvedUrls; } - // Replace emoji shortcodes with images in text + // Replace emoji shortcodes with images in text, but skip code blocks function replaceEmojis(text: string): string { let processed = text; + // Find all code blocks (Markdown and AsciiDoc syntax) + const codeBlockRanges: Array<{ start: number; end: number }> = []; + + // First, match AsciiDoc source blocks (---- ... ----) + // AsciiDoc source blocks can have attributes like [source, json] before the dashes + // Match: 4+ dashes on a line, content, then 4+ dashes on a line + // We'll match the dashes and then find the closing dashes + const asciidocSourceBlockPattern = /^----+$/gm; + const lines = text.split(/\r?\n/); + let inAsciidocBlock = false; + let blockStart = -1; + let lineIndex = 0; + let charIndex = 0; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + const lineStart = charIndex; + const lineEnd = charIndex + line.length; + + // Check if this line is 4+ dashes + if (/^----+$/.test(line.trim())) { + if (!inAsciidocBlock) { + // Starting a new block + inAsciidocBlock = true; + blockStart = lineStart; + } else { + // Ending a block + codeBlockRanges.push({ start: blockStart, end: lineEnd }); + inAsciidocBlock = false; + blockStart = -1; + } + } + + charIndex = lineEnd + 1; // +1 for newline + } + + // If we're still in a block at the end, close it + if (inAsciidocBlock && blockStart >= 0) { + codeBlockRanges.push({ start: blockStart, end: text.length }); + } + + // Match Markdown/AsciiDoc fenced code blocks (```...```) + // Match triple backticks with optional language identifier + let match; + const fencedCodeBlockPattern = /```[a-zA-Z]*\n?[\s\S]*?```/g; + while ((match = 
fencedCodeBlockPattern.exec(text)) !== null) { + const start = match.index; + const end = start + match[0].length; + // Only add if not already inside an AsciiDoc source block + const isInsideAsciidoc = codeBlockRanges.some(range => start >= range.start && end <= range.end); + if (!isInsideAsciidoc) { + codeBlockRanges.push({ start, end }); + } + } + + // Then match inline code (`code`) - but exclude those already inside other blocks + // Match single backtick, but not if it's part of triple backticks + const inlineCodePattern = /`[^`\n]+`/g; + while ((match = inlineCodePattern.exec(text)) !== null) { + const start = match.index; + const end = start + match[0].length; + + // Check if this inline code is already inside another code block + const isInsideBlock = codeBlockRanges.some(range => start >= range.start && end <= range.end); + if (!isInsideBlock) { + codeBlockRanges.push({ start, end }); + } + } + + // Sort ranges by start position + codeBlockRanges.sort((a, b) => a.start - b.start); + + // Helper function to check if a position is inside a code block + function isInCodeBlock(index: number): boolean { + return codeBlockRanges.some(range => index >= range.start && index < range.end); + } + // Replace from end to start to preserve indices const sortedEntries = Array.from(emojiUrls.entries()).sort((a, b) => { const indexA = processed.lastIndexOf(a[0]); @@ -210,7 +287,22 @@ const escapedShortcode = escapeHtml(shortcode); // Replace with img tag, preserving the shortcode as alt text const imgTag = `${escapedShortcode}`; - processed = processed.replaceAll(shortcode, imgTag); + + // Find all occurrences and only replace those outside code blocks + let searchIndex = 0; + while (true) { + const index = processed.indexOf(shortcode, searchIndex); + if (index === -1) break; + + if (!isInCodeBlock(index)) { + // Replace this occurrence + processed = processed.substring(0, index) + imgTag + processed.substring(index + shortcode.length); + searchIndex = index + 
imgTag.length; + } else { + // Skip this occurrence (it's in a code block) + searchIndex = index + shortcode.length; + } + } } return processed; @@ -454,6 +546,32 @@ // Post-process to fix any greentext that markdown converted to blockquotes html = postProcessGreentext(html); + // Remove emoji images that are inside code blocks (they should be plain text) + // This handles cases where emojis were replaced before markdown/AsciiDoc parsing + // Handle tags (inline code) + html = html.replace(/]*>([\s\S]*?)<\/code>/gi, (match, codeContent) => { + // Remove any emoji img tags inside code blocks and restore the original shortcode + return match.replace(/]*class="emoji-inline"[^>]*alt="([^"]*)"[^>]*>/gi, '$1'); + }); + + // Handle
<pre> tags (code blocks and AsciiDoc source blocks)
+    html = html.replace(/<pre[^>]*>([\s\S]*?)<\/pre>/gi, (match, preContent) => {
+      // Remove any emoji img tags inside pre blocks and restore the original shortcode
+      return match.replace(/<img[^>]*class="emoji-inline"[^>]*alt="([^"]*)"[^>]*>/gi, '$1');
+    });
+    
+    // Handle AsciiDoc source blocks (they use <div class="listingblock"> with <pre> inside)
+    html = html.replace(/<div[^>]*class="listingblock"[^>]*>([\s\S]*?)<\/div>/gi, (match, divContent) => {
+      // Remove emoji images from AsciiDoc listing blocks
+      return match.replace(/<img[^>]*class="emoji-inline"[^>]*alt="([^"]*)"[^>]*>/gi, '$1');
+    });
+    
+    // Also handle any other code-related elements that might contain emojis
+    html = html.replace(/<[^>]*class="[^"]*code[^"]*"[^>]*>([\s\S]*?)<\/[^>]+>/gi, (match, content) => {
+      // Remove emoji images from any element with "code" in its class
+      return match.replace(/<img[^>]*class="emoji-inline"[^>]*alt="([^"]*)"[^>]*>/gi, '$1');
+    });
+    
     // Fix malformed image tags - ensure all img src attributes are absolute URLs
     // This prevents the browser from trying to fetch markdown syntax or malformed tags as relative URLs
     html = html.replace(/<img([^>]*?)>/gi, (match, attributes) => {
diff --git a/src/lib/components/layout/Header.svelte b/src/lib/components/layout/Header.svelte
index 11d1de3..dda4ab1 100644
--- a/src/lib/components/layout/Header.svelte
+++ b/src/lib/components/layout/Header.svelte
@@ -47,6 +47,7 @@
         {/if}
         /Relay
         /Topics
+        /Repos
         /Cache
       
diff --git a/src/lib/services/content/git-repo-fetcher.ts b/src/lib/services/content/git-repo-fetcher.ts new file mode 100644 index 0000000..94371ce --- /dev/null +++ b/src/lib/services/content/git-repo-fetcher.ts @@ -0,0 +1,476 @@ +/** + * Service for fetching git repository data from various hosting platforms + * Supports GitHub, GitLab, Gitea, and other git hosting services + */ + +export interface GitRepoInfo { + name: string; + description?: string; + url: string; + defaultBranch: string; + branches: GitBranch[]; + commits: GitCommit[]; + files: GitFile[]; + readme?: { + path: string; + content: string; + format: 'markdown' | 'asciidoc'; + }; +} + +export interface GitBranch { + name: string; + commit: { + sha: string; + message: string; + author: string; + date: string; + }; +} + +export interface GitCommit { + sha: string; + message: string; + author: string; + date: string; +} + +export interface GitFile { + name: string; + path: string; + type: 'file' | 'dir'; + size?: number; +} + +/** + * Parse git URL to extract platform, owner, and repo + */ +function parseGitUrl(url: string): { platform: string; owner: string; repo: string; baseUrl: string } | null { + // GitHub + const githubMatch = url.match(/github\.com[/:]([^/]+)\/([^/]+?)(?:\.git)?\/?$/); + if (githubMatch) { + return { + platform: 'github', + owner: githubMatch[1], + repo: githubMatch[2].replace(/\.git$/, ''), + baseUrl: 'https://api.github.com' + }; + } + + // GitLab + const gitlabMatch = url.match(/gitlab\.com[/:]([^/]+)\/([^/]+?)(?:\.git)?\/?$/); + if (gitlabMatch) { + return { + platform: 'gitlab', + owner: gitlabMatch[1], + repo: gitlabMatch[2].replace(/\.git$/, ''), + baseUrl: 'https://gitlab.com/api/v4' + }; + } + + // Gitea (generic pattern) + const giteaMatch = url.match(/(https?:\/\/[^/]+)[/:]([^/]+)\/([^/]+?)(?:\.git)?\/?$/); + if (giteaMatch) { + return { + platform: 'gitea', + owner: giteaMatch[2], + repo: giteaMatch[3].replace(/\.git$/, ''), + baseUrl: `${giteaMatch[1]}/api/v1` + 
}; + } + + return null; +} + +/** + * Fetch repository data from GitHub + */ +async function fetchFromGitHub(owner: string, repo: string): Promise { + try { + const repoResponse = await fetch(`https://api.github.com/repos/${owner}/${repo}`); + if (!repoResponse.ok) { + console.warn(`GitHub API error for repo ${owner}/${repo}: ${repoResponse.status} ${repoResponse.statusText}`); + return null; + } + const repoData = await repoResponse.json(); + + const defaultBranch = repoData.default_branch || 'main'; + const [branchesResponse, commitsResponse, treeResponse] = await Promise.all([ + fetch(`https://api.github.com/repos/${owner}/${repo}/branches`), + fetch(`https://api.github.com/repos/${owner}/${repo}/commits?per_page=10`), + fetch(`https://api.github.com/repos/${owner}/${repo}/git/trees/${defaultBranch}?recursive=1`).catch(() => null) + ]); + + // Check if responses are OK and parse JSON + let branchesData: any[] = []; + let commitsData: any[] = []; + let treeData: any = null; + + if (branchesResponse && branchesResponse.ok) { + branchesData = await branchesResponse.json(); + if (!Array.isArray(branchesData)) { + console.warn('GitHub branches response is not an array:', branchesData); + branchesData = []; + } + } else { + console.warn(`GitHub API error for branches: ${branchesResponse?.status || 'unknown'}`); + } + + if (commitsResponse && commitsResponse.ok) { + commitsData = await commitsResponse.json(); + if (!Array.isArray(commitsData)) { + console.warn('GitHub commits response is not an array:', commitsData); + commitsData = []; + } + } else { + console.warn(`GitHub API error for commits: ${commitsResponse?.status || 'unknown'}`); + } + + if (treeResponse && treeResponse.ok) { + treeData = await treeResponse.json(); + } + + // Create a map of commit SHAs to commit details for lookup + const commitMap = new Map(); + for (const c of commitsData) { + if (c.sha) { + const commitObj = c.commit || {}; + commitMap.set(c.sha, { + message: commitObj.message ? 
commitObj.message.split('\n')[0] : '', + author: commitObj.author?.name || commitObj.committer?.name || 'Unknown', + date: commitObj.author?.date || commitObj.committer?.date || new Date().toISOString() + }); + } + } + + const branches: GitBranch[] = branchesData.map((b: any) => { + const commitSha = b.commit?.sha || ''; + // Try to get commit details from the commit object first, then fall back to our commit map + const commitObj = b.commit?.commit || {}; + let commitMessage = commitObj.message ? commitObj.message.split('\n')[0] : ''; + let commitAuthor = commitObj.author?.name || commitObj.committer?.name || ''; + let commitDate = commitObj.author?.date || commitObj.committer?.date || ''; + + // If commit details are missing, try to find them in our commit map + if (!commitMessage && commitSha) { + const mappedCommit = commitMap.get(commitSha); + if (mappedCommit) { + commitMessage = mappedCommit.message; + commitAuthor = mappedCommit.author; + commitDate = mappedCommit.date; + } + } + + // Final fallbacks + if (!commitMessage) commitMessage = 'No commit message'; + if (!commitAuthor) commitAuthor = 'Unknown'; + if (!commitDate) commitDate = new Date().toISOString(); + + return { + name: b.name || '', + commit: { + sha: commitSha, + message: commitMessage, + author: commitAuthor, + date: commitDate + } + }; + }); + + const commits: GitCommit[] = commitsData.map((c: any) => { + const commitObj = c.commit || {}; + const message = commitObj.message ? 
commitObj.message.split('\n')[0] : ''; + const author = commitObj.author?.name || commitObj.committer?.name || 'Unknown'; + const date = commitObj.author?.date || commitObj.committer?.date || new Date().toISOString(); + + return { + sha: c.sha || '', + message: message, + author: author, + date: date + }; + }); + + const files: GitFile[] = treeData?.tree?.filter((item: any) => item.type === 'blob' || item.type === 'tree').map((item: any) => ({ + name: item.path.split('/').pop(), + path: item.path, + type: item.type === 'tree' ? 'dir' : 'file', + size: item.size + })) || []; + + // Try to fetch README (prioritize .adoc over .md) + let readme: { path: string; content: string; format: 'markdown' | 'asciidoc' } | undefined; + const readmeFiles = ['README.adoc', 'README.md', 'README.rst', 'README.txt']; + for (const readmeFile of readmeFiles) { + try { + const readmeData = await fetch(`https://api.github.com/repos/${owner}/${repo}/contents/${readmeFile}`).then(r => { + if (!r.ok) throw new Error('Not found'); + return r.json(); + }); + if (readmeData.content) { + const content = atob(readmeData.content.replace(/\s/g, '')); + readme = { + path: readmeFile, + content, + format: readmeFile.toLowerCase().endsWith('.adoc') ? 
'asciidoc' : 'markdown' + }; + break; // Found a README, stop searching + } + } catch { + continue; // Try next file + } + } + + return { + name: repoData.name, + description: repoData.description, + url: repoData.html_url, + defaultBranch: repoData.default_branch, + branches, + commits, + files, + readme + }; + } catch (error) { + console.error('Error fetching from GitHub:', error); + return null; + } +} + +/** + * Fetch repository data from GitLab + */ +async function fetchFromGitLab(owner: string, repo: string, baseUrl: string): Promise { + try { + const projectPath = `${owner}/${repo}`; + const encodedPath = encodeURIComponent(projectPath); + + const [repoData, branchesData, commitsData] = await Promise.all([ + fetch(`${baseUrl}/projects/${encodedPath}`).then(r => r.json()), + fetch(`${baseUrl}/projects/${encodedPath}/repository/branches`).then(r => r.json()), + fetch(`${baseUrl}/projects/${encodedPath}/repository/commits?per_page=10`).then(r => r.json()) + ]); + + const branches: GitBranch[] = branchesData.map((b: any) => ({ + name: b.name, + commit: { + sha: b.commit.id, + message: b.commit.message.split('\n')[0], + author: b.commit.author_name, + date: b.commit.committed_date + } + })); + + const commits: GitCommit[] = commitsData.map((c: any) => ({ + sha: c.id, + message: c.message.split('\n')[0], + author: c.author_name, + date: c.committed_date + })); + + // Fetch file tree + let files: GitFile[] = []; + try { + const treeData = await fetch(`${baseUrl}/projects/${encodedPath}/repository/tree?recursive=true&per_page=100`).then(r => r.json()); + files = treeData.map((item: any) => ({ + name: item.name, + path: item.path, + type: item.type === 'tree' ? 
'dir' : 'file', + size: item.size + })); + } catch { + // Tree fetch failed + } + + // Try to fetch README (prioritize .adoc over .md) + let readme: { path: string; content: string; format: 'markdown' | 'asciidoc' } | undefined; + const readmeFiles = ['README.adoc', 'README.md', 'README.rst', 'README.txt']; + for (const readmeFile of readmeFiles) { + try { + const fileData = await fetch(`${baseUrl}/projects/${encodedPath}/repository/files/${encodeURIComponent(readmeFile)}/raw?ref=${repoData.default_branch}`).then(r => { + if (!r.ok) throw new Error('Not found'); + return r.text(); + }); + readme = { + path: readmeFile, + content: fileData, + format: readmeFile.toLowerCase().endsWith('.adoc') ? 'asciidoc' : 'markdown' + }; + break; // Found a README, stop searching + } catch { + continue; // Try next file + } + } + + return { + name: repoData.name, + description: repoData.description, + url: repoData.web_url, + defaultBranch: repoData.default_branch, + branches, + commits, + files, + readme + }; + } catch (error) { + console.error('Error fetching from GitLab:', error); + return null; + } +} + +/** + * Fetch repository data from Gitea + */ +async function fetchFromGitea(owner: string, repo: string, baseUrl: string): Promise { + try { + const [repoData, branchesData, commitsData] = await Promise.all([ + fetch(`${baseUrl}/repos/${owner}/${repo}`).then(r => r.json()), + fetch(`${baseUrl}/repos/${owner}/${repo}/branches`).then(r => r.json()), + fetch(`${baseUrl}/repos/${owner}/${repo}/commits?limit=10`).then(r => r.json()) + ]); + + const branches: GitBranch[] = branchesData.map((b: any) => ({ + name: b.name, + commit: { + sha: b.commit.id, + message: b.commit.message.split('\n')[0], + author: b.commit.author.name, + date: b.commit.timestamp + } + })); + + const commits: GitCommit[] = commitsData.map((c: any) => ({ + sha: c.sha, + message: c.commit.message.split('\n')[0], + author: c.commit.author.name, + date: c.commit.timestamp + })); + + // Fetch file tree + let 
files: GitFile[] = []; + try { + const treeData = await fetch(`${baseUrl}/repos/${owner}/${repo}/contents?ref=${repoData.default_branch}`).then(r => r.json()); + files = treeData.map((item: any) => ({ + name: item.name, + path: item.path, + type: item.type === 'dir' ? 'dir' : 'file', + size: item.size + })); + } catch { + // Tree fetch failed + } + + // Try to fetch README (prioritize .adoc over .md) + let readme: { path: string; content: string; format: 'markdown' | 'asciidoc' } | undefined; + const readmeFiles = ['README.adoc', 'README.md', 'README.rst', 'README.txt']; + for (const readmeFile of readmeFiles) { + try { + const fileData = await fetch(`${baseUrl}/repos/${owner}/${repo}/contents/${readmeFile}?ref=${repoData.default_branch}`).then(r => { + if (!r.ok) throw new Error('Not found'); + return r.json(); + }); + if (fileData.content) { + const content = atob(fileData.content.replace(/\s/g, '')); + readme = { + path: readmeFile, + content, + format: readmeFile.toLowerCase().endsWith('.adoc') ? 
'asciidoc' : 'markdown' + }; + break; // Found a README, stop searching + } + } catch { + continue; // Try next file + } + } + + return { + name: repoData.name, + description: repoData.description, + url: repoData.html_url, + defaultBranch: repoData.default_branch, + branches, + commits, + files, + readme + }; + } catch (error) { + console.error('Error fetching from Gitea:', error); + return null; + } +} + +/** + * Fetch repository data from a git URL + */ +export async function fetchGitRepo(url: string): Promise { + const parsed = parseGitUrl(url); + if (!parsed) { + console.error('Unable to parse git URL:', url); + return null; + } + + const { platform, owner, repo, baseUrl } = parsed; + + switch (platform) { + case 'github': + return fetchFromGitHub(owner, repo); + case 'gitlab': + return fetchFromGitLab(owner, repo, baseUrl); + case 'gitea': + return fetchFromGitea(owner, repo, baseUrl); + default: + console.error('Unsupported platform:', platform); + return null; + } +} + +/** + * Convert SSH git URL to HTTPS format + */ +function convertSshToHttps(url: string): string | null { + // Handle git@host:user/repo.git format + const sshMatch = url.match(/git@([^:]+):(.+?)(?:\.git)?$/); + if (sshMatch) { + const [, host, path] = sshMatch; + return `https://${host}/${path}${path.endsWith('.git') ? 
'' : '.git'}`; + } + return null; +} + +/** + * Extract git URLs from a Nostr event + */ +export function extractGitUrls(event: { tags: string[][]; content: string }): string[] { + const urls: string[] = []; + + // Check tags for git URLs (including 'clone' tag which is used in NIP-34) + for (const tag of event.tags) { + if (tag[0] === 'r' || tag[0] === 'url' || tag[0] === 'git' || tag[0] === 'clone') { + const url = tag[1]; + if (!url) continue; + + // Convert SSH URLs to HTTPS + if (url.startsWith('git@')) { + const httpsUrl = convertSshToHttps(url); + if (httpsUrl) { + urls.push(httpsUrl); + continue; + } + } + + // Check if it's a git URL + if (url.includes('github.com') || url.includes('gitlab.com') || url.includes('gitea') || url.includes('.git') || url.startsWith('http')) { + urls.push(url); + } + } + } + + // Check content for git URLs + const urlRegex = /(https?:\/\/[^\s]+\.git|https?:\/\/(?:github|gitlab|gitea)[^\s]+)/gi; + const matches = event.content.match(urlRegex); + if (matches) { + urls.push(...matches); + } + + return [...new Set(urls)]; // Deduplicate +} diff --git a/src/lib/types/kind-lookup.ts b/src/lib/types/kind-lookup.ts index b02960c..13565ea 100644 --- a/src/lib/types/kind-lookup.ts +++ b/src/lib/types/kind-lookup.ts @@ -90,7 +90,13 @@ export const KIND = { MUTE_LIST: 10000, BADGES: 30008, FOLOW_SET: 30000, - HTTP_AUTH: 27235 // NIP-98 HTTP Auth (matches nostr-tools and jumble) + HTTP_AUTH: 27235, // NIP-98 HTTP Auth (matches nostr-tools and jumble) + REPO_ANNOUNCEMENT: 30617, // NIP-34 Repository Announcement + ISSUE: 1621, // NIP-34 Issue + STATUS_OPEN: 1630, // NIP-34 Status: Open + STATUS_APPLIED: 1631, // NIP-34 Status: Applied/Merged/Resolved + STATUS_CLOSED: 1632, // NIP-34 Status: Closed + STATUS_DRAFT: 1633 // NIP-34 Status: Draft } as const; export const KIND_LOOKUP: Record = { @@ -146,6 +152,14 @@ export const KIND_LOOKUP: Record = { [KIND.MUTE_LIST]: { number: KIND.MUTE_LIST, description: 'Mute List', showInFeed: false, 
isSecondaryKind: false }, [KIND.BADGES]: { number: KIND.BADGES, description: 'Badges', showInFeed: false, isSecondaryKind: false }, [KIND.FOLOW_SET]: { number: KIND.FOLOW_SET, description: 'Follow Set', showInFeed: false, isSecondaryKind: false }, + + // Repository (NIP-34) + [KIND.REPO_ANNOUNCEMENT]: { number: KIND.REPO_ANNOUNCEMENT, description: 'Repository Announcement', showInFeed: false, isSecondaryKind: false }, + [KIND.ISSUE]: { number: KIND.ISSUE, description: 'Issue', showInFeed: true, isSecondaryKind: false }, + [KIND.STATUS_OPEN]: { number: KIND.STATUS_OPEN, description: 'Status: Open', showInFeed: false, isSecondaryKind: true }, + [KIND.STATUS_APPLIED]: { number: KIND.STATUS_APPLIED, description: 'Status: Applied/Merged/Resolved', showInFeed: false, isSecondaryKind: true }, + [KIND.STATUS_CLOSED]: { number: KIND.STATUS_CLOSED, description: 'Status: Closed', showInFeed: false, isSecondaryKind: true }, + [KIND.STATUS_DRAFT]: { number: KIND.STATUS_DRAFT, description: 'Status: Draft', showInFeed: false, isSecondaryKind: true }, }; /** diff --git a/src/routes/repos/+page.svelte b/src/routes/repos/+page.svelte new file mode 100644 index 0000000..eeed9f3 --- /dev/null +++ b/src/routes/repos/+page.svelte @@ -0,0 +1,357 @@ + + +
+ +
+
+

/Repos

+

+ Discover and explore repositories announced on Nostr +

+ +
+ +
+
+ + {#if loading} +
+

Loading repositories...

+
+ {:else if filteredRepos.length === 0} +
+

+ {searchQuery ? 'No repositories found matching your search.' : 'No repositories found.'} +

+
+ {:else} +
+ {#each filteredRepos as repo (repo.id)} +
openRepo(repo)} + onkeydown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + openRepo(repo); + } + }} + role="button" + tabindex="0" + > +
+

{getRepoName(repo)}

+ Kind {repo.kind} +
+ {#if getRepoDescription(repo)} +

{getRepoDescription(repo)}

+ {/if} +
+ + {new Date(repo.created_at * 1000).toLocaleDateString()} + +
+
+ {/each} +
+ {/if} +
+ + diff --git a/src/routes/repos/[naddr]/+page.svelte b/src/routes/repos/[naddr]/+page.svelte new file mode 100644 index 0000000..63a09c1 --- /dev/null +++ b/src/routes/repos/[naddr]/+page.svelte @@ -0,0 +1,1836 @@ + + +
+ +
+ {#if loading} +
+

Loading repository...

+
+ {:else if !repoEvent} +
+

Repository not found.

+
+ {:else} +
+

+ {getRepoName()} +

+ {#if getRepoDescription()} +

+ {getRepoDescription()} +

+ {/if} + + +
+ + + + + {#if getDocumentation().length > 0} + + {/if} +
+
+ + +
+ {#if activeTab === 'metadata'} + + {:else if activeTab === 'about'} +
+ + {#if gitRepo?.readme} +
+ {@html renderReadme(gitRepo.readme.content, gitRepo.readme.format)} +
+ {:else} +
+

No README found.

+
+ {/if} +
+ {:else if activeTab === 'repository'} +
+ {#if gitRepo} + +
+

+ Latest Commit +

+ {#if gitRepo.commits.length > 0} +
+
+ {gitRepo.commits[0].sha.slice(0, 7)} + {gitRepo.commits[0].message} +
+
+ {gitRepo.commits[0].author} + {new Date(gitRepo.commits[0].date).toLocaleString()} +
+
+ {/if} + +

+ Branches +

+
+ {#each gitRepo.branches as branch} +
+ {branch.name} + {#if branch.name === gitRepo.defaultBranch} + default + {/if} + {branch.commit.sha.slice(0, 7)} + {branch.commit.message} +
+ {/each} +
+
+ + +
+

+ File Structure +

+ {#if gitRepo.files.length > 0} +
+
{renderFileTree(getFileTree(gitRepo.files))}
+
+ {:else} +
+

No files found.

+
+ {/if} +
+ {:else} +
+

Git repository data not available.

+
+ {/if} +
+ {:else if activeTab === 'issues'} +
+ {#if issues.length > 0} +
+ + + + {#if statusFilter} + Showing {filteredIssues.length} of {issues.length} issues + {:else} + {issues.length} {issues.length === 1 ? 'issue' : 'issues'} + {/if} + +
+
+ {#if filteredIssues.length > 0} + {#each filteredIssues as issue} + {@const currentStatus = getCurrentStatus(issue.id)} + {@const isChanging = changingStatus.get(issue.id) || false} +
+
+
+ + + {#if isChanging} + Updating... + {/if} +
+
+ + {#if issueComments.has(issue.id)} +
+

Comments ({issueComments.get(issue.id)!.length})

+ {#each issueComments.get(issue.id)! as comment} +
+ +
+ {/each} +
+ {/if} +
+ {/each} + {:else} +
+

No issues found with status "{statusFilter}".

+
+ {/if} +
+ {:else} +
+

No issues found.

+
+ {/if} +
+ {:else if activeTab === 'documentation'} +
+ {#if documentationEvents.size > 0} +
+ {#each Array.from(documentationEvents.entries()) as [docNaddr, docEvent]} +
+
+

Documentation: {docNaddr.slice(0, 20)}...

+
+ Kind {docEvent.kind} + View Event +
+
+
+ {#if docEvent.kind === KIND.LONG_FORM_NOTE || docEvent.kind === KIND.SHORT_TEXT_NOTE} + + {:else} + + {@const isAsciidoc = docEvent.content.includes('= ') || docEvent.content.includes('== ') || docEvent.tags.some(t => Array.isArray(t) && t[0] === 'format' && t[1] === 'asciidoc')} + {#if isAsciidoc} +
+ {@html renderReadme(docEvent.content, 'asciidoc')} +
+ {:else} + + {/if} + {/if} +
+
+ {/each} +
+ {:else} +
+

No documentation found.

+
+ {/if} +
+ {/if} +
+ {/if} +
+ +