15 changed files with 1390 additions and 2075 deletions
@ -1,303 +0,0 @@ |
|||||||
import { useSecondaryPage } from '@/PageManager' |
|
||||||
import ImageWithLightbox from '@/components/ImageWithLightbox' |
|
||||||
import { getLongFormArticleMetadataFromEvent } from '@/lib/event-metadata' |
|
||||||
import { toNoteList } from '@/lib/link' |
|
||||||
import { ChevronDown, ChevronRight } from 'lucide-react' |
|
||||||
import { Event, kinds } from 'nostr-tools' |
|
||||||
import { useMemo, useState, useEffect, useRef } from 'react' |
|
||||||
import { useEventFieldParser } from '@/hooks/useContentParser' |
|
||||||
import HighlightSourcePreview from '../../UniversalContent/HighlightSourcePreview' |
|
||||||
import { Button } from '@/components/ui/button' |
|
||||||
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible' |
|
||||||
import { ExtendedKind } from '@/constants' |
|
||||||
|
|
||||||
/**
 * Renders a long-form nostr event as an article: header (title, summary,
 * cover image), parsed body HTML, and — for article-type kinds — a
 * collapsible "Article Info" section with highlight sources and hashtags.
 */
export default function Article({
  event,
  className
}: {
  event: Event
  className?: string
}) {
  const { push } = useSecondaryPage()
  // Title / summary / image extracted from the event's tags.
  const metadata = useMemo(() => getLongFormArticleMetadataFromEvent(event), [event])
  const [isInfoOpen, setIsInfoOpen] = useState(false)

  // Determine if this is an article-type event that should show ToC and Article Info
  const isArticleType = useMemo(() => {
    return (
      event.kind === kinds.LongFormArticle ||
      event.kind === ExtendedKind.WIKI_ARTICLE ||
      event.kind === ExtendedKind.PUBLICATION ||
      event.kind === ExtendedKind.PUBLICATION_CONTENT
    )
  }, [event.kind])

  // Use the comprehensive content parser
  const { parsedContent, isLoading, error } = useEventFieldParser(event, 'content', {
    enableMath: true,
    enableSyntaxHighlighting: true
  })

  const contentRef = useRef<HTMLDivElement>(null)

  // Handle wikilink clicks by showing a small dropdown with external viewers.
  useEffect(() => {
    // Capture the node so the cleanup removes the listener from the SAME
    // element it was attached to, even if the ref is reassigned before
    // unmount (reading contentRef.current inside the cleanup is unreliable).
    const contentEl = contentRef.current
    if (!contentEl) return

    const handleWikilinkClick = (event: MouseEvent) => {
      const target = event.target as HTMLElement
      if (!target.classList.contains('wikilink')) return

      event.preventDefault()
      const dTag = target.getAttribute('data-dtag')
      const displayText = target.getAttribute('data-display')
      if (!dTag || !displayText) return

      // Create a simple dropdown menu; only one may exist at a time.
      document.querySelector('.wikilink-dropdown')?.remove()

      const dropdown = document.createElement('div')
      dropdown.className =
        'wikilink-dropdown fixed bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 rounded-md shadow-lg z-50 p-2'
      dropdown.style.left = `${event.pageX}px`
      dropdown.style.top = `${event.pageY + 10}px`

      // Close handler for clicks outside the dropdown. removeDropdown also
      // detaches this handler so dismissing via a button does not leave a
      // stale document-level listener behind.
      const closeDropdown = (e: MouseEvent) => {
        if (!dropdown.contains(e.target as Node)) {
          removeDropdown()
        }
      }
      const removeDropdown = () => {
        dropdown.remove()
        document.removeEventListener('click', closeDropdown)
      }

      const externalLinkIcon =
        '<svg class="w-3 h-3" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M10 6H6a2 2 0 00-2 2v10a2 2 0 002 2h10a2 2 0 002-2v-4M14 4h6m0 0v6m0-6L10 14" /></svg>'

      const wikistrButton = document.createElement('button')
      wikistrButton.className =
        'w-full text-left px-3 py-2 text-sm hover:bg-gray-100 dark:hover:bg-gray-700 rounded flex items-center gap-2'
      wikistrButton.innerHTML = `${externalLinkIcon}View on Wikistr`
      wikistrButton.onclick = () => {
        window.open(`https://wikistr.imwald.eu/${dTag}`, '_blank', 'noopener,noreferrer')
        removeDropdown()
      }

      const alexandriaButton = document.createElement('button')
      alexandriaButton.className =
        'w-full text-left px-3 py-2 text-sm hover:bg-gray-100 dark:hover:bg-gray-700 rounded flex items-center gap-2'
      alexandriaButton.innerHTML = `${externalLinkIcon}View on Alexandria`
      alexandriaButton.onclick = () => {
        window.open(`https://next-alexandria.gitcitadel.eu/events?d=${dTag}`, '_blank', 'noopener,noreferrer')
        removeDropdown()
      }

      dropdown.appendChild(wikistrButton)
      dropdown.appendChild(alexandriaButton)
      document.body.appendChild(dropdown)

      // Defer registration so the click that opened the dropdown does not
      // immediately close it.
      setTimeout(() => document.addEventListener('click', closeDropdown), 0)
    }

    contentEl.addEventListener('click', handleWikilinkClick)
    return () => {
      contentEl.removeEventListener('click', handleWikilinkClick)
    }
  }, [parsedContent])

  // Process nostr addresses and other interactive elements after HTML is rendered
  useEffect(() => {
    if (!contentRef.current || !parsedContent) return

    const processInteractiveElements = () => {
      // Process embedded note containers
      const embeddedNotes = contentRef.current?.querySelectorAll('[data-embedded-note]')
      embeddedNotes?.forEach((container) => {
        const bech32Id = container.getAttribute('data-embedded-note')
        if (bech32Id) {
          // Replace with actual EmbeddedNote component
          const embeddedNoteElement = document.createElement('div')
          embeddedNoteElement.innerHTML = `<div data-embedded-note="${bech32Id}">Loading embedded event...</div>`
          container.parentNode?.replaceChild(embeddedNoteElement.firstChild!, container)
        }
      })

      // Process user handles
      const userHandles = contentRef.current?.querySelectorAll('[data-pubkey]')
      userHandles?.forEach((handle) => {
        const pubkey = handle.getAttribute('data-pubkey')
        if (pubkey) {
          // Replace with actual Username component
          const usernameElement = document.createElement('span')
          usernameElement.innerHTML = `<span class="user-handle" data-pubkey="${pubkey}">@${handle.textContent}</span>`
          handle.parentNode?.replaceChild(usernameElement.firstChild!, handle)
        }
      })
    }

    // Process elements after a short delay to ensure content is rendered
    const timeoutId = setTimeout(processInteractiveElements, 100)

    return () => clearTimeout(timeoutId)
  }, [parsedContent?.html])

  // Add ToC return buttons to section headers
  useEffect(() => {
    if (!contentRef.current || !isArticleType || !parsedContent) return

    const addTocReturnButtons = () => {
      const headers = contentRef.current?.querySelectorAll('h1, h2, h3, h4, h5, h6')
      if (!headers) return

      headers.forEach((header) => {
        // Skip if button already exists
        if (header.querySelector('.toc-return-btn')) return

        // Create the return button
        const returnBtn = document.createElement('span')
        returnBtn.className = 'toc-return-btn'
        returnBtn.innerHTML = '↑ ToC'
        returnBtn.title = 'Return to Table of Contents'

        // Scroll back to the ToC on click
        returnBtn.addEventListener('click', (e) => {
          e.preventDefault()
          e.stopPropagation()
          const tocElement = document.getElementById('toc')
          if (tocElement) {
            tocElement.scrollIntoView({ behavior: 'smooth', block: 'start' })
          }
        })

        // Add the button to the header
        header.appendChild(returnBtn)
      })
    }

    // Add buttons after a short delay to ensure content is rendered
    const timeoutId = setTimeout(addTocReturnButtons, 100)

    return () => clearTimeout(timeoutId)
  }, [parsedContent?.html, isArticleType])

  if (isLoading) {
    return (
      <div className={`prose prose-zinc max-w-none dark:prose-invert break-words ${className || ''}`}>
        <div>Loading content...</div>
      </div>
    )
  }

  if (error) {
    return (
      <div className={`prose prose-zinc max-w-none dark:prose-invert break-words ${className || ''}`}>
        <div className="text-red-500">Error loading content: {error.message}</div>
      </div>
    )
  }

  if (!parsedContent) {
    return (
      <div className={`prose prose-zinc max-w-none dark:prose-invert break-words ${className || ''}`}>
        <div>No content available</div>
      </div>
    )
  }

  // Strict-null-safe views over the optional parsed fields (comparing a
  // possibly-undefined length with `> 0` does not type-check under strict).
  const highlightSources = parsedContent.highlightSources ?? []
  const hashtags = parsedContent.hashtags ?? []

  return (
    <article className={`prose prose-zinc max-w-none dark:prose-invert break-words leading-relaxed ${parsedContent?.cssClasses || ''} ${className || ''}`}>
      {/* Article metadata */}
      <header className="mb-8">
        <h1 className="break-words text-4xl font-bold mb-6 leading-tight">{metadata.title}</h1>
        {metadata.summary && (
          <blockquote className="border-l-4 border-primary pl-6 italic text-muted-foreground mb-8 text-lg leading-relaxed">
            <p className="break-words">{metadata.summary}</p>
          </blockquote>
        )}
        {metadata.image && (
          <div className="mb-8">
            <ImageWithLightbox
              image={{ url: metadata.image, pubkey: event.pubkey }}
              className="w-full max-w-[400px] h-auto object-contain rounded-lg shadow-lg mx-auto"
            />
          </div>
        )}
      </header>

      {/* Render AsciiDoc content (everything is now processed as AsciiDoc) */}
      <div
        ref={contentRef}
        className={`prose prose-zinc max-w-none dark:prose-invert break-words leading-relaxed text-base ${isArticleType ? "asciidoc-content" : "simple-content"}`}
        style={{
          // Override any problematic AsciiDoc styles
          '--tw-prose-body': 'inherit',
          '--tw-prose-headings': 'inherit',
          '--tw-prose-lead': 'inherit',
          '--tw-prose-links': 'inherit',
          '--tw-prose-bold': 'inherit',
          '--tw-prose-counters': 'inherit',
          '--tw-prose-bullets': 'inherit',
          '--tw-prose-hr': 'inherit',
          '--tw-prose-quotes': 'inherit',
          '--tw-prose-quote-borders': 'inherit',
          '--tw-prose-captions': 'inherit',
          '--tw-prose-code': 'inherit',
          '--tw-prose-pre-code': 'inherit',
          '--tw-prose-pre-bg': 'inherit',
          '--tw-prose-th-borders': 'inherit',
          '--tw-prose-td-borders': 'inherit'
        } as React.CSSProperties}
        dangerouslySetInnerHTML={{ __html: parsedContent?.html || '' }}
      />

      {/* Collapsible Article Info - only for article-type events */}
      {isArticleType && (highlightSources.length > 0 || hashtags.length > 0) && (
        <Collapsible open={isInfoOpen} onOpenChange={setIsInfoOpen} className="mt-4">
          <CollapsibleTrigger asChild>
            <Button variant="outline" className="w-full justify-between">
              <span>Article Info</span>
              {isInfoOpen ? <ChevronDown className="h-4 w-4" /> : <ChevronRight className="h-4 w-4" />}
            </Button>
          </CollapsibleTrigger>
          <CollapsibleContent className="space-y-4 mt-2">

            {/* Highlight sources */}
            {highlightSources.length > 0 && (
              <div className="p-4 bg-muted rounded-lg">
                <h4 className="text-sm font-semibold mb-3">Highlight sources:</h4>
                <div className="space-y-3">
                  {highlightSources.map((source, index) => (
                    <HighlightSourcePreview
                      key={index}
                      source={source}
                      className="w-full"
                    />
                  ))}
                </div>
              </div>
            )}

            {/* Hashtags */}
            {hashtags.length > 0 && (
              <div className="p-4 bg-muted rounded-lg">
                <h4 className="text-sm font-semibold mb-3">Tags:</h4>
                <div className="flex gap-2 flex-wrap">
                  {hashtags.map((tag) => (
                    <div
                      key={tag}
                      title={tag}
                      className="flex items-center rounded-full px-3 py-1 bg-background text-muted-foreground max-w-44 cursor-pointer hover:bg-accent hover:text-accent-foreground transition-colors"
                      onClick={(e) => {
                        e.stopPropagation()
                        push(toNoteList({ hashtag: tag, kinds: [kinds.LongFormArticle] }))
                      }}
                    >
                      #<span className="truncate">{tag}</span>
                    </div>
                  ))}
                </div>
              </div>
            )}
          </CollapsibleContent>
        </Collapsible>
      )}
    </article>
  )
}
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,34 +0,0 @@ |
|||||||
import { EmbeddedMention, EmbeddedNote } from '@/components/Embedded' |
|
||||||
import { nip19 } from 'nostr-tools' |
|
||||||
import { useMemo } from 'react' |
|
||||||
import logger from '@/lib/logger' |
|
||||||
|
|
||||||
interface NostrNodeProps {
  rawText: string
  bech32Id?: string
}

/**
 * Renders a nostr bech32 reference: profile identifiers (npub/nprofile)
 * become an embedded mention, event/address identifiers (note/nevent/naddr)
 * become an embedded note, and anything missing or undecodable falls back
 * to the raw text.
 */
export default function NostrNode({ rawText, bech32Id }: NostrNodeProps) {
  const resolved = useMemo(() => {
    const invalid = { type: 'invalid', id: '' }
    if (!bech32Id) return invalid
    try {
      const decoded = nip19.decode(bech32Id)
      switch (decoded.type) {
        case 'npub':
        case 'nprofile':
          return { type: 'mention', id: bech32Id }
        case 'note':
        case 'nevent':
        case 'naddr':
          return { type: 'note', id: bech32Id }
      }
    } catch (error) {
      logger.error('Invalid bech32 ID', { bech32Id, error })
    }
    return invalid
  }, [bech32Id])

  if (resolved.type === 'invalid') return rawText

  if (resolved.type === 'mention') {
    return <EmbeddedMention userId={resolved.id} className="not-prose" />
  }
  return <EmbeddedNote noteId={resolved.id} className="not-prose" />
}
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,30 +0,0 @@ |
|||||||
import { EmbeddedMention, EmbeddedNote } from '@/components/Embedded' |
|
||||||
import { nip19 } from 'nostr-tools' |
|
||||||
import { ComponentProps, useMemo } from 'react' |
|
||||||
import { Components } from './types' |
|
||||||
import logger from '@/lib/logger' |
|
||||||
|
|
||||||
/**
 * Renders a nostr bech32 identifier as an embedded mention (npub/nprofile)
 * or an embedded note (note/nevent/naddr). When the identifier is absent or
 * does not decode, the raw text is rendered unchanged.
 */
export default function NostrNode({ rawText, bech32Id }: ComponentProps<Components['nostr']>) {
  const { type, id } = useMemo(() => {
    const invalid = { type: 'invalid', id: '' }
    if (!bech32Id) return invalid
    try {
      const decodedType = nip19.decode(bech32Id).type
      if (decodedType === 'npub' || decodedType === 'nprofile') {
        return { type: 'mention', id: bech32Id }
      }
      if (decodedType === 'nevent' || decodedType === 'naddr' || decodedType === 'note') {
        return { type: 'note', id: bech32Id }
      }
    } catch (error) {
      logger.error('Invalid bech32 ID', { bech32Id, error })
    }
    return invalid
  }, [bech32Id])

  if (type === 'invalid') return rawText

  return type === 'mention' ? (
    <EmbeddedMention userId={id} className="not-prose" />
  ) : (
    <EmbeddedNote noteId={id} className="not-prose" />
  )
}
|
||||||
@ -0,0 +1,207 @@ |
|||||||
|
import { isImage, isVideo, isAudio } from '@/lib/url' |
||||||
|
import { URL_REGEX } from '@/constants' |
||||||
|
|
||||||
|
/** |
||||||
|
* Preprocess content to convert raw media URLs and hyperlinks to markdown syntax |
||||||
|
* - Images: https://example.com/image.png -> 
|
||||||
|
* - Videos: https://example.com/video.mp4 -> 
|
||||||
|
* - Audio: https://example.com/audio.mp3 -> 
|
||||||
|
* - Hyperlinks: https://example.com/page -> [https://example.com/page](https://example.com/page)
|
||||||
|
*/ |
||||||
|
export function preprocessMarkdownMediaLinks(content: string): string { |
||||||
|
let processed = content |
||||||
|
|
||||||
|
// Find all URLs but process them in reverse order to preserve indices
|
||||||
|
const allMatches: Array<{ url: string; index: number }> = [] |
||||||
|
|
||||||
|
let match |
||||||
|
const regex = new RegExp(URL_REGEX.source, URL_REGEX.flags) |
||||||
|
while ((match = regex.exec(content)) !== null) { |
||||||
|
const index = match.index |
||||||
|
const url = match[0] |
||||||
|
const before = content.substring(Math.max(0, index - 20), index) |
||||||
|
|
||||||
|
// Check if this URL is already part of markdown syntax
|
||||||
|
// Skip if preceded by: [text](url,  || before.match(/\]\([^)]*$/) || before.match(/!\[[^\]]*$/)) { |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
allMatches.push({ url, index }) |
||||||
|
} |
||||||
|
|
||||||
|
// Process in reverse order to preserve indices
|
||||||
|
for (let i = allMatches.length - 1; i >= 0; i--) { |
||||||
|
const { url, index } = allMatches[i] |
||||||
|
|
||||||
|
// Check if URL is in code block
|
||||||
|
const beforeUrl = content.substring(0, index) |
||||||
|
const backticksCount = (beforeUrl.match(/```/g) || []).length |
||||||
|
if (backticksCount % 2 === 1) { |
||||||
|
continue // In code block
|
||||||
|
} |
||||||
|
|
||||||
|
// Check if URL is in inline code
|
||||||
|
const lastBacktick = beforeUrl.lastIndexOf('`') |
||||||
|
if (lastBacktick !== -1) { |
||||||
|
const afterUrl = content.substring(index + url.length) |
||||||
|
const nextBacktick = afterUrl.indexOf('`') |
||||||
|
if (nextBacktick !== -1) { |
||||||
|
const codeBefore = beforeUrl.substring(lastBacktick + 1) |
||||||
|
const codeAfter = afterUrl.substring(0, nextBacktick) |
||||||
|
// If no newlines between backticks, it's inline code
|
||||||
|
if (!codeBefore.includes('\n') && !codeAfter.includes('\n')) { |
||||||
|
continue |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Check if it's a media URL
|
||||||
|
const isImageUrl = isImage(url) |
||||||
|
const isVideoUrl = isVideo(url) |
||||||
|
const isAudioUrl = isAudio(url) |
||||||
|
|
||||||
|
let replacement: string |
||||||
|
if (isImageUrl || isVideoUrl || isAudioUrl) { |
||||||
|
// Media URLs: convert to 
|
||||||
|
replacement = `` |
||||||
|
} else { |
||||||
|
// Regular hyperlinks: convert to [url](url) format
|
||||||
|
replacement = `[${url}](${url})` |
||||||
|
} |
||||||
|
|
||||||
|
// Replace the URL
|
||||||
|
processed = processed.substring(0, index) + replacement + processed.substring(index + url.length) |
||||||
|
} |
||||||
|
|
||||||
|
return processed |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Preprocess content to convert raw media URLs and hyperlinks to AsciiDoc syntax |
||||||
|
* - Images: https://example.com/image.png -> image::https://example.com/image.png[]
|
||||||
|
* - Videos: https://example.com/video.mp4 -> video::https://example.com/video.mp4[]
|
||||||
|
* - Audio: https://example.com/audio.mp3 -> audio::https://example.com/audio.mp3[]
|
||||||
|
* - Hyperlinks: https://example.com/page -> https://example.com/page[link text]
|
||||||
|
* - Wikilinks: [[link]] or [[link|display]] -> +++WIKILINK:link|display+++ (passthrough for post-processing) |
||||||
|
*/ |
||||||
|
export function preprocessAsciidocMediaLinks(content: string): string { |
||||||
|
let processed = content |
||||||
|
|
||||||
|
// First, protect wikilinks by converting them to passthrough format
|
||||||
|
// This prevents AsciiDoc from processing them and prevents URLs inside from being processed
|
||||||
|
const wikilinkRegex = /\[\[([^\]]+)\]\]/g |
||||||
|
const wikilinkRanges: Array<{ start: number; end: number }> = [] |
||||||
|
const wikilinkMatches = Array.from(content.matchAll(wikilinkRegex)) |
||||||
|
wikilinkMatches.forEach(match => { |
||||||
|
if (match.index !== undefined) { |
||||||
|
wikilinkRanges.push({ |
||||||
|
start: match.index, |
||||||
|
end: match.index + match[0].length |
||||||
|
}) |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
processed = processed.replace(wikilinkRegex, (_match, linkContent) => { |
||||||
|
// Convert to AsciiDoc passthrough format so it's preserved
|
||||||
|
return `+++WIKILINK:${linkContent}+++` |
||||||
|
}) |
||||||
|
|
||||||
|
// Find all URLs but process them in reverse order to preserve indices
|
||||||
|
const allMatches: Array<{ url: string; index: number }> = [] |
||||||
|
|
||||||
|
let match |
||||||
|
const regex = new RegExp(URL_REGEX.source, URL_REGEX.flags) |
||||||
|
while ((match = regex.exec(content)) !== null) { |
||||||
|
const index = match.index |
||||||
|
const url = match[0] |
||||||
|
const urlEnd = index + url.length |
||||||
|
|
||||||
|
// Skip URLs that are inside wikilinks
|
||||||
|
const isInWikilink = wikilinkRanges.some(range =>
|
||||||
|
index >= range.start && urlEnd <= range.end |
||||||
|
) |
||||||
|
if (isInWikilink) { |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
const before = content.substring(Math.max(0, index - 30), index) |
||||||
|
|
||||||
|
// Check if this URL is already part of AsciiDoc syntax
|
||||||
|
// Skip if preceded by: image::, video::, audio::, or link:
|
||||||
|
if (before.match(/image::\s*$/) ||
|
||||||
|
before.match(/video::\s*$/) ||
|
||||||
|
before.match(/audio::\s*$/) || |
||||||
|
before.match(/link:\S+\[/) || |
||||||
|
before.match(/https?:\/\/[^\s]*\[/)) { |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
allMatches.push({ url, index }) |
||||||
|
} |
||||||
|
|
||||||
|
// Process in reverse order to preserve indices
|
||||||
|
for (let i = allMatches.length - 1; i >= 0; i--) { |
||||||
|
const { url, index } = allMatches[i] |
||||||
|
|
||||||
|
// Check if URL is in code block
|
||||||
|
const beforeUrl = content.substring(0, index) |
||||||
|
const codeBlockCount = (beforeUrl.match(/----/g) || []).length |
||||||
|
if (codeBlockCount % 2 === 1) { |
||||||
|
continue // In code block
|
||||||
|
} |
||||||
|
|
||||||
|
// Check if it's a media URL
|
||||||
|
const isImageUrl = isImage(url) |
||||||
|
const isVideoUrl = isVideo(url) |
||||||
|
const isAudioUrl = isAudio(url) |
||||||
|
|
||||||
|
let replacement: string |
||||||
|
if (isImageUrl) { |
||||||
|
// Images: convert to image::url[]
|
||||||
|
replacement = `image::${url}[]` |
||||||
|
} else if (isVideoUrl) { |
||||||
|
// Videos: convert to video::url[]
|
||||||
|
replacement = `video::${url}[]` |
||||||
|
} else if (isAudioUrl) { |
||||||
|
// Audio: convert to audio::url[]
|
||||||
|
replacement = `audio::${url}[]` |
||||||
|
} else { |
||||||
|
// Regular hyperlinks: convert to link:url[url]
|
||||||
|
replacement = `link:${url}[${url}]` |
||||||
|
} |
||||||
|
|
||||||
|
// Replace the URL
|
||||||
|
processed = processed.substring(0, index) + replacement + processed.substring(index + url.length) |
||||||
|
} |
||||||
|
|
||||||
|
return processed |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Post-process content to convert nostr: links and hashtags |
||||||
|
* This should be applied AFTER markup processing |
||||||
|
*/ |
||||||
|
export function postProcessNostrLinks(content: string): string { |
||||||
|
let processed = content |
||||||
|
|
||||||
|
// Convert nostr: prefixed links to embedded format
|
||||||
|
// nostr:npub1... -> [nostr:npub1...]
|
||||||
|
// nostr:note1... -> [nostr:note1...]
|
||||||
|
// etc.
|
||||||
|
const nostrRegex = /nostr:(npub1[a-z0-9]{58}|nprofile1[a-z0-9]+|note1[a-z0-9]{58}|nevent1[a-z0-9]+|naddr1[a-z0-9]+)/g |
||||||
|
processed = processed.replace(nostrRegex, (match) => { |
||||||
|
// Already in a link? Don't double-wrap
|
||||||
|
// Check if it's already in markdown link syntax [text](nostr:...)
|
||||||
|
// or AsciiDoc link syntax link:nostr:...[text]
|
||||||
|
return match // Keep as is for now, will be processed by the parser
|
||||||
|
}) |
||||||
|
|
||||||
|
// Convert hashtags to links
|
||||||
|
// #tag -> link:/notes?t=tag[#tag] (for AsciiDoc) or [#tag](/notes?t=tag) (for Markdown)
|
||||||
|
// But only if not already in a link
|
||||||
|
// We'll handle this in the rendering phase to avoid breaking markup
|
||||||
|
|
||||||
|
return processed |
||||||
|
} |
||||||
|
|
||||||
@ -1,78 +0,0 @@ |
|||||||
import { isImage, isVideo, isAudio } from '@/lib/url' |
|
||||||
|
|
||||||
/** |
|
||||||
* Preprocess markdown content to convert plain media URLs to proper markdown syntax |
|
||||||
* - Images: `https://example.com/image.png` -> `` |
|
||||||
* - Videos: `https://example.com/video.mp4` -> `` |
|
||||||
* - Audio: `https://example.com/audio.mp3` -> `` |
|
||||||
*/ |
|
||||||
export function preprocessMediaLinks(content: string): string { |
|
||||||
let processed = content |
|
||||||
|
|
||||||
// Find all matches but process them manually to avoid complex regex lookbehind
|
|
||||||
const allMatches: Array<{ url: string; index: number }> = [] |
|
||||||
let match |
|
||||||
|
|
||||||
// Find all candidate URLs
|
|
||||||
const tempRegex = /https?:\/\/[^\s<>"']+/gi |
|
||||||
while ((match = tempRegex.exec(content)) !== null) { |
|
||||||
const index = match.index |
|
||||||
const url = match[0] |
|
||||||
const before = content.substring(Math.max(0, index - 10), index) |
|
||||||
|
|
||||||
// Check if this URL is already part of markdown syntax
|
|
||||||
// Skip if preceded by: [text](url,  || before.match(/\]\([^)]*$/) || before.match(/!\[[^\]]*$/)) { |
|
||||||
continue |
|
||||||
} |
|
||||||
|
|
||||||
allMatches.push({ url, index }) |
|
||||||
} |
|
||||||
|
|
||||||
// Process in reverse order to preserve indices
|
|
||||||
for (let i = allMatches.length - 1; i >= 0; i--) { |
|
||||||
const { url, index } = allMatches[i] |
|
||||||
|
|
||||||
// Check if URL is in code block
|
|
||||||
const beforeUrl = content.substring(0, index) |
|
||||||
const backticksCount = (beforeUrl.match(/```/g) || []).length |
|
||||||
if (backticksCount % 2 === 1) { |
|
||||||
continue // In code block
|
|
||||||
} |
|
||||||
|
|
||||||
// Check if URL is in inline code
|
|
||||||
const lastBacktick = beforeUrl.lastIndexOf('`') |
|
||||||
if (lastBacktick !== -1) { |
|
||||||
const afterUrl = content.substring(index + url.length) |
|
||||||
const nextBacktick = afterUrl.indexOf('`') |
|
||||||
if (nextBacktick !== -1) { |
|
||||||
const codeBefore = beforeUrl.substring(lastBacktick + 1) |
|
||||||
const codeAfter = afterUrl.substring(0, nextBacktick) |
|
||||||
// If no newlines between backticks, it's inline code
|
|
||||||
if (!codeBefore.includes('\n') && !codeAfter.includes('\n')) { |
|
||||||
continue |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Check if it's a media URL
|
|
||||||
const isImageUrl = isImage(url) |
|
||||||
const isVideoUrl = isVideo(url) |
|
||||||
const isAudioUrl = isAudio(url) |
|
||||||
|
|
||||||
let replacement: string |
|
||||||
if (isImageUrl || isVideoUrl || isAudioUrl) { |
|
||||||
// Media URLs: convert to 
|
|
||||||
replacement = `` |
|
||||||
} else { |
|
||||||
// Don't convert non-media URLs - let autolink handle them
|
|
||||||
continue |
|
||||||
} |
|
||||||
|
|
||||||
// Replace the URL
|
|
||||||
processed = processed.substring(0, index) + replacement + processed.substring(index + url.length) |
|
||||||
} |
|
||||||
|
|
||||||
return processed |
|
||||||
} |
|
||||||
|
|
||||||
@ -1,83 +0,0 @@ |
|||||||
import type { PhrasingContent, Root, Text } from 'mdast' |
|
||||||
import type { Plugin } from 'unified' |
|
||||||
import { visit } from 'unist-util-visit' |
|
||||||
|
|
||||||
const HASHTAG_REGEX = /#([a-zA-Z0-9_]+)/g |
|
||||||
|
|
||||||
export const remarkHashtags: Plugin<[], Root> = () => { |
|
||||||
return (tree) => { |
|
||||||
visit(tree, 'text', (node: Text, index, parent) => { |
|
||||||
if (!parent || typeof index !== 'number') return |
|
||||||
|
|
||||||
const text = node.value |
|
||||||
const matches = Array.from(text.matchAll(HASHTAG_REGEX)) |
|
||||||
|
|
||||||
if (matches.length === 0) return |
|
||||||
|
|
||||||
const children: PhrasingContent[] = [] |
|
||||||
let lastIndex = 0 |
|
||||||
|
|
||||||
matches.forEach((match) => { |
|
||||||
const matchStart = match.index! |
|
||||||
const matchEnd = matchStart + match[0].length |
|
||||||
const hashtag = match[1] |
|
||||||
|
|
||||||
// Add text before the hashtag
|
|
||||||
// Normalize whitespace to prevent paragraph breaks around hashtags
|
|
||||||
if (matchStart > lastIndex) { |
|
||||||
const beforeText = text.slice(lastIndex, matchStart) |
|
||||||
// Replace ALL newlines with spaces to keep hashtags inline
|
|
||||||
// This prevents markdown from treating newlines as paragraph breaks
|
|
||||||
const normalized = beforeText.replace(/\s*\n+\s*/g, ' ') |
|
||||||
if (normalized.trim()) { |
|
||||||
children.push({ |
|
||||||
type: 'text', |
|
||||||
value: normalized |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Create a link node for the hashtag
|
|
||||||
children.push({ |
|
||||||
type: 'link', |
|
||||||
url: `/notes?t=${hashtag.toLowerCase()}`, |
|
||||||
children: [ |
|
||||||
{ |
|
||||||
type: 'text', |
|
||||||
value: `#${hashtag}` |
|
||||||
} |
|
||||||
] |
|
||||||
}) |
|
||||||
|
|
||||||
lastIndex = matchEnd |
|
||||||
}) |
|
||||||
|
|
||||||
// Add remaining text after the last match
|
|
||||||
// Normalize whitespace to prevent paragraph breaks
|
|
||||||
if (lastIndex < text.length) { |
|
||||||
const afterText = text.slice(lastIndex) |
|
||||||
// Replace ALL newlines with spaces to keep hashtags inline
|
|
||||||
// This prevents markdown from treating newlines as paragraph breaks
|
|
||||||
const normalized = afterText.replace(/\s*\n+\s*/g, ' ') |
|
||||||
if (normalized.trim()) { |
|
||||||
children.push({ |
|
||||||
type: 'text', |
|
||||||
value: normalized |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Filter out empty text nodes to prevent paragraph breaks
|
|
||||||
const filteredChildren = children.filter((child) => { |
|
||||||
if (child.type === 'text') { |
|
||||||
return child.value.trim().length > 0 |
|
||||||
} |
|
||||||
return true |
|
||||||
}) |
|
||||||
|
|
||||||
// Replace the text node with the processed children
|
|
||||||
parent.children.splice(index, 1, ...filteredChildren) |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
@ -1,90 +0,0 @@ |
|||||||
import type { PhrasingContent, Root, Text } from 'mdast' |
|
||||||
import type { Plugin } from 'unified' |
|
||||||
import { visit } from 'unist-util-visit' |
|
||||||
import { NostrNode } from './types' |
|
||||||
|
|
||||||
const NOSTR_REGEX = |
|
||||||
/nostr:(npub1[a-z0-9]{58}|nprofile1[a-z0-9]+|note1[a-z0-9]{58}|nevent1[a-z0-9]+|naddr1[a-z0-9]+)/g |
|
||||||
const NOSTR_REFERENCE_REGEX = |
|
||||||
/\[[^\]]+\]\[(nostr:(npub1[a-z0-9]{58}|nprofile1[a-z0-9]+|note1[a-z0-9]{58}|nevent1[a-z0-9]+|naddr1[a-z0-9]+))\]/g |
|
||||||
|
|
||||||
export const remarkNostr: Plugin<[], Root> = () => { |
|
||||||
return (tree) => { |
|
||||||
visit(tree, 'text', (node: Text, index, parent) => { |
|
||||||
if (!parent || typeof index !== 'number') return |
|
||||||
|
|
||||||
const text = node.value |
|
||||||
|
|
||||||
// First, handle reference-style nostr links [text][nostr:...]
|
|
||||||
const refMatches = Array.from(text.matchAll(NOSTR_REFERENCE_REGEX)) |
|
||||||
// Then, handle direct nostr links that are not part of reference links
|
|
||||||
const directMatches = Array.from(text.matchAll(NOSTR_REGEX)).filter((directMatch) => { |
|
||||||
return !refMatches.some( |
|
||||||
(refMatch) => |
|
||||||
directMatch.index! >= refMatch.index! && |
|
||||||
directMatch.index! < refMatch.index! + refMatch[0].length |
|
||||||
) |
|
||||||
}) |
|
||||||
|
|
||||||
// Combine and sort matches by position
|
|
||||||
const allMatches = [ |
|
||||||
...refMatches.map((match) => ({ |
|
||||||
...match, |
|
||||||
type: 'reference' as const, |
|
||||||
bech32Id: match[2], |
|
||||||
rawText: match[0] |
|
||||||
})), |
|
||||||
...directMatches.map((match) => ({ |
|
||||||
...match, |
|
||||||
type: 'direct' as const, |
|
||||||
bech32Id: match[1], |
|
||||||
rawText: match[0] |
|
||||||
})) |
|
||||||
].sort((a, b) => a.index! - b.index!) |
|
||||||
|
|
||||||
if (allMatches.length === 0) return |
|
||||||
|
|
||||||
const children: (Text | NostrNode)[] = [] |
|
||||||
let lastIndex = 0 |
|
||||||
|
|
||||||
allMatches.forEach((match) => { |
|
||||||
const matchStart = match.index! |
|
||||||
const matchEnd = matchStart + match[0].length |
|
||||||
|
|
||||||
// Add text before the match
|
|
||||||
if (matchStart > lastIndex) { |
|
||||||
children.push({ |
|
||||||
type: 'text', |
|
||||||
value: text.slice(lastIndex, matchStart) |
|
||||||
}) |
|
||||||
} |
|
||||||
|
|
||||||
// Create custom nostr node with type information
|
|
||||||
const nostrNode: NostrNode = { |
|
||||||
type: 'nostr', |
|
||||||
data: { |
|
||||||
hName: 'nostr', |
|
||||||
hProperties: { |
|
||||||
bech32Id: match.bech32Id, |
|
||||||
rawText: match.rawText |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
children.push(nostrNode) |
|
||||||
|
|
||||||
lastIndex = matchEnd |
|
||||||
}) |
|
||||||
|
|
||||||
// Add remaining text after the last match
|
|
||||||
if (lastIndex < text.length) { |
|
||||||
children.push({ |
|
||||||
type: 'text', |
|
||||||
value: text.slice(lastIndex) |
|
||||||
}) |
|
||||||
} |
|
||||||
|
|
||||||
// Type assertion to tell TypeScript these are valid AST nodes
|
|
||||||
parent.children.splice(index, 1, ...(children as PhrasingContent[])) |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
@ -1,107 +0,0 @@ |
|||||||
import type { Paragraph, Root, Image, Link, RootContent } from 'mdast' |
|
||||||
import type { Plugin } from 'unified' |
|
||||||
import { visit } from 'unist-util-visit' |
|
||||||
|
|
||||||
/** |
|
||||||
* Remark plugin to unwrap images from paragraphs |
|
||||||
* This prevents the DOM nesting warning where <div> (Image component) appears inside <p> |
|
||||||
*
|
|
||||||
* Markdown wraps standalone images in paragraphs. This plugin unwraps them at the AST level |
|
||||||
* so they render directly without a <p> wrapper. |
|
||||||
*/ |
|
||||||
export const remarkUnwrapImages: Plugin<[], Root> = () => { |
|
||||||
return (tree) => { |
|
||||||
visit(tree, 'paragraph', (node: Paragraph, index, parent) => { |
|
||||||
if (!parent || typeof index !== 'number') return |
|
||||||
|
|
||||||
const children = node.children |
|
||||||
|
|
||||||
// Case 1: Paragraph contains only an image: 
|
|
||||||
if (children.length === 1 && children[0].type === 'image') { |
|
||||||
// Replace the paragraph with the image directly
|
|
||||||
const image = children[0] as Image |
|
||||||
parent.children.splice(index, 1, image as unknown as RootContent) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
// Case 2: Paragraph contains only a link with an image: [](link)
|
|
||||||
if (children.length === 1 && children[0].type === 'link') { |
|
||||||
const link = children[0] as Link |
|
||||||
if (link.children.length === 1 && link.children[0].type === 'image') { |
|
||||||
// Keep the link but remove the paragraph wrapper
|
|
||||||
parent.children.splice(index, 1, link as unknown as RootContent) |
|
||||||
return |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Case 3: Paragraph contains images mixed with text
|
|
||||||
// Split the paragraph: extract images as separate block elements, keep text in paragraph
|
|
||||||
const imageIndices: number[] = [] |
|
||||||
children.forEach((child, i) => { |
|
||||||
if (child.type === 'image') { |
|
||||||
imageIndices.push(i) |
|
||||||
} else if (child.type === 'link' && child.children.some(c => c.type === 'image')) { |
|
||||||
imageIndices.push(i) |
|
||||||
} |
|
||||||
}) |
|
||||||
|
|
||||||
if (imageIndices.length > 0) { |
|
||||||
// We have images in the paragraph - need to split it
|
|
||||||
const newNodes: RootContent[] = [] |
|
||||||
let lastIndex = 0 |
|
||||||
|
|
||||||
imageIndices.forEach((imgIndex) => { |
|
||||||
// Add text before the image as a paragraph (if any)
|
|
||||||
if (imgIndex > lastIndex) { |
|
||||||
const textBefore = children.slice(lastIndex, imgIndex) |
|
||||||
if (textBefore.length > 0 && textBefore.some(c => c.type === 'text' && c.value.trim())) { |
|
||||||
newNodes.push({ |
|
||||||
type: 'paragraph', |
|
||||||
children: textBefore |
|
||||||
} as unknown as RootContent) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Add the image as a separate block element
|
|
||||||
const imageChild = children[imgIndex] |
|
||||||
if (imageChild.type === 'image') { |
|
||||||
newNodes.push(imageChild as unknown as RootContent) |
|
||||||
} else if (imageChild.type === 'link') { |
|
||||||
newNodes.push(imageChild as unknown as RootContent) |
|
||||||
} |
|
||||||
|
|
||||||
lastIndex = imgIndex + 1 |
|
||||||
}) |
|
||||||
|
|
||||||
// Add remaining text after the last image (if any)
|
|
||||||
if (lastIndex < children.length) { |
|
||||||
const textAfter = children.slice(lastIndex) |
|
||||||
if (textAfter.length > 0 && textAfter.some(c => c.type === 'text' && c.value.trim())) { |
|
||||||
newNodes.push({ |
|
||||||
type: 'paragraph', |
|
||||||
children: textAfter |
|
||||||
} as unknown as RootContent) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// If we only had images and whitespace, just use the images
|
|
||||||
if (newNodes.length === 0) { |
|
||||||
// All content was images, extract them
|
|
||||||
children.forEach(child => { |
|
||||||
if (child.type === 'image') { |
|
||||||
newNodes.push(child as unknown as RootContent) |
|
||||||
} else if (child.type === 'link' && child.children.some(c => c.type === 'image')) { |
|
||||||
newNodes.push(child as unknown as RootContent) |
|
||||||
} |
|
||||||
}) |
|
||||||
} |
|
||||||
|
|
||||||
// Replace the paragraph with the split nodes
|
|
||||||
if (newNodes.length > 0) { |
|
||||||
parent.children.splice(index, 1, ...newNodes) |
|
||||||
} |
|
||||||
} |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
@ -1,60 +0,0 @@ |
|||||||
import type { Paragraph, Root, RootContent } from 'mdast' |
|
||||||
import type { Plugin } from 'unified' |
|
||||||
import { visit } from 'unist-util-visit' |
|
||||||
import { NostrNode } from './types' |
|
||||||
|
|
||||||
/** |
|
||||||
* Remark plugin to unwrap nostr nodes from paragraphs |
|
||||||
* This prevents the DOM nesting warning where <div> (EmbeddedNote/EmbeddedMention) appears inside <p> |
|
||||||
*
|
|
||||||
* Markdown wraps standalone nostr references in paragraphs. This plugin unwraps them at the AST level |
|
||||||
* so they render directly without a <p> wrapper. |
|
||||||
*/ |
|
||||||
export const remarkUnwrapNostr: Plugin<[], Root> = () => { |
|
||||||
return (tree) => { |
|
||||||
visit(tree, 'paragraph', (node: Paragraph, index, parent) => { |
|
||||||
if (!parent || typeof index !== 'number') return |
|
||||||
|
|
||||||
const children = node.children |
|
||||||
|
|
||||||
// Type guard to check if a node is a NostrNode
|
|
||||||
const isNostrNode = (node: any): node is NostrNode => { |
|
||||||
return node && node.type === 'nostr' |
|
||||||
} |
|
||||||
|
|
||||||
// Case 1: Paragraph contains only a nostr node
|
|
||||||
if (children.length === 1 && isNostrNode(children[0])) { |
|
||||||
// Replace the paragraph with the nostr node directly
|
|
||||||
// Cast to RootContent since we're promoting it to block level
|
|
||||||
const nostrNode = children[0] as unknown as RootContent |
|
||||||
parent.children.splice(index, 1, nostrNode) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
// Case 2: Paragraph contains text and a nostr node
|
|
||||||
// If the paragraph only contains whitespace and a nostr node, unwrap it
|
|
||||||
const hasOnlyNostrAndWhitespace = children.every(child => { |
|
||||||
if (isNostrNode(child)) return true |
|
||||||
if (child.type === 'text') { |
|
||||||
return !child.value.trim() // Only whitespace
|
|
||||||
} |
|
||||||
return false |
|
||||||
}) |
|
||||||
|
|
||||||
if (hasOnlyNostrAndWhitespace) { |
|
||||||
// Find the nostr node and unwrap it
|
|
||||||
const nostrNode = children.find(isNostrNode) |
|
||||||
if (nostrNode) { |
|
||||||
// Cast to RootContent since we're promoting it to block level
|
|
||||||
parent.children.splice(index, 1, nostrNode as unknown as RootContent) |
|
||||||
return |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Case 3: Paragraph contains mixed content (text + nostr node)
|
|
||||||
// We'll leave these as-is since they're mixed content
|
|
||||||
// The paragraph handler in the component will convert them to divs
|
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
@ -1,19 +0,0 @@ |
|||||||
import { ComponentProps } from 'react' |
|
||||||
import type { Components as RmComponents } from 'react-markdown' |
|
||||||
import type { Data, Node } from 'unist' |
|
||||||
|
|
||||||
// Extends react-markdown's Components map with a custom `nostr` entry so the
// markdown renderer can mount a dedicated component for nostr references.
export interface Components extends RmComponents {
  // Props mirror the hProperties declared on NostrNode in this file:
  // rawText is the matched source text (e.g. "nostr:npub1…"), bech32Id the
  // bare identifier when one was captured.
  nostr: React.ComponentType<{
    rawText: string
    bech32Id?: string
  }>
}
|
||||||
|
|
||||||
// Custom mdast/unist node representing a `nostr:` reference found in markdown text.
export interface NostrNode extends Node {
  type: 'nostr'
  data: Data & {
    // hName/hProperties match the hName/hProperties data convention
    // (element name + props for the hast stage) — presumably consumed by
    // remark-rehype / mdast-util-to-hast downstream; verify against the
    // rendering pipeline.
    hName: string
    hProperties: ComponentProps<Components['nostr']>
  }
}
|
||||||
@ -1,18 +0,0 @@ |
|||||||
import { Event } from 'nostr-tools' |
|
||||||
import { useMemo } from 'react' |
|
||||||
import { parseNostrContent, renderNostrContent } from '@/lib/nostr-parser.tsx' |
|
||||||
import { cn } from '@/lib/utils' |
|
||||||
|
|
||||||
export default function SimpleContent({ |
|
||||||
event, |
|
||||||
className |
|
||||||
}: { |
|
||||||
event: Event |
|
||||||
className?: string |
|
||||||
}) { |
|
||||||
const parsedContent = useMemo(() => { |
|
||||||
return parseNostrContent(event.content, event) |
|
||||||
}, [event.content, event]) |
|
||||||
|
|
||||||
return renderNostrContent(parsedContent, cn('prose prose-sm prose-zinc max-w-none break-words dark:prose-invert w-full', className)) |
|
||||||
} |
|
||||||
Loading…
Reference in new issue