// then for the tail
// and the block
// forces a visual line break.
// Get the original pattern syntax from the content
const patternMarkdown = content.substring(pattern.index, pattern.end)
// Get text after the pattern on the same line
const textAfterPattern = content.substring(pattern.end, lineEndIndex)
// Extend the text to include the pattern and any text after it on the same line
text = text + patternMarkdown + textAfterPattern
textEndIndex = lineEndIndex === content.length ? content.length : lineEndIndex + 1
// Mark this pattern as merged so we don't render it separately later
mergedPatterns.add(patternIdx)
} else if (pattern.type === 'nostr') {
// Only merge profile types (npub/nprofile) inline; event types (note/nevent/naddr) remain block-level.
// Same idea as hashtags: if the mention is first on the line but more text follows on that line,
// merge into the paragraph — otherwise we emit a bare mention and the rest in a separate
// paragraph, which looks like a spurious hard return (a block after an inline-block mention).
const bech32Id = pattern.data
const isProfileType = bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')
const hasTextAfterNostrOnSameLine =
isProfileType && content.substring(pattern.end, lineEndIndex).trim().length > 0
if (isProfileType && (hasTextOnSameLine || hasTextBefore || hasTextAfterNostrOnSameLine)) {
const patternMarkdown = content.substring(pattern.index, pattern.end)
const textAfterPattern = content.substring(pattern.end, lineEndIndex)
text = text + patternMarkdown + textAfterPattern
textEndIndex = lineEndIndex === content.length ? content.length : lineEndIndex + 1
mergedPatterns.add(patternIdx)
}
}
}
if (text) {
// Skip if this text is part of a table (tables are handled as block patterns)
const isInTable = blockLevelPatternsFromAll.some(p =>
p.type === 'table' &&
lastIndex >= p.index &&
lastIndex < p.end
)
if (!isInTable) {
// Split text into paragraphs (double newlines create paragraph breaks)
// Single newlines within paragraphs should be converted to spaces
const paragraphs = text.split(/\n\n+/)
paragraphs.forEach((paragraph, paraIdx) => {
// Check for markdown images in this paragraph and extract them
const markdownImageRegex = /!\[([^\]]*)\]\(([^)]+)\)/g
const imageMatches = Array.from(paragraph.matchAll(markdownImageRegex))
if (imageMatches.length > 0) {
// Process text and images separately
let paraLastIndex = 0
imageMatches.forEach((match, imgIdx) => {
if (match.index !== undefined) {
const imgStart = match.index
const imgEnd = match.index + match[0].length
const imgUrl = match[2]
const cleaned = cleanUrl(imgUrl)
// Add text before this image
if (imgStart > paraLastIndex) {
const textBefore = paragraph.slice(paraLastIndex, imgStart)
let normalizedText = textBefore.replace(/\n/g, ' ')
normalizedText = normalizedText.replace(/[ \t]{2,}/g, ' ')
normalizedText = normalizedText.trim()
if (normalizedText) {
const textContent = parseInlineMarkdown(normalizedText, `text-${patternIdx}-para-${paraIdx}-img-${imgIdx}`, footnotes, emojiInfos)
parts.push(
{textContent}
)
}
}
// Render the image
if (isImage(cleaned)) {
let imageIndex = imageIndexMap.get(cleaned)
if (imageIndex === undefined && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
imageIndex = imageIndexMap.get(`__img_id:${identifier}`)
}
}
let thumbnailUrl: string | undefined
if (imageThumbnailMap) {
thumbnailUrl = imageThumbnailMap.get(cleaned)
if (!thumbnailUrl && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
thumbnailUrl = imageThumbnailMap.get(`__img_id:${identifier}`)
}
}
}
// Don't use thumbnails in notes - use original URL
const displayUrl = imgUrl
parts.push(
{
e.stopPropagation()
if (imageIndex !== undefined) {
openLightbox(imageIndex)
}
}}
/>
)
}
paraLastIndex = imgEnd
}
})
// Add any remaining text after the last image
if (paraLastIndex < paragraph.length) {
const remainingText = paragraph.slice(paraLastIndex)
let normalizedText = remainingText.replace(/\n/g, ' ')
normalizedText = normalizedText.replace(/[ \t]{2,}/g, ' ')
normalizedText = normalizedText.trim()
if (normalizedText) {
const textContent = parseInlineMarkdown(normalizedText, `text-${patternIdx}-para-${paraIdx}-final`, footnotes, emojiInfos)
parts.push(
{textContent}
)
}
}
} else {
// No images, process normally
// Convert single newlines to spaces within the paragraph
// This prevents hard breaks within sentences
// Also collapse multiple spaces into one
let normalizedPara = paragraph.replace(/\n/g, ' ')
// Collapse multiple consecutive spaces/tabs (2+) into a single space, but preserve single spaces
normalizedPara = normalizedPara.replace(/[ \t]{2,}/g, ' ')
// Trim only leading/trailing whitespace, not internal spaces
normalizedPara = normalizedPara.trim()
if (normalizedPara) {
// Process paragraph for inline formatting (which will handle markdown links)
const paraContent = parseInlineMarkdown(normalizedPara, `text-${patternIdx}-para-${paraIdx}`, footnotes, emojiInfos)
// Wrap in paragraph tag (no whitespace-pre-wrap, let normal text wrapping handle it)
parts.push(
{paraContent}
)
} else if (paraIdx > 0) {
// Empty paragraph between non-empty paragraphs - add spacing
// This handles cases where there are multiple consecutive newlines
parts.push(
)
}
}
})
// Update lastIndex to the end of the processed text (including link if merged)
// Only update if we haven't already updated it (e.g., for hashtag-only lines)
if (textEndIndex > lastIndex) {
lastIndex = textEndIndex
}
} else {
// Still update lastIndex even if in table
lastIndex = textEndIndex
}
} else {
// No text before pattern, but still update lastIndex if we merged a pattern
if (mergedPatterns.has(patternIdx)) {
// textEndIndex should have been set during the merge logic above
if (textEndIndex > lastIndex) {
lastIndex = textEndIndex
}
// Skip rendering since it was merged
return
}
}
} else {
// Pattern starts at or before lastIndex - check if it was merged
// This can happen if a previous pattern's merge extended past this pattern
if (mergedPatterns.has(patternIdx)) {
// This pattern was already merged (e.g., as part of a hashtag-only line)
// Skip it and don't update lastIndex (it was already updated)
return
}
}
// Skip rendering if this pattern was merged into a paragraph
// (lastIndex was already updated when we merged it above)
// This is a final safety check
if (mergedPatterns.has(patternIdx)) {
return
}
// Render pattern
if (pattern.type === 'markdown-image') {
const { url } = pattern.data
const cleaned = cleanUrl(url)
// Look up image index - try by URL first, then by identifier for cross-domain matching
let imageIndex = imageIndexMap.get(cleaned)
if (imageIndex === undefined && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
imageIndex = imageIndexMap.get(`__img_id:${identifier}`)
}
}
if (isImage(cleaned)) {
parts.push(
{
e.stopPropagation()
if (imageIndex !== undefined) {
openLightbox(imageIndex)
}
}}
/>
)
} else if (isVideo(cleaned) || isAudio(cleaned)) {
const poster = videoPosterMap?.get(cleaned)
parts.push(
)
}
} else if (pattern.type === 'markdown-image-link') {
// Link containing an image: [![alt](image-url)](link-url)
const { text, url } = pattern.data
// Extract image URL from the link text (which contains markdown image syntax)
const imageMatch = text.match(/!\[([^\]]*)\]\(([^)]+)\)/)
if (imageMatch) {
const imageUrl = imageMatch[2]
const cleaned = cleanUrl(imageUrl)
if (isImage(cleaned)) {
// Check if there's a thumbnail available for this image
let thumbnailUrl: string | undefined
if (imageThumbnailMap) {
thumbnailUrl = imageThumbnailMap.get(cleaned)
// Also check by identifier for cross-domain matching
if (!thumbnailUrl && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
thumbnailUrl = imageThumbnailMap.get(`__img_id:${identifier}`)
}
}
}
// Don't use thumbnails in notes - use original URL
const displayUrl = imageUrl
// Render as a block-level clickable image that links to the URL
// Clicking the image should navigate to the URL (standard markdown behavior)
parts.push(
)
} else {
// Not an image, render as regular link
parts.push(
{text}
)
}
} else {
// Fallback: render as regular link
parts.push(
{text}
)
}
} else if (pattern.type === 'markdown-link-standalone') {
const { url } = pattern.data
const cleanedStandalone = cleanUrl(url)
if (cleanedStandalone && (isVideo(cleanedStandalone) || isAudio(cleanedStandalone))) {
const poster = videoPosterMap?.get(cleanedStandalone)
parts.push(
)
} else {
const cleanedStandaloneForPreview = cleanedStandalone || url
if (
suppressStandaloneWebPreviewCleanedUrls &&
suppressStandaloneWebPreviewCleanedUrls.has(cleanedStandaloneForPreview)
) {
parts.push(
{url}
)
} else if (isPseudoNostrHttpsUrl(url)) {
parts.push(
{url}
)
} else {
parts.push(
)
}
}
} else if (pattern.type === 'markdown-link') {
const { text, url } = pattern.data
// Process the link text for inline formatting (bold, italic, etc.)
const linkContent = stripNestedAnchorsFromNodes(
parseInlineMarkdown(text, `link-${patternIdx}`, footnotes, emojiInfos),
`link-${patternIdx}-sanitized`
)
// Markdown links should always be rendered as inline links, not block-level components
// This ensures they don't break up the content flow when used in paragraphs
if (isWebsocketUrl(url)) {
// Relay URLs link to relay page
const relayPath = `/relays/${encodeURIComponent(url)}`
parts.push(
{
e.stopPropagation()
e.preventDefault()
navigateToRelay(relayPath)
}}
title={text.length > 200 ? text : undefined}
>
{linkContent}
)
} else {
// Regular markdown links render as simple inline links (green to match theme)
parts.push(
{linkContent}
)
}
} else if (pattern.type === 'youtube-url') {
const { url } = pattern.data
// Render YouTube URL as embedded player
parts.push(
)
} else if (pattern.type === 'relay-url') {
const { url } = pattern.data
const relayPath = `/relays/${encodeURIComponent(url)}`
const displayText = truncateLinkText(url)
parts.push(
{
e.stopPropagation()
e.preventDefault()
navigateToRelay(relayPath)
}}
title={url.length > 200 ? url : undefined}
>
{displayText}
)
} else if (pattern.type === 'header') {
const { level, text } = pattern.data
// Parse the header text for inline formatting (but not nested headers)
const headerContent = parseInlineMarkdown(text, `header-${patternIdx}`, footnotes, emojiInfos)
const HeaderTag = `h${Math.min(level, 6)}` as keyof JSX.IntrinsicElements
parts.push(
{headerContent}
)
} else if (pattern.type === 'horizontal-rule') {
parts.push(
)
} else if (pattern.type === 'bullet-list-item') {
const { text } = pattern.data
const listContent = parseInlineMarkdown(text, `bullet-${patternIdx}`, footnotes, emojiInfos)
parts.push(
{listContent}
)
} else if (pattern.type === 'numbered-list-item') {
const { text, number } = pattern.data
const listContent = parseInlineMarkdown(text, `numbered-${patternIdx}`, footnotes, emojiInfos)
const itemNumber = number ? parseInt(number, 10) : undefined
parts.push(
{listContent}
)
} else if (pattern.type === 'table') {
const { rows } = pattern.data
if (rows.length > 0) {
const headerRow = rows[0]
const dataRows = rows.slice(1)
parts.push(
{headerRow.map((cell: string, cellIdx: number) => (
|
{parseInlineMarkdown(cell, `table-header-${patternIdx}-${cellIdx}`, footnotes, emojiInfos)}
|
))}
{dataRows.map((row: string[], rowIdx: number) => (
{row.map((cell: string, cellIdx: number) => (
|
{parseInlineMarkdown(cell, `table-cell-${patternIdx}-${rowIdx}-${cellIdx}`, footnotes, emojiInfos)}
|
))}
))}
)
}
} else if (pattern.type === 'blockquote') {
const { lines } = pattern.data
// Group lines into paragraphs (consecutive non-empty lines form a paragraph, empty lines separate paragraphs)
const paragraphs: string[][] = []
let currentParagraph: string[] = []
lines.forEach((line: string) => {
if (line.trim() === '') {
// Empty line - if we have a current paragraph, finish it and start a new one
if (currentParagraph.length > 0) {
paragraphs.push(currentParagraph)
currentParagraph = []
}
} else {
// Non-empty line - add to current paragraph
currentParagraph.push(line)
}
})
// Add the last paragraph if it exists
if (currentParagraph.length > 0) {
paragraphs.push(currentParagraph)
}
// Render paragraphs
const blockquoteContent = paragraphs.map((paragraphLines: string[], paraIdx: number) => {
// Join paragraph lines with newlines to preserve line breaks (especially before em-dashes)
// This preserves the original formatting of the blockquote
const paragraphText = paragraphLines.join('\n')
const paragraphContent = parseInlineMarkdown(paragraphText, `blockquote-${patternIdx}-para-${paraIdx}`, footnotes, emojiInfos)
return (
{paragraphContent}
)
})
parts.push(
{blockquoteContent}
)
} else if (pattern.type === 'greentext') {
const { lines } = pattern.data
// Join all greentext lines with line-break elements to preserve line breaks
// Each line should have the > prefix preserved
const greentextContent = lines.map((line: string, lineIdx: number) => {
// Parse inline markdown for each line (for links, hashtags, etc.)
const lineContent = parseInlineMarkdown(line, `greentext-${patternIdx}-line-${lineIdx}`, footnotes, emojiInfos)
return (
{lineIdx > 0 &&
}
>{lineContent}
)
})
parts.push(
{greentextContent}
)
} else if (pattern.type === 'fenced-code-block') {
const { code, language } = pattern.data
const parsedMath = parseDelimitedMath(String(code ?? '').trim())
if (parsedMath || isMathLanguage(String(language ?? ''))) {
parts.push(
)
return
}
// Render code block with syntax highlighting
// We'll use a ref and useEffect to apply highlight.js after render
const codeBlockId = `code-block-${patternIdx}`
parts.push(
)
} else if (pattern.type === 'footnote-definition') {
// Don't render footnote definitions in the main content - they'll be rendered at the bottom
// Just skip this pattern
} else if (pattern.type === 'footnote-ref') {
const footnoteId = pattern.data
const footnoteText = footnotes.get(footnoteId)
if (footnoteText) {
parts.push(
)
} else {
// Footnote not found, just render the reference as-is
parts.push([^{footnoteId}])
}
} else if (pattern.type === 'citation') {
const { type: citationType, citationId, index: citationIndex } = pattern.data
const citationNumber = citationIndex + 1
if (citationType === 'inline' || citationType === 'prompt-inline') {
// Inline citations render as clickable text
parts.push(
)
} else if (citationType === 'foot' || citationType === 'foot-end') {
// Footnotes render as superscript numbers
parts.push(
{
e.preventDefault()
const citationElement = document.getElementById(`citation-${citationIndex}`)
if (citationElement) {
citationElement.scrollIntoView({ behavior: 'smooth', block: 'center' })
}
}}
>
[{citationNumber}]
)
} else if (citationType === 'quote') {
// Quotes render as block-level citation cards
parts.push(
)
} else {
// end, prompt-end render as superscript numbers that link to references section
parts.push(
{
e.preventDefault()
const refSection = document.getElementById('references-section')
if (refSection) {
refSection.scrollIntoView({ behavior: 'smooth', block: 'start' })
}
}}
>
[{citationNumber}]
)
}
} else if (pattern.type === 'nostr') {
const bech32Id = pattern.data
// Check if it's a profile type (mentions/handles should be inline)
if (bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')) {
parts.push(
)
} else if (bech32Id.startsWith('note') || bech32Id.startsWith('nevent') || bech32Id.startsWith('naddr')) {
// When this is the calendar invite naddr, show full calendar card with RSVP instead of embedded preview
if (fullCalendarInvite && fullCalendarInvite.naddr === bech32Id) {
parts.push(
)
} else {
// Embedded events should be block-level and fill width
parts.push(
)
}
} else {
parts.push(nostr:{bech32Id})
}
} else if (pattern.type === 'hashtag') {
const tag = pattern.data
const tagLower = tag.toLowerCase()
hashtagsInContent.add(tagLower) // Track hashtags rendered inline
// Check if there's another hashtag immediately following (no space between them)
// If so, add a space after this hashtag to prevent them from appearing smushed together
const nextPattern = filteredPatterns[patternIdx + 1]
// Add space if the next pattern is a hashtag that starts exactly where this one ends
// (meaning there's no space or text between them)
const shouldAddSpace = nextPattern && nextPattern.type === 'hashtag' && nextPattern.index === pattern.end
parts.push(
{
e.stopPropagation()
e.preventDefault()
navigateToHashtag(`/notes?t=${tagLower}`)
}}
>
#{tag}
)
// Add a space after the hashtag if another hashtag follows immediately
// Use a non-breaking space wrapped in a span to ensure it's rendered
if (shouldAddSpace) {
parts.push( )
}
} else if (pattern.type === 'bookstr-url') {
const { wikilink, sourceUrl } = pattern.data
parts.push(
)
} else if (pattern.type === 'wikilink') {
const linkContent = pattern.data
// Check if this is a bookstr wikilink (NKBIP-08 format: book::...)
const isBookstrLink = linkContent.startsWith('book::')
if (isBookstrLink) {
// Extract the bookstr content (already in book:: format)
const bookstrContent = linkContent.trim()
parts.push(
)
} else {
// Regular wikilink
let target = linkContent.includes('|') ? linkContent.split('|')[0].trim() : linkContent.trim()
let displayText = linkContent.includes('|') ? linkContent.split('|')[1].trim() : linkContent.trim()
const dtag = target.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-+|-+$/g, '')
parts.push(
)
}
}
lastIndex = pattern.end
})
// Add remaining text
if (lastIndex < content.length) {
const text = content.slice(lastIndex)
// Skip whitespace-only text to avoid empty spans
if (text && text.trim()) {
// Process text for inline formatting
// But skip if this text is part of a table
const isInTable = blockLevelPatternsFromAll.some((p: { type: string; index: number; end: number }) =>
p.type === 'table' &&
lastIndex >= p.index &&
lastIndex < p.end
)
if (!isInTable && text.trim()) {
// Check if there are any markdown images in the remaining text that weren't detected as patterns
// If so, we need to process them separately before processing the text
const markdownImageRegex = /!\[([^\]]*)\]\(([^)]+)\)/g
const remainingImageMatches = Array.from(text.matchAll(markdownImageRegex))
// Process images first, then text between/after them
let textLastIndex = 0
remainingImageMatches.forEach((match, imgIdx) => {
if (match.index !== undefined) {
const imgStart = match.index
const imgEnd = match.index + match[0].length
const imgUrl = match[2]
const cleaned = cleanUrl(imgUrl)
// Add text before this image
if (imgStart > textLastIndex) {
const textBefore = text.slice(textLastIndex, imgStart).trim()
if (textBefore) {
// Split into paragraphs
const paragraphs = textBefore.split(/\n\n+/)
paragraphs.forEach((paragraph, paraIdx) => {
let normalizedPara = paragraph.replace(/\n/g, ' ')
normalizedPara = normalizedPara.replace(/[ \t]{2,}/g, ' ')
normalizedPara = normalizedPara.trim()
if (normalizedPara) {
const paraContent = parseInlineMarkdown(normalizedPara, `text-end-para-${imgIdx}-${paraIdx}`, footnotes, emojiInfos)
parts.push(
{paraContent}
)
}
})
}
}
// Render the image
if (isImage(cleaned)) {
let imageIndex = imageIndexMap.get(cleaned)
if (imageIndex === undefined && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
imageIndex = imageIndexMap.get(`__img_id:${identifier}`)
}
}
parts.push(
{
e.stopPropagation()
if (imageIndex !== undefined) {
openLightbox(imageIndex)
}
}}
/>
)
}
textLastIndex = imgEnd
}
})
// Add any remaining text after the last image
if (textLastIndex < text.length) {
const remainingText = text.slice(textLastIndex).trim()
if (remainingText) {
const paragraphs = remainingText.split(/\n\n+/)
paragraphs.forEach((paragraph, paraIdx) => {
let normalizedPara = paragraph.replace(/\n/g, ' ')
normalizedPara = normalizedPara.replace(/[ \t]{2,}/g, ' ')
normalizedPara = normalizedPara.trim()
if (normalizedPara) {
const paraContent = parseInlineMarkdown(normalizedPara, `text-end-final-para-${paraIdx}`, footnotes, emojiInfos)
parts.push(
{paraContent}
)
}
})
}
} else if (remainingImageMatches.length === 0) {
// No images found, process the text normally
const paragraphs = text.split(/\n\n+/)
paragraphs.forEach((paragraph, paraIdx) => {
// Convert single newlines to spaces within the paragraph
// Collapse multiple consecutive spaces/tabs (2+) into a single space, but preserve single spaces
let normalizedPara = paragraph.replace(/\n/g, ' ')
normalizedPara = normalizedPara.replace(/[ \t]{2,}/g, ' ')
normalizedPara = normalizedPara.trim()
if (normalizedPara) {
const paraContent = parseInlineMarkdown(normalizedPara, `text-end-para-${paraIdx}`, footnotes, emojiInfos)
parts.push(
{paraContent}
)
}
})
}
}
}
}
// If no patterns, just return the content as text (with inline formatting and paragraphs)
if (parts.length === 0) {
const paragraphs = content.split(/\n\n+/)
const formattedParagraphs = paragraphs.map((paragraph, paraIdx) => {
// Convert single newlines to spaces within the paragraph
// Collapse multiple consecutive spaces/tabs (2+) into a single space, but preserve single spaces
let normalizedPara = paragraph.replace(/\n/g, ' ')
normalizedPara = normalizedPara.replace(/[ \t]{2,}/g, ' ')
normalizedPara = normalizedPara.trim()
if (!normalizedPara) return null
const paraContent = parseInlineMarkdown(normalizedPara, `text-only-para-${paraIdx}`, footnotes, emojiInfos)
return (
{paraContent}
)
}).filter(Boolean)
return { nodes: formattedParagraphs, hashtagsInContent, footnotes, citations }
}
// Filter out empty spans before wrapping lists
// But preserve whitespace that appears between inline patterns (like hashtags)
const filteredParts = parts.filter((part, idx) => {
if (React.isValidElement(part) && part.type === 'span') {
const children = part.props.children
const isWhitespaceOnly =
(typeof children === 'string' && !children.trim()) ||
(Array.isArray(children) && children.every(child => typeof child === 'string' && !child.trim()))
if (isWhitespaceOnly) {
// Check if this whitespace is adjacent to inline patterns (like hashtags)
// Look at the previous and next parts to see if they're inline patterns
const prevPart = idx > 0 ? parts[idx - 1] : null
const nextPart = idx < parts.length - 1 ? parts[idx + 1] : null
// Check if a part is an inline pattern (hashtag, wikilink, nostr mention, markdown link, etc.)
const isInlinePattern = (part: any) => {
if (!part || !React.isValidElement(part)) return false
const key = part.key?.toString() || ''
const type = part.type
// Hashtags are elements with keys starting with 'hashtag-'
// Markdown links are elements with keys starting with 'link-' or 'relay-'
// Wikilinks might be custom components
// Nostr mentions might be spans or other elements
return (type === 'a' && (
key.startsWith('hashtag-') ||
key.startsWith('wikilink-') ||
key.startsWith('link-') ||
key.startsWith('relay-')
)) ||
(type === 'span' && (key.startsWith('wikilink-') || key.startsWith('nostr-'))) ||
// Also check for embedded mentions/components that might be inline
(type && typeof type !== 'string' && key.includes('mention'))
}
const prevIsInlinePattern = isInlinePattern(prevPart)
const nextIsInlinePattern = isInlinePattern(nextPart)
// Preserve whitespace if it's between two inline patterns, or before/after one
// This ensures spaces around hashtags are preserved
if (prevIsInlinePattern || nextIsInlinePattern) {
return true
}
// Otherwise filter out whitespace-only spans
return false
}
}
return true
})
// Wrap list items in or tags
const wrappedParts: React.ReactNode[] = []
let partIdx = 0
while (partIdx < filteredParts.length) {
const part = filteredParts[partIdx]
// Check if this is a list item
if (React.isValidElement(part) && part.type === 'li') {
// Determine if it's a bullet or numbered list
const isBullet = part.key && part.key.toString().startsWith('bullet-')
const isNumbered = part.key && part.key.toString().startsWith('numbered-')
if (isBullet || isNumbered) {
// Collect consecutive list items of the same type
const listItems: React.ReactNode[] = [part]
partIdx++
while (partIdx < filteredParts.length) {
const nextPart = filteredParts[partIdx]
if (React.isValidElement(nextPart) && nextPart.type === 'li') {
const nextIsBullet = nextPart.key && nextPart.key.toString().startsWith('bullet-')
const nextIsNumbered = nextPart.key && nextPart.key.toString().startsWith('numbered-')
if ((isBullet && nextIsBullet) || (isNumbered && nextIsNumbered)) {
listItems.push(nextPart)
partIdx++
} else {
break
}
} else {
break
}
}
// Only wrap in or if there's more than one item
// Single-item lists should not be formatted as lists
if (listItems.length > 1) {
if (isBullet) {
wrappedParts.push(
)
} else {
wrappedParts.push(
{listItems}
)
}
} else {
// Single item - render the original line text (including marker) as plain text
// Extract pattern index from the key to look up original line
const listItem = listItems[0]
if (React.isValidElement(listItem) && listItem.key) {
const keyStr = listItem.key.toString()
const patternIndexMatch = keyStr.match(/(?:bullet|numbered)-(\d+)/)
if (patternIndexMatch) {
const patternIndex = parseInt(patternIndexMatch[1], 10)
const originalLine = listItemOriginalLines.get(patternIndex)
if (originalLine) {
// Render the original line with inline markdown processing
const lineContent = parseInlineMarkdown(originalLine, `single-list-item-${partIdx}`, footnotes, emojiInfos)
wrappedParts.push(
{lineContent}
)
} else {
// Fallback: render the list item content
wrappedParts.push(
{listItem.props.children}
)
}
} else {
// Fallback: render the list item content
wrappedParts.push(
{listItem.props.children}
)
}
} else {
wrappedParts.push(listItem)
}
}
continue
}
}
wrappedParts.push(part)
partIdx++
}
// Add footnotes section at the end if there are any footnotes
if (footnotes.size > 0) {
wrappedParts.push(
Footnotes
{Array.from(footnotes.entries()).map(([id, text]) => (
))}
)
}
// Add citations section (footnotes) at the end if there are any footnotes
const footCitations = citations.filter(c => c.type === 'foot' || c.type === 'foot-end')
if (footCitations.length > 0) {
wrappedParts.push(
Citations
{footCitations.map((citation, idx) => (
-
))}
)
}
// Add references section at the end if there are any endnote citations
const endCitations = citations.filter(c => c.type === 'end' || c.type === 'prompt-end')
if (endCitations.length > 0) {
wrappedParts.push(
References
{endCitations.map((citation, idx) => (
-
))}
)
}
return { nodes: wrappedParts, hashtagsInContent, footnotes, citations }
}
/**
* Marked-driven markdown renderer (standard markdown blocks/inline), while keeping
* Nostr-specific enrichments (embeds, wikilinks, relay/profile navigation) custom.
*/
function parseMarkdownContentMarked(
content: string,
options: {
eventPubkey: string
imageIndexMap: Map
openLightbox: (index: number) => void
navigateToHashtag: (href: string) => void
navigateToRelay: (url: string) => void
videoPosterMap?: Map
imageThumbnailMap?: Map
getImageIdentifier?: (url: string) => string | null
emojiInfos?: TEmoji[]
fullCalendarInvite?: { naddr: string; event: Event }
suppressStandaloneWebPreviewCleanedUrls?: ReadonlySet
containingEvent?: Event
/** Hold images as placeholders until clicked (lightbox). False in detail/full views. */
lazyMedia?: boolean
}
): { nodes: React.ReactNode[]; hashtagsInContent: Set; footnotes: Map; citations: Array<{ id: string; type: string; citationId: string }> } {
const {
eventPubkey,
imageIndexMap,
openLightbox,
navigateToHashtag,
navigateToRelay,
videoPosterMap,
getImageIdentifier,
emojiInfos = [],
fullCalendarInvite,
suppressStandaloneWebPreviewCleanedUrls,
containingEvent,
lazyMedia = true
} = options
/** Direct image URLs on their own line: render Image (NIP-94 / Amethyst-style), not WebPreview — WebPreview returns null when autoLoadMedia is off. */
const imetaInfoForStandaloneImageUrl = (cleaned: string): TImetaInfo => {
if (containingEvent) {
const infos = getImetaInfosFromEvent(containingEvent)
const hit = infos.find((i) => cleanUrl(i.url) === cleaned)
if (hit) return { ...hit, url: cleaned }
}
return { url: cleaned, pubkey: eventPubkey }
}
// Renders a bare https image URL (on its own line) as a block-level image.
// Clicking opens the lightbox at the image's index when one is known.
const renderStandaloneHttpsImageBlock = (cleaned: string, reactKey: string) => {
// Resolve the lightbox index: first by cleaned URL, then by a domain-independent
// identifier (handles the same image hosted on different domains).
let imageIndex = imageIndexMap.get(cleaned)
if (imageIndex === undefined && getImageIdentifier) {
const identifier = getImageIdentifier(cleaned)
if (identifier) {
imageIndex = imageIndexMap.get(`__img_id:${identifier}`)
}
}
// NOTE(review): the JSX element below appears to have lost its opening tag during
// text extraction — only the click-handler prop body remains. Restore from version control.
return (
{
e.stopPropagation()
if (imageIndex !== undefined) {
openLightbox(imageIndex)
}
}}
/>
)
}
const hashtagsInContent = new Set()
const footnotes = new Map()
const citations: Array<{ id: string; type: string; citationId: string }> = []
const contentLines: string[] = []
let currentFootnoteId: string | null = null
for (const line of content.split('\n')) {
const footnoteDefMatch = line.match(/^\[\^([^\]]+)\]:\s+(.+)$/)
if (footnoteDefMatch) {
currentFootnoteId = footnoteDefMatch[1]
footnotes.set(currentFootnoteId, footnoteDefMatch[2])
continue
}
// Support indented continuation lines for multi-line footnote definitions.
if (currentFootnoteId && /^(?:\s{2,}|\t)(.+)$/.test(line)) {
const continuation = line.replace(/^(?:\s{2,}|\t)/, '')
const prev = footnotes.get(currentFootnoteId) ?? ''
footnotes.set(currentFootnoteId, prev ? `${prev} ${continuation}` : continuation)
continue
}
currentFootnoteId = null
contentLines.push(line)
}
const contentWithoutFootnotes = contentLines.join('\n')
const blockTokens = marked.lexer(contentWithoutFootnotes, { gfm: true, breaks: true }) as any[]
let codeBlockIdx = 0
const collectHashtags = (text: string) => {
const re = /#([a-zA-Z0-9_]+)/g
let m: RegExpExecArray | null
while ((m = re.exec(text)) !== null) {
hashtagsInContent.add(m[1].toLowerCase())
}
}
// Recursively converts a marked inline-token array into React nodes.
// Plain text runs through the legacy inline parser (hashtags, emoji, footnote refs);
// strong/em/del/link recurse into their child tokens; images render as media blocks.
// NOTE(review): several JSX elements in this function appear to have been stripped
// during text extraction (empty-looking `out.push(...)` bodies) — restore from version control.
const renderInlineTokens = (tokens: any[], keyPrefix: string): React.ReactNode[] => {
const out: React.ReactNode[] = []
for (let i = 0; i < tokens.length; i++) {
const token = tokens[i]
// Stable React key per token position.
const key = `${keyPrefix}-${i}`
switch (token.type) {
case 'text':
case 'escape': {
const txt = String(token.text ?? token.raw ?? '')
// Track hashtags so callers can de-duplicate a trailing hashtag footer.
collectHashtags(txt)
out.push(
...parseInlineMarkdownLegacy(txt, `${key}-text`, footnotes, emojiInfos, navigateToHashtag)
)
break
}
// Emphasis variants: recurse into child tokens; when marked supplies no child
// tokens, synthesize a single text token from the token's plain text.
case 'strong':
out.push(
{renderInlineTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${key}-strong`)}
)
break
case 'em':
out.push(
{renderInlineTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${key}-em`)}
)
break
case 'del':
out.push(
{renderInlineTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${key}-del`)}
)
break
case 'codespan':
out.push(
)
break
case 'link': {
const href = String(token.href ?? '')
// Render the link label, then strip any nested anchors so we never emit <a> inside <a>.
const children = stripNestedAnchorsFromNodes(
renderInlineTokens(token.tokens ?? [{ type: 'text', text: token.text ?? href }], `${key}-link`),
`${key}-link-sanitized`
)
// payto:// links get special handling; everything else renders as a plain inline link.
if (href.startsWith('payto://')) {
out.push(
{children}
)
} else {
out.push(
{children}
)
}
break
}
case 'br':
out.push(
)
break
case 'image': {
const src = String(token.href ?? '')
const cleaned = cleanUrl(src)
// Unparseable URL: drop the token entirely.
if (!cleaned) break
const label = String(token.text ?? '')
// Video/audio URLs written as markdown images still render as media players.
if (isVideo(cleaned) || isAudio(cleaned)) {
out.push(
{label || src}
)
break
}
// Non-image or unsafe URLs fall back to a text/link rendering.
if (!isImage(cleaned) || !isSafeMediaUrl(cleaned)) {
out.push(
{label || src}
)
break
}
// `![](url)` with empty alt — a plain {label} was invisible. Use Image like block paragraphs.
const baseImeta = imetaInfoForStandaloneImageUrl(cleaned)
// Lightbox index lookup: by cleaned URL first, then by domain-independent identifier.
let imageIdx = imageIndexMap.get(cleaned)
if (imageIdx === undefined && getImageIdentifier) {
const id = getImageIdentifier(cleaned)
if (id) imageIdx = imageIndexMap.get(`__img_id:${id}`)
}
out.push(
{
e.stopPropagation()
if (typeof imageIdx === 'number') openLightbox(imageIdx)
}}
/>
)
break
}
// Unknown token types: fall back to rendering the raw text through the legacy parser.
default: {
const txt = String(token.raw ?? token.text ?? '')
if (txt) {
collectHashtags(txt)
out.push(
...parseInlineMarkdownLegacy(txt, `${key}-fallback`, footnotes, emojiInfos, navigateToHashtag)
)
}
}
}
}
return out
}
// Renders one markdown 'paragraph' token into React nodes, special-casing many
// paragraph shapes before falling back to plain inline rendering:
//   1. whole-paragraph delimited math,
//   2. a standalone nostr: URI (mention for npub/nprofile, embed for note/nevent/naddr),
//   3. [[wiki]] links (with book:: prefix handling),
//   4. a standalone relay (wss://) URL,
//   5. multi-line paragraphs containing standalone special lines (nostr/relay/http),
//   6. inline nostr event IDs appearing as plain text inside a sentence,
//   7. a standalone http(s) URL (YouTube / media / image / web preview),
//   8. inline media "image" tokens and single-image paragraphs.
// NOTE(review): JSX element markup appears stripped from this extracted view
// (several return(...) / push(...) bodies look empty) — confirm rendered-element
// details against the original source file.
const renderParagraph = (token: any, key: string): React.ReactNode => {
const paragraphText = String(token.text ?? '').trim()
const rawParagraphText = String(token.text ?? '')
// Case 1: the entire paragraph is a delimited math expression.
const standaloneMath = parseDelimitedMath(rawParagraphText.trim())
if (standaloneMath) {
return (
)
}
// Event-kind bech32 prefixes (as opposed to profile kinds npub/nprofile).
const isNostrEventBech32 = (value: string): boolean =>
value.startsWith('note') || value.startsWith('nevent') || value.startsWith('naddr')
// Case 2: paragraph is exactly one nostr: URI.
const standaloneNostr = paragraphText.match(/^nostr:([a-z0-9]{8,})$/i)
if (standaloneNostr) {
const bech32Id = standaloneNostr[1]
if (bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')) {
return (
)
}
if (bech32Id.startsWith('note') || bech32Id.startsWith('nevent') || bech32Id.startsWith('naddr')) {
// A matching kind-24 invite naddr renders the full calendar card instead of a plain embed.
if (fullCalendarInvite && bech32Id === fullCalendarInvite.naddr) {
return (
)
}
return (
)
}
}
// Case 3: [[target]] or [[target|display]] wiki link as the whole paragraph.
const wiki = paragraphText.match(/^\[\[([^\]]+)\]\]$/)
if (wiki) {
const linkContent = wiki[1].trim()
if (linkContent.startsWith('book::')) {
return
}
const target = linkContent.includes('|') ? linkContent.split('|')[0].trim() : linkContent
const displayText = linkContent.includes('|') ? linkContent.split('|')[1].trim() : linkContent
// Slugify the wiki target into a d-tag: lowercase, non-alphanumerics to dashes, trimmed.
const dTag = target.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-+|-+$/g, '')
return
}
// Case 4: paragraph is exactly one relay URL — navigate in-app instead of a plain link.
if (/^wss?:\/\/\S+$/i.test(paragraphText)) {
return (
{
e.preventDefault()
navigateToRelay(paragraphText)
}}
>
{paragraphText}
)
}
// Mixed paragraphs can contain normal text plus one or more standalone nostr lines.
// Render standalone special lines (nostr refs, relay links, plain URLs/media) as dedicated blocks
// even when they are not the entire paragraph.
if (rawParagraphText.includes('\n')) {
const lines = rawParagraphText.split('\n').map((line) => line.trim()).filter((line) => line.length > 0)
const hasStandaloneSpecialLine = lines.some(
(line) =>
/^nostr:([a-z0-9]{8,})$/i.test(line) ||
/^wss?:\/\/\S+$/i.test(line) ||
/^https?:\/\/\S+$/i.test(line)
)
if (hasStandaloneSpecialLine) {
// Render each non-empty line independently: special lines become dedicated
// blocks, ordinary lines go through the inline token renderer.
const lineNodes = lines.map((line, lineIdx) => {
const nostrMatch = line.match(/^nostr:([a-z0-9]{8,})$/i)
if (!nostrMatch) {
if (/^wss?:\/\/\S+$/i.test(line)) {
return (
{
e.preventDefault()
navigateToRelay(line)
}}
>
{line}
)
}
if (/^https?:\/\/\S+$/i.test(line)) {
const cleaned = cleanUrl(line)
if (cleaned) {
if (isYouTubeUrl(cleaned)) {
return (
)
}
if (isVideo(cleaned) || isAudio(cleaned)) {
const poster = videoPosterMap?.get(cleaned)
return (
)
}
if (isPseudoNostrHttpsUrl(cleaned)) {
return (
)
}
if (isImage(cleaned) && isSafeMediaUrl(cleaned)) {
return renderStandaloneHttpsImageBlock(cleaned, `${key}-line-img-${lineIdx}`)
}
// URL already shown elsewhere (e.g. as the thread OP) — plain text, no preview card.
if (suppressStandaloneWebPreviewCleanedUrls?.has(cleaned)) {
return (
{cleaned}
)
}
return
}
}
// Ordinary text line: inline-render it.
return (
{renderInlineTokens(lexInlineProtected(line) as any[], `${key}-line-inline-${lineIdx}`)}
)
}
const bech32Id = nostrMatch[1]
if (bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')) {
return (
)
}
if (bech32Id.startsWith('note') || bech32Id.startsWith('nevent') || bech32Id.startsWith('naddr')) {
if (fullCalendarInvite && bech32Id === fullCalendarInvite.naddr) {
return (
)
}
return (
)
}
// Unrecognized bech32 prefix: fall back to inline rendering of the raw line.
return (
{renderInlineTokens(lexInlineProtected(line) as any[], `${key}-line-fallback-inline-${lineIdx}`)}
)
})
return {lineNodes}
}
}
// Inline nostr event IDs can appear as plain text inside a sentence (not link tokens).
// Split paragraph around those IDs so event references render as embedded cards.
const rawInlineNostrMatches = Array.from(rawParagraphText.matchAll(new RegExp(NOSTR_URI_INLINE_REGEX.source, NOSTR_URI_INLINE_REGEX.flags)))
.filter((m) => m.index !== undefined && isNostrEventBech32((m[1] ?? '').toLowerCase()))
if (rawInlineNostrMatches.length > 0) {
const nodes: React.ReactNode[] = []
let cursor = 0
// NOTE(review): segmentIdx is declared but never incremented in this view, so
// both text segments get the same key suffix — confirm against the original.
let segmentIdx = 0
for (const match of rawInlineNostrMatches) {
const start = match.index!
const end = start + match[0].length
const bech32Id = String(match[1] ?? '')
const before = rawParagraphText.slice(cursor, start)
if (before.trim().length > 0) {
nodes.push(
{parseInlineMarkdown(before, `${key}-nostr-raw-segment-${segmentIdx}`, footnotes, emojiInfos, navigateToHashtag)}
)
}
if (bech32Id.startsWith('naddr') && fullCalendarInvite && bech32Id === fullCalendarInvite.naddr) {
nodes.push(
)
} else {
nodes.push(
)
}
cursor = end
}
// Trailing text after the last nostr reference.
const after = rawParagraphText.slice(cursor)
if (after.trim().length > 0) {
nodes.push(
{parseInlineMarkdown(after, `${key}-nostr-raw-segment-${segmentIdx}`, footnotes, emojiInfos, navigateToHashtag)}
)
}
if (nodes.length > 0) {
return {nodes}
}
}
// Case 7: paragraph is exactly one http(s) URL — same dispatch order as the
// per-line handling above (YouTube > media > pseudo-nostr > image > suppressed > preview).
if (/^https?:\/\/\S+$/i.test(paragraphText)) {
const cleaned = cleanUrl(paragraphText)
if (cleaned) {
if (isYouTubeUrl(cleaned)) {
return (
)
}
if (isVideo(cleaned) || isAudio(cleaned)) {
const poster = videoPosterMap?.get(cleaned)
return (
)
}
if (isPseudoNostrHttpsUrl(cleaned)) {
return (
)
}
if (isImage(cleaned) && isSafeMediaUrl(cleaned)) {
return renderStandaloneHttpsImageBlock(cleaned, `${key}-para-img`)
}
if (suppressStandaloneWebPreviewCleanedUrls?.has(cleaned)) {
return (
{cleaned}
)
}
return
}
}
const paragraphTokens = lexInlineProtected(String(token.text ?? token.raw ?? ''))
// Extracts the bech32 payload from a "nostr:..." href, dropping any
// query/fragment suffix and trailing slashes; null when not a nostr href.
const parseNostrHref = (href: string): string | null => {
if (!href.toLowerCase().startsWith('nostr:')) return null
const raw = href.slice(6).trim()
if (!raw) return null
const bech32 = raw.split(/[?#]/)[0]?.replace(/\/+$/, '') || ''
return bech32 || null
}
// Inline nostr event links (e.g. "… nostr:naddr1…") should render embedded cards.
// Split paragraph into inline text segments + block embeds to avoid invalid
// nested element trees.
if (Array.isArray(paragraphTokens) && paragraphTokens.length > 0) {
// Pass A: image-syntax tokens whose URL is actually video/audio become block media players.
const hasInlineMediaImageToken = paragraphTokens.some((t) => {
if (t?.type !== 'image') return false
const cleaned = cleanUrl(String(t.href ?? ''))
return !!cleaned && (isVideo(cleaned) || isAudio(cleaned))
})
if (hasInlineMediaImageToken) {
const nodes: React.ReactNode[] = []
let inlineSegment: any[] = []
// Emits any accumulated inline tokens as one segment node, then resets the buffer.
const flushInlineSegment = (segmentIdx: number) => {
if (inlineSegment.length === 0) return
nodes.push(
{renderInlineTokens(inlineSegment, `${key}-media-inline-segment-${segmentIdx}`)}
)
inlineSegment = []
}
let segmentIdx = 0
paragraphTokens.forEach((t: any, idx: number) => {
if (t?.type !== 'image') {
inlineSegment.push(t)
return
}
const src = String(t.href ?? '')
const cleaned = cleanUrl(src)
if (!cleaned || (!isVideo(cleaned) && !isAudio(cleaned))) {
inlineSegment.push(t)
return
}
flushInlineSegment(segmentIdx++)
const poster = videoPosterMap?.get(cleaned)
nodes.push(
)
})
flushInlineSegment(segmentIdx++)
if (nodes.length > 0) {
return {nodes}
}
}
// Pass B: markdown links whose href is a nostr event URI become block embeds.
const hasInlineNostrEventLink = paragraphTokens.some((t) => {
if (t?.type !== 'link') return false
const bech32 = parseNostrHref(String(t.href ?? ''))
return !!bech32 && isNostrEventBech32(bech32)
})
if (hasInlineNostrEventLink) {
const nodes: React.ReactNode[] = []
let inlineSegment: any[] = []
const flushInlineSegment = (segmentIdx: number) => {
if (inlineSegment.length === 0) return
nodes.push(
{renderInlineTokens(inlineSegment, `${key}-nostr-inline-segment-${segmentIdx}`)}
)
inlineSegment = []
}
let segmentIdx = 0
paragraphTokens.forEach((t: any, idx: number) => {
if (t?.type !== 'link') {
inlineSegment.push(t)
return
}
const href = String(t.href ?? '')
const bech32 = parseNostrHref(href)
if (!bech32 || !isNostrEventBech32(bech32)) {
inlineSegment.push(t)
return
}
flushInlineSegment(segmentIdx++)
if (bech32.startsWith('naddr') && fullCalendarInvite && bech32 === fullCalendarInvite.naddr) {
nodes.push(
)
} else {
nodes.push(
)
}
})
flushInlineSegment(segmentIdx++)
if (nodes.length > 0) {
return {nodes}
}
}
}
// If the paragraph is a single markdown image token, render it as block media/image
// instead of wrapping in an inline container (avoids invalid DOM nesting for media players).
if (Array.isArray(paragraphTokens) && paragraphTokens.length === 1 && paragraphTokens[0]?.type === 'image') {
const imageToken = paragraphTokens[0]
const src = String(imageToken.href ?? '')
const cleaned = cleanUrl(src)
if (cleaned) {
if (isVideo(cleaned) || isAudio(cleaned)) {
const poster = videoPosterMap?.get(cleaned)
return (
)
}
// Not a renderable/safe image URL: fall back to inline rendering of the token.
if (!isImage(cleaned) || !isSafeMediaUrl(cleaned)) {
return (
{renderInlineTokens(paragraphTokens, `${key}-img-inline-fallback`)}
)
}
// Clicking the image opens the gallery lightbox at this image's index (when known).
const imageIdx = imageIndexMap.get(cleaned)
return (
{
e.stopPropagation()
if (typeof imageIdx === 'number') openLightbox(imageIdx)
}}
/>
)
}
}
// Default: plain inline rendering of the paragraph's tokens.
const inlineNodes = renderInlineTokens(paragraphTokens, `${key}-inline`)
return {inlineNodes}
}
// Renders a list of block-level lexer tokens (paragraph, heading, hr, code,
// blockquote, list, table, …) into React nodes. Recurses into nested token
// lists (blockquote bodies, list items, unknown containers).
// NOTE(review): JSX element markup appears stripped from this extracted view —
// the push(...) bodies below look empty; confirm against the original file.
const renderBlockTokens = (tokens: any[], keyPrefix: string): React.ReactNode[] => {
const nodes: React.ReactNode[] = []
for (let i = 0; i < tokens.length; i++) {
const token = tokens[i]
const key = `${keyPrefix}-${i}`
switch (token.type) {
case 'space':
// Whitespace-only tokens produce no output.
break
case 'paragraph':
nodes.push(renderParagraph(token, key))
break
case 'heading': {
// Map heading depth 1..4 to shrinking text sizes; 5+ share 'text-base'.
const level = Number(token.depth || 1)
const headingClass =
level === 1
? 'text-3xl'
: level === 2
? 'text-2xl'
: level === 3
? 'text-xl'
: level === 4
? 'text-lg'
: 'text-base'
// Depth is clamped to 1..6 so the element is always a valid h1..h6.
nodes.push(
React.createElement(
`h${Math.min(Math.max(level, 1), 6)}`,
{ key: `${key}-h`, className: `font-bold break-words block mt-4 mb-2 ${headingClass}` },
renderInlineTokens(lexInlineProtected(String(token.text ?? '')), `${key}-h-inline`)
)
)
break
}
case 'hr':
nodes.push(
)
break
case 'code': {
// Fenced code whose body is delimited math (or whose language tag is a
// math language) renders as math instead of a code block.
const codeText = String(token.text ?? '')
const codeLang = String(token.lang ?? '')
const parsedMath = parseDelimitedMath(codeText.trim())
if (parsedMath || isMathLanguage(codeLang)) {
nodes.push(
)
break
}
nodes.push(
)
break
}
case 'blockquote': {
// "Greentext" detection: every non-empty raw line starts with '>' followed
// by a non-space, non-'>' character (i.e. '>text', not '> text' or '>>').
const rawLines = String(token.raw ?? '')
.split('\n')
.filter((line) => line.trim().length > 0)
const isGreentext =
rawLines.length > 0 && rawLines.every((line) => /^>([^\s>].*)$/.test(line.trim()))
if (isGreentext) {
// Strip the leading '>' (and one optional space) and render each line inline.
const lines = rawLines.map((line) => line.replace(/^>\s?/, ''))
nodes.push(
{lines.map((line, idx) => (
{renderInlineTokens(lexInlineProtected(line) as any[], `${key}-gt-inline-${idx}`)}
{idx < lines.length - 1 ?
: null}
))}
)
} else {
// Regular blockquote: recurse into its nested block tokens.
nodes.push(
{renderBlockTokens(token.tokens ?? [], `${key}-bq-inner`)}
)
}
break
}
case 'list': {
const ListTag = token.ordered ? 'ol' : 'ul'
const listClass = token.ordered
? 'list-decimal list-outside my-2 ml-6'
: 'list-disc list-outside my-2 ml-6 space-y-1'
// Single text/paragraph items render inline; anything else recurses
// through renderBlockTokens (nested lists, multiple blocks per item).
const renderListItemContent = (item: any, itemKey: string): React.ReactNode => {
const itemTokens = item.tokens ?? [{ type: 'text', text: item.text ?? '' }]
if (itemTokens.length === 1) {
const single = itemTokens[0]
if (single.type === 'text') {
return renderInlineTokens(
lexInlineProtected(String(single.text ?? '')),
`${itemKey}-inline`
)
}
if (single.type === 'paragraph') {
return renderInlineTokens(
lexInlineProtected(String(single.text ?? '')),
`${itemKey}-inline`
)
}
}
return renderBlockTokens(itemTokens, itemKey)
}
nodes.push(
React.createElement(
ListTag,
{ key: `${key}-list`, className: listClass },
(token.items ?? []).map((item: any, itemIdx: number) => (
{renderListItemContent(item, `${key}-li-${itemIdx}`)}
))
)
)
break
}
case 'table': {
// Header cells then body rows; each cell's text is inline-rendered.
nodes.push(
{(token.header ?? []).map((cell: any, cIdx: number) => (
|
{renderInlineTokens(lexInlineProtected(String(cell.text ?? '')), `${key}-th-inline-${cIdx}`)}
|
))}
{(token.rows ?? []).map((row: any[], rIdx: number) => (
{row.map((cell: any, cIdx: number) => (
|
{renderInlineTokens(
lexInlineProtected(String(cell.text ?? '')),
`${key}-td-inline-${rIdx}-${cIdx}`
)}
|
))}
))}
)
break
}
default: {
// Unknown token types: recurse into nested tokens when present,
// otherwise inline-render any non-empty text payload.
if (Array.isArray(token.tokens) && token.tokens.length > 0) {
nodes.push(...renderBlockTokens(token.tokens, `${key}-nested`))
} else if (typeof token.text === 'string' && token.text.trim()) {
nodes.push(
{renderInlineTokens(lexInlineProtected(String(token.text ?? token.raw ?? '')) as any[], `${key}-fallback-inline`)}
)
}
}
}
}
return nodes
}
const nodes = renderBlockTokens(blockTokens, 'marked-root')
if (footnotes.size > 0) {
nodes.push(
Footnotes
{Array.from(footnotes.entries()).map(([id, text]) => (
))}
)
}
return { nodes, hashtagsInContent, footnotes, citations }
}
/**
* Parse inline markdown formatting (bold, italic, strikethrough, inline code, footnote references)
* Returns an array of React nodes
*
* Supports:
* - Bold: **text** or __text__ (double) or *text* (single asterisk)
* - Italic: _text_ (single underscore) or __text__ (double underscore, but bold takes priority)
* - Strikethrough: ~~text~~ (double tilde) or ~text~ (single tilde)
* - Inline code: ``code`` (double backtick) or `code` (single backtick)
* - Footnote references: [^1] (handled at block level, but parsed here for inline context)
*/
// NOTE(review): generic type arguments appear stripped in this view
// (`_footnotes: Map` — presumably Map<string, string>; confirm against the
// original file). JSX element markup in push(...) bodies also looks stripped.
function parseInlineMarkdown(
text: string,
keyPrefix: string,
_footnotes: Map = new Map(),
emojiInfos: TEmoji[] = [],
navigateToHashtag?: (href: string) => void
): React.ReactNode[] {
// Normalize: newlines become spaces, then runs of 2+ spaces/tabs collapse to one.
const normalized = text.replace(/\n/g, ' ').replace(/[ \t]{2,}/g, ' ')
const tokens = lexInlineProtected(normalized) as any[]
const hasMarkdownSyntax = tokens.some((token) => token.type !== 'text' && token.type !== 'escape')
// Fast path: keep old behavior when there is no markdown syntax.
if (!hasMarkdownSyntax) {
return parseInlineMarkdownLegacy(normalized, keyPrefix, _footnotes, emojiInfos, navigateToHashtag)
}
// Walks the lexer token tree; markdown structure (strong/em/del/codespan/link/br)
// is rendered here, while text leaves are delegated to the legacy parser so the
// custom enrichments (hashtags, nostr mentions, emoji, …) still apply.
const renderTokens = (list: any[], path: string): React.ReactNode[] => {
const out: React.ReactNode[] = []
for (let i = 0; i < list.length; i++) {
const token = list[i]
const tokenKey = `${path}-${i}`
if (token.type === 'text' || token.type === 'escape') {
out.push(
...parseInlineMarkdownLegacy(
String(token.text ?? token.raw ?? ''),
`${keyPrefix}-${tokenKey}-text`,
_footnotes,
emojiInfos,
navigateToHashtag
)
)
continue
}
if (token.type === 'strong') {
out.push(
{renderTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${tokenKey}-strong`)}
)
continue
}
if (token.type === 'em') {
out.push(
{renderTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${tokenKey}-em`)}
)
continue
}
if (token.type === 'del') {
out.push(
{renderTokens(token.tokens ?? [{ type: 'text', text: token.text ?? '' }], `${tokenKey}-del`)}
)
continue
}
if (token.type === 'codespan') {
out.push(
)
continue
}
if (token.type === 'link') {
const href = String(token.href ?? '')
// Nested anchors are invalid HTML; strip any inner <a> from the children.
const children = stripNestedAnchorsFromNodes(
renderTokens(token.tokens ?? [{ type: 'text', text: token.text ?? href }], `${tokenKey}-link`),
`${tokenKey}-link-sanitized`
)
// payto:// links get a dedicated payment renderer.
if (href.startsWith('payto://')) {
out.push(
{children}
)
} else {
out.push(
{children}
)
}
continue
}
if (token.type === 'br') {
out.push(
)
continue
}
// Unknown/HTML token: treat as text to avoid unsafe HTML injection.
out.push(
...parseInlineMarkdownLegacy(
String(token.raw ?? token.text ?? ''),
`${keyPrefix}-${tokenKey}-fallback`,
_footnotes,
emojiInfos,
navigateToHashtag
)
)
}
return out
}
const rendered = renderTokens(tokens, `${keyPrefix}-md`)
// Defensive: if token rendering produced nothing, fall back to the legacy parser.
return rendered.length > 0
? rendered
: parseInlineMarkdownLegacy(normalized, keyPrefix, _footnotes, emojiInfos, navigateToHashtag)
}
// Legacy inline parser for non-standard enrichments. Scans the (normalized)
// text for custom inline patterns — math, markdown links, footnote refs,
// hashtags, relay URLs, nostr profile mentions, payto URIs, emoji shortcodes —
// then sorts them, drops overlaps (first match wins), and builds React nodes
// from the interleaved plain-text and pattern spans.
// NOTE(review): generic type args appear stripped (`_footnotes: Map`) and JSX
// markup in push(...) bodies looks missing in this view — confirm against the
// original file.
function parseInlineMarkdownLegacy(
text: string,
keyPrefix: string,
_footnotes: Map = new Map(),
emojiInfos: TEmoji[] = [],
navigateToHashtag?: (href: string) => void
): React.ReactNode[] {
// Debug tracing (gated) for content-spacing issues around nostr: mentions.
if (isContentSpacingDebug() && text.includes('nostr:')) {
// eslint-disable-next-line no-console
console.log('[imwald content-spacing] parseInlineMarkdown:before-normalize', {
keyPrefix,
repr: reprString(text)
})
}
// Normalize newlines to spaces at the start (defensive - text should already be normalized, but ensure it)
// This prevents any hard breaks within inline content
text = text.replace(/\n/g, ' ')
// Collapse multiple consecutive spaces/tabs (2+) into a single space, but preserve single spaces
text = text.replace(/[ \t]{2,}/g, ' ')
if (isContentSpacingDebug() && text.includes('nostr:')) {
// eslint-disable-next-line no-console
console.log('[imwald content-spacing] parseInlineMarkdown:after-normalize', {
keyPrefix,
repr: reprString(text)
})
}
const parts: React.ReactNode[] = []
let lastIndex = 0
// Collected pattern spans: [index, end) over `text`, tagged with type + payload.
const inlinePatterns: Array<{ index: number; end: number; type: string; data: any }> = []
// Math patterns go first so later matchers can avoid overlapping them.
collectMathInlinePatterns(text).forEach((pattern) => {
inlinePatterns.push(pattern)
})
// Legacy helper is intentionally narrowed to non-standard enrichments.
// Standard markdown emphasis/code is handled by marked in parseInlineMarkdown().
// Markdown links are still recognized here for plain-text/fallback inline fragments.
const markdownLinkRegex = /\[([^\]]+)\]\(([^)]+)\)/g
const markdownLinkMatches = Array.from(text.matchAll(markdownLinkRegex))
markdownLinkMatches.forEach(match => {
if (match.index !== undefined) {
// Skip if already in code, bold, italic, or strikethrough
const isInOther = inlinePatterns.some(p =>
(p.type === 'code' || p.type === 'bold' || p.type === 'italic' || p.type === 'strikethrough' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'link',
data: { text: match[1], url: match[2] }
})
}
}
})
// Footnote references: [^id]
// Only render as clickable refs when the referenced definition exists.
const footnoteRefRegex = /\[\^([^\]]+)\]/g
const footnoteRefMatches = Array.from(text.matchAll(footnoteRefRegex))
footnoteRefMatches.forEach(match => {
if (match.index !== undefined) {
const footnoteId = match[1]
if (!_footnotes.has(footnoteId)) return
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'footnote-ref',
data: footnoteId
})
}
}
})
// Hashtags: #tag (process after code/bold/italic/links to avoid conflicts)
const hashtagRegex = /#([a-zA-Z0-9_]+)/g
const hashtagMatches = Array.from(text.matchAll(hashtagRegex))
hashtagMatches.forEach(match => {
if (match.index !== undefined) {
// Skip if already in another inline custom pattern
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'hashtag',
data: match[1] // The tag without the #
})
}
}
})
// Relay URLs: wss:// or ws:// (process after code/bold/italic/links/hashtags to avoid conflicts)
const relayUrlMatches = Array.from(text.matchAll(WS_URL_REGEX))
relayUrlMatches.forEach(match => {
if (match.index !== undefined) {
const url = match[0]
// Only process if it's actually a websocket URL
if (isWebsocketUrl(url)) {
// Skip if already in another inline custom pattern
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'relay-url',
data: url
})
}
}
}
})
// Nostr addresses: nostr:npub1..., nostr:note1..., etc. (process after code/bold/italic/links/hashtags/relay-urls to avoid conflicts)
// Only process profile types (npub/nprofile) inline; event types (note/nevent/naddr) should remain block-level
const nostrRegex = new RegExp(NOSTR_URI_INLINE_REGEX.source, NOSTR_URI_INLINE_REGEX.flags)
const nostrMatches = Array.from(text.matchAll(nostrRegex))
nostrMatches.forEach(match => {
if (match.index !== undefined) {
const bech32Id = match[1]
// Only process profile types inline; event types should remain block-level
const isProfileType = bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')
if (isProfileType) {
// Skip if already in another inline custom pattern
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'nostr',
data: bech32Id
})
}
}
}
})
// payto: URIs (RFC-8905 / NIP-A3) – process after nostr so we don't match inside other patterns
const paytoMatches = Array.from(text.matchAll(PAYTO_URI_REGEX))
paytoMatches.forEach(match => {
if (match.index !== undefined) {
const fullMatch = match[0]
const parsed = parsePaytoUri(fullMatch)
if (!parsed) return
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'payto',
data: parsed
})
}
}
})
// Emoji shortcodes :shortcode: or :short code: (custom and native)
const emojiMatches = Array.from(text.matchAll(EMOJI_SHORT_CODE_REGEX))
emojiMatches.forEach(match => {
if (match.index !== undefined) {
const isInOther = inlinePatterns.some(p =>
(p.type === 'link' || p.type === 'hashtag' || p.type === 'relay-url' || p.type === 'nostr' || p.type === 'payto' || p.type === 'emoji' || p.type === 'math-inline' || p.type === 'math-block') &&
match.index! >= p.index &&
match.index! < p.end
)
if (!isInOther) {
inlinePatterns.push({
index: match.index,
end: match.index + match[0].length,
type: 'emoji',
data: (match[1] ?? match[0].slice(1, -1)).trim()
})
}
}
})
// Sort by index
inlinePatterns.sort((a, b) => a.index - b.index)
// Remove overlaps (keep first)
const filtered: typeof inlinePatterns = []
let lastEnd = 0
inlinePatterns.forEach(pattern => {
if (pattern.index >= lastEnd) {
filtered.push(pattern)
lastEnd = pattern.end
}
})
// Build nodes
filtered.forEach((pattern, i) => {
// consumeEnd may extend past pattern.end (math-block absorbs trailing punctuation).
let consumeEnd = pattern.end
// Add text before pattern
if (pattern.index > lastIndex) {
let textBefore = text.slice(lastIndex, pattern.index)
// Preserve spaces for proper spacing around inline elements
// Text is already normalized (newlines to spaces, multiple spaces collapsed to one)
// Even if textBefore is just whitespace, we need to preserve it for spacing
if (textBefore.length > 0) {
// If it's all whitespace, render as a space
if (textBefore.trim().length === 0) {
parts.push({' '})
} else {
parts.push({textBefore})
}
}
}
// Render custom inline pattern
if (pattern.type === 'link') {
const { text, url } = pattern.data
// payto:// link targets go through the payment renderer; others are plain links.
if (url.startsWith('payto://')) {
parts.push(
{parseInlineMarkdownLegacy(text, `${keyPrefix}-link-${i}`, _footnotes, emojiInfos)}
)
} else {
const linkContent = parseInlineMarkdownLegacy(
text,
`${keyPrefix}-link-${i}`,
_footnotes,
emojiInfos
)
parts.push(
{linkContent}
)
}
} else if (pattern.type === 'hashtag') {
// Render hashtags as inline links (green to match theme)
const tag = pattern.data
const tagLower = tag.toLowerCase()
parts.push(
{
if (!navigateToHashtag) return
e.stopPropagation()
e.preventDefault()
navigateToHashtag(`/notes?t=${tagLower}`)
}}
>
#{tag}
)
} else if (pattern.type === 'footnote-ref') {
const footnoteId = pattern.data
parts.push(
)
} else if (pattern.type === 'relay-url') {
// Render relay URLs as inline links (green to match theme)
const url = pattern.data
const relayPath = `/relays/${encodeURIComponent(url)}`
// Note: We can't use navigateToRelay here since this is a pure function
// The link will navigate normally, or we could make this a callback
parts.push(
{url}
)
} else if (pattern.type === 'nostr') {
// Render nostr addresses - only profile types (npub/nprofile) should be here (event types remain block-level)
const bech32Id = pattern.data
if (bech32Id.startsWith('npub') || bech32Id.startsWith('nprofile')) {
// Render as inline mention
parts.push(
)
} else {
// Fallback for unexpected types (shouldn't happen, but handle gracefully)
parts.push(nostr:{bech32Id})
}
} else if (pattern.type === 'payto') {
const payto = pattern.data as { type: string; authority: string; raw: string }
parts.push(
)
} else if (pattern.type === 'emoji') {
const shortcode = pattern.data as string
// Prefer the event's custom emoji; fall back to native emoji lookup
// (also retrying with spaces collapsed to underscores); else keep the raw text.
const custom = emojiInfos.find((e) => e.shortcode === shortcode)
if (custom) {
parts.push()
} else {
const native = shortcodeToEmoji(shortcode, emojis) ?? shortcodeToEmoji(shortcode.replace(/\s+/g, '_'), emojis)
if (native?.emoji) {
parts.push()
} else {
parts.push({`:${shortcode}:`})
}
}
} else if (pattern.type === 'math-inline' || pattern.type === 'math-block') {
if (pattern.type === 'math-block') {
// Absorb a single trailing punctuation mark so it doesn't orphan
// on its own line after the block math.
const after = text.slice(pattern.end)
const punctMatch = after.match(/^\s*([.,;:!?])\s*$/)
if (punctMatch) {
consumeEnd = pattern.end + punctMatch[0].length
parts.push(
{punctMatch[1]}
)
} else {
parts.push(
)
}
} else {
parts.push(
)
}
}
lastIndex = consumeEnd
})
// Add remaining text
if (lastIndex < text.length) {
const remaining = text.slice(lastIndex)
// Preserve spaces - text should already be normalized (newlines converted to spaces)
if (remaining.length > 0) {
// If it's all whitespace, render as a space
if (remaining.trim().length === 0) {
parts.push({' '})
} else {
parts.push({remaining})
}
}
}
// If no patterns found, return the text as-is (already normalized at start of function)
if (parts.length === 0) {
const trimmedText = text.trim()
return trimmedText ? [{trimmedText}] : []
}
return parts
}
export default function MarkdownArticle({
event,
className,
hideMetadata = false,
lazyMedia = true,
parentImageUrl,
fullCalendarInvite,
duplicateWebPreviewCleanedUrlHints
}: {
event: Event
className?: string
hideMetadata?: boolean
/**
* When true (default), images in the note are held as blur/skeleton placeholders
* until the user opens them in the lightbox. Set to false in full/detail views
* so images load immediately.
*/
lazyMedia?: boolean
parentImageUrl?: string
/** When viewing a kind-24 invite, render full calendar card with RSVP in place of the naddr embed */
fullCalendarInvite?: { naddr: string; event: Event }
/** e.g. RSS/article URL-thread root: suppress duplicate WebPreview for the same page already shown as OP */
duplicateWebPreviewCleanedUrlHints?: string[]
}) {
const secondaryPage = useSecondaryPageOptional()
const push = secondaryPage?.push ?? ((url: string) => { window.location.href = url })
const { navigateToHashtag } = useSmartHashtagNavigationOptional()
const { navigateToRelay } = useSmartRelayNavigationOptional()
const metadata = useMemo(() => getLongFormArticleMetadataFromEvent(event), [event])
const iArticleUrl = useMemo(() => getHttpUrlFromITags(event), [event])
const webPreviewSuppressCleanedSet = useMemo(() => {
const s = new Set()
const addHint = (raw: string) => {
const t = raw.trim()
if (!t) return
const c = cleanUrl(t)
if (c) s.add(c)
else s.add(t)
if (t.startsWith('http://') || t.startsWith('https://')) {
const canon = canonicalizeRssArticleUrl(t)
if (canon) s.add(canon)
}
}
if (iArticleUrl) addHint(iArticleUrl)
for (const h of duplicateWebPreviewCleanedUrlHints ?? []) addHint(h)
return s
}, [iArticleUrl, duplicateWebPreviewCleanedUrlHints])
/** URL-thread OP already shows this link; hide the embedded i-tag card on kind 1111 / scoped replies */
const suppressITagArticleWebPreview = useMemo(() => {
if (!iArticleUrl || !duplicateWebPreviewCleanedUrlHints?.length) return false
const canon = canonicalizeRssArticleUrl(iArticleUrl)
return duplicateWebPreviewCleanedUrlHints.some(
(h) => canonicalizeRssArticleUrl(h) === canon
)
}, [iArticleUrl, duplicateWebPreviewCleanedUrlHints])
// Extract all media from event
const extractedMedia = useMediaExtraction(event, event.content)
// Extract media from tags only (for display at top)
const tagMedia = useMemo(() => {
const seenUrls = new Set()
const media: Array<{ url: string; type: 'image' | 'video' | 'audio'; poster?: string }> = []
// Extract from imeta tags
const imetaInfos = getImetaInfosFromEvent(event)
imetaInfos.forEach((info) => {
const cleaned = cleanUrl(info.url)
if (!cleaned || seenUrls.has(cleaned)) return
if (!isImage(cleaned) && !isMedia(cleaned)) return
seenUrls.add(cleaned)
if (info.m?.startsWith('image/') || isImage(cleaned)) {
media.push({ url: info.url, type: 'image' })
} else if (info.m?.startsWith('video/') || isVideo(cleaned)) {
media.push({ url: info.url, type: 'video', poster: info.image })
} else if (info.m?.startsWith('audio/') || isAudio(cleaned)) {
media.push({ url: info.url, type: 'audio' })
}
})
// Extract from r tags
event.tags.filter(tag => tag[0] === 'r' && tag[1]).forEach(tag => {
const url = tag[1]
const cleaned = cleanUrl(url)
if (!cleaned || seenUrls.has(cleaned)) return
if (!isImage(cleaned) && !isMedia(cleaned)) return
seenUrls.add(cleaned)
if (isImage(cleaned)) {
media.push({ url, type: 'image' })
} else if (isVideo(cleaned)) {
media.push({ url, type: 'video' })
} else if (isAudio(cleaned)) {
media.push({ url, type: 'audio' })
}
})
// Extract from image tag
const imageTag = event.tags.find(tag => tag[0] === 'image' && tag[1])
if (imageTag?.[1]) {
const cleaned = cleanUrl(imageTag[1])
if (cleaned && !seenUrls.has(cleaned) && isImage(cleaned)) {
seenUrls.add(cleaned)
media.push({ url: imageTag[1], type: 'image' })
}
}
return media
}, [event.id, JSON.stringify(event.tags)])
// Extract YouTube URLs from tags (for display at top)
const tagYouTubeUrls = useMemo(() => {
const youtubeUrls: string[] = []
const seenUrls = new Set()
event.tags
.filter(tag => tag[0] === 'r' && tag[1])
.forEach(tag => {
const url = tag[1]
if (!url.startsWith('http://') && !url.startsWith('https://')) return
if (!isYouTubeUrl(url)) return
const cleaned = cleanUrl(url)
if (cleaned && !seenUrls.has(cleaned)) {
youtubeUrls.push(cleaned)
seenUrls.add(cleaned)
}
})
return youtubeUrls
}, [event.id, JSON.stringify(event.tags)])
// Extract non-media links from tags (excluding YouTube URLs)
const tagLinks = useMemo(() => {
const links: string[] = []
const seenUrls = new Set()
event.tags
.filter(tag => tag[0] === 'r' && tag[1])
.forEach(tag => {
const url = tag[1]
if (!url.startsWith('http://') && !url.startsWith('https://')) return
if (isPseudoNostrHttpsUrl(url)) return
if (isImage(url) || isMedia(url)) return
if (isYouTubeUrl(url)) return // Exclude YouTube URLs
const cleaned = cleanUrl(url)
if (cleaned && !seenUrls.has(cleaned)) {
links.push(cleaned)
seenUrls.add(cleaned)
}
})
return links
}, [event.id, JSON.stringify(event.tags)])
// Get all images for gallery (deduplicated)
const allImages = useMemo(() => {
const seenUrls = new Set()
const images: Array<{ url: string; alt?: string }> = []
// Add images from extractedMedia
extractedMedia.images.forEach(img => {
const cleaned = cleanUrl(img.url)
if (cleaned && !seenUrls.has(cleaned)) {
seenUrls.add(cleaned)
images.push({ url: img.url, alt: img.alt })
}
})
// Add metadata image if it exists
if (metadata.image) {
const cleaned = cleanUrl(metadata.image)
if (cleaned && !seenUrls.has(cleaned) && isImage(cleaned)) {
seenUrls.add(cleaned)
images.push({ url: metadata.image })
}
}
return images
}, [extractedMedia.images, metadata.image])
// Helper function to extract image filename/hash from URL for comparison
// This helps identify the same image hosted on different domains
const getImageIdentifier = useMemo(() => {
return (url: string): string | null => {
try {
const cleaned = cleanUrl(url)
if (!cleaned) return null
const parsed = new URL(cleaned)
const pathname = parsed.pathname
// Extract the filename (last segment of the path)
const filename = pathname.split('/').pop() || ''
// If the filename looks like a hash (hex string), use it for comparison
// Also use the full pathname as a fallback
if (filename && /^[a-f0-9]{32,}\.(png|jpg|jpeg|gif|webp|svg)$/i.test(filename)) {
return filename.toLowerCase()
}
// Fallback to cleaned URL for non-hash filenames
return cleaned
} catch {
return cleanUrl(url) || null
}
}
}, [])
// Lookup table for the lightbox: cleaned image URL → index into allImages.
// Also keyed by "__img_id:<identifier>" so the same image hosted on a
// different domain resolves to the same gallery slot.
const imageIndexMap = useMemo(() => {
  const index = new Map()
  allImages.forEach((img, position) => {
    const cleaned = cleanUrl(img.url)
    if (!cleaned) return
    index.set(cleaned, position)
    const identifier = getImageIdentifier(cleaned)
    // Secondary identifier key only when distinct from the URL key;
    // first occurrence wins so earlier gallery entries keep their slot
    if (identifier && identifier !== cleaned && !index.has(`__img_id:${identifier}`)) {
      index.set(`__img_id:${identifier}`, position)
    }
  })
  return index
}, [allImages, getImageIdentifier])
// Scan the raw note content for media URLs that will already be rendered
// inline, so tag-declared media can be filtered against them. The returned
// Set holds cleaned URLs plus "__img_id:<identifier>" entries for
// filename/hash-based cross-domain matching.
const mediaUrlsInContent = useMemo(() => {
  // Typed sets: the untyped `new Set()` inferred Set<unknown> under strict mode
  const urls = new Set<string>()
  const imageIdentifiers = new Set<string>()
  // matchAll replaces the manual exec() loop; same regex, same matches
  for (const match of event.content.matchAll(/https?:\/\/[^\s<>"']+/g)) {
    const cleaned = cleanUrl(match[0])
    if (cleaned && (isImage(cleaned) || isVideo(cleaned) || isAudio(cleaned))) {
      urls.add(cleaned)
      // Also record the image identifier for filename-based matching
      const identifier = getImageIdentifier(cleaned)
      if (identifier) {
        imageIdentifiers.add(identifier)
      }
    }
  }
  // Store identifiers in the same Set, namespaced by prefix to avoid collisions
  imageIdentifiers.forEach(id => urls.add(`__img_id:${id}`))
  return urls
}, [event.content, getImageIdentifier])
// Collect every YouTube URL appearing in the note body (cleaned), so
// tag-declared YouTube links can be de-duplicated against the content.
const youtubeUrlsInContent = useMemo(() => {
  const found = new Set()
  const pattern = /https?:\/\/[^\s<>"']+/g
  let hit: RegExpExecArray | null
  while ((hit = pattern.exec(event.content)) !== null) {
    const cleaned = cleanUrl(hit[0])
    if (cleaned && isYouTubeUrl(cleaned)) {
      found.add(cleaned)
    }
  }
  return found
}, [event.content])
// Plain (non-media, non-YouTube) links found directly in the note body,
// cleaned and de-duplicated, in order of first appearance.
const contentLinks = useMemo(() => {
  const seen = new Set()
  const found: string[] = []
  const pattern = /https?:\/\/[^\s<>"']+/g
  let hit: RegExpExecArray | null
  while ((hit = pattern.exec(event.content)) !== null) {
    const url = hit[0]
    // Guard clauses: must be http(s) and must not be media or YouTube
    const isHttp = url.startsWith('http://') || url.startsWith('https://')
    if (!isHttp || isImage(url) || isMedia(url) || isYouTubeUrl(url)) continue
    const cleaned = cleanUrl(url)
    if (cleaned && !seen.has(cleaned)) {
      seen.add(cleaned)
      found.push(cleaned)
    }
  }
  return found
}, [event.content])
// Image gallery state
// lightboxOpen: whether the fullscreen image viewer is visible
const [lightboxOpen, setLightboxOpen] = useState(false)
// lightboxIndex: which image in allImages the viewer currently shows
const [lightboxIndex, setLightboxIndex] = useState(0)
// Open the lightbox at a specific gallery index; stable identity (empty deps)
// so it can be passed to memoized children without causing re-renders
const openLightbox = useCallback((index: number) => {
setLightboxIndex(index)
setLightboxOpen(true)
}, [])
// Tag-declared media that is NOT already rendered from the content, filtered
// so we don't duplicate the metadata image or the parent publication's cover.
const leftoverTagMedia = useMemo(() => {
  const metadataImageUrl = metadata.image ? cleanUrl(metadata.image) : null
  const parentImageUrlCleaned = parentImageUrl ? cleanUrl(parentImageUrl) : null
  return tagMedia.filter(media => {
    const cleaned = cleanUrl(media.url)
    if (!cleaned) return false
    // Already rendered inline (exact cleaned-URL match)
    if (mediaUrlsInContent.has(cleaned)) return false
    // Same image on a different domain (filename/hash identifier match)
    const identifier = getImageIdentifier(cleaned)
    if (identifier && mediaUrlsInContent.has(`__img_id:${identifier}`)) return false
    // Skip if this is the metadata image (shown separately when metadata is visible)
    if (metadataImageUrl && cleaned === metadataImageUrl && !hideMetadata) return false
    // Skip if this matches the parent publication's image (avoid duplicate covers)
    if (parentImageUrlCleaned && cleaned === parentImageUrlCleaned) return false
    return true
  })
  // getImageIdentifier added to deps (exhaustive-deps fix); it has a stable
  // identity, so this causes no extra recomputation
}, [tagMedia, mediaUrlsInContent, metadata.image, hideMetadata, parentImageUrl, getImageIdentifier])
// Tag-declared YouTube URLs that the content body didn't already render
const leftoverTagYouTubeUrls = useMemo(() => {
  const stillNeeded: string[] = []
  for (const url of tagYouTubeUrls) {
    const cleaned = cleanUrl(url)
    if (cleaned && !youtubeUrlsInContent.has(cleaned)) {
      stillNeeded.push(url)
    }
  }
  return stillNeeded
}, [tagYouTubeUrls, youtubeUrlsInContent])
// Tag-declared links that should still get a WebPreview card: drop anything
// already linked in the content, anything in the suppression set (by cleaned
// URL or by canonicalized RSS-article URL), and anything uncleanable.
const leftoverTagLinks = useMemo(() => {
  const inContent = new Set(contentLinks.map((link) => cleanUrl(link)).filter(Boolean))
  return tagLinks.filter((link) => {
    const cleaned = cleanUrl(link)
    if (!cleaned) return false
    // Explicitly suppressed by cleaned URL
    if (webPreviewSuppressCleanedSet.has(cleaned)) return false
    // Suppressed by RSS-article canonical form (only meaningful for http(s) links)
    const isHttp = link.startsWith('http://') || link.startsWith('https://')
    if (isHttp && webPreviewSuppressCleanedSet.has(canonicalizeRssArticleUrl(link))) {
      return false
    }
    // Keep only links not already present in the content body
    return !inContent.has(cleaned)
  })
}, [tagLinks, contentLinks, webPreviewSuppressCleanedSet])
// Normalization pipeline applied to the raw note content before markdown
// parsing. Order matters: unescape first, then whitespace/header/backtick
// normalization, then emoji substitution, and media-link preprocessing last.
const preprocessedContent = useMemo(() => {
  // Custom emoji shortcodes stay as :shortcode: so they can be looked up in tags
  const customShortcodes = event.tags
    .filter((t) => t[0] === 'emoji')
    .map((t) => t[1])
    .filter(Boolean)
  let text = unescapeJsonContent(event.content)      // JSON-encoded escapes → chars
  text = normalizeNewlines(text)                     // 3+ newlines → 2
  text = normalizeInlineFormattingNewlines(text)     // newlines in bold/italic → spaces
  text = normalizeSetextHeaders(text)                // ===/--- headers → #/##
  text = normalizeBackticks(text)                    // inline code & code fences
  text = replaceStandardEmojiShortcodesInContent(text, customShortcodes)
  return preprocessMarkdownMediaLinks(text)          // bare media URLs → markdown
}, [event.content, event.tags])
// Maps cleaned video URL → poster image URL, sourced from imeta tags.
const videoPosterMap = useMemo(() => {
  const posters = new Map()
  for (const info of getImetaInfosFromEvent(event)) {
    // Guard clauses preserve the original short-circuit: no poster image, skip;
    // then require a video MIME type or a video-looking URL
    if (!info.image) continue
    if (!(info.m?.startsWith('video/') || isVideo(info.url))) continue
    const cleaned = cleanUrl(info.url)
    if (cleaned) {
      posters.set(cleaned, info.image)
    }
  }
  return posters
}, [event.id, JSON.stringify(event.tags)])
// Maps cleaned image URL (and "__img_id:<identifier>" key) → thumbnail URL,
// sourced from imeta tags. The identifier key lets the same image hosted on
// another domain still find its thumbnail.
const imageThumbnailMap = useMemo(() => {
  const map = new Map()
  const imetaInfos = getImetaInfosFromEvent(event)
  imetaInfos.forEach((info) => {
    // Only image entries that declare an explicit thumbnail
    if (info.thumb && (info.m?.startsWith('image/') || isImage(info.url))) {
      const cleaned = cleanUrl(info.url)
      // info.thumb is already guaranteed by the guard above — the original
      // re-checked it redundantly here
      if (cleaned) {
        map.set(cleaned, info.thumb)
        // Secondary identifier key for cross-domain matching
        const identifier = getImageIdentifier(cleaned)
        if (identifier) {
          map.set(`__img_id:${identifier}`, info.thumb)
        }
      }
    }
  })
  return map
}, [event.id, JSON.stringify(event.tags), getImageIdentifier])
// Maps cleaned image URL → blurhash string (for inline placeholder rendering)
const imageBlurHashMap = useMemo(() => {
  const hashes = new Map()
  for (const info of getImetaInfosFromEvent(event)) {
    if (!info.blurHash) continue
    const cleaned = cleanUrl(info.url)
    if (cleaned) {
      hashes.set(cleaned, info.blurHash)
    }
  }
  return hashes
}, [event.id, JSON.stringify(event.tags)])
// Custom emoji definitions parsed from the event's "emoji" tags
const emojiInfos = useMemo(() => getEmojiInfosFromEmojiTags(event.tags), [event.tags])
// Parse the preprocessed markdown into renderable nodes, with post-processing
// for nostr: links and hashtags. If the marked-based parser throws, fall back
// to the legacy parser so the note still renders.
const { nodes: parsedContent, hashtagsInContent } = useMemo(() => {
  const parseOptions = {
    eventPubkey: event.pubkey,
    imageIndexMap,
    openLightbox,
    navigateToHashtag,
    navigateToRelay,
    videoPosterMap,
    imageThumbnailMap,
    getImageIdentifier,
    emojiInfos,
    fullCalendarInvite,
    containingEvent: event,
    lazyMedia,
    // Only pass the suppress set when it actually has entries
    suppressStandaloneWebPreviewCleanedUrls:
      webPreviewSuppressCleanedSet.size > 0 ? webPreviewSuppressCleanedSet : undefined
  }
  let result
  try {
    result = parseMarkdownContentMarked(preprocessedContent, parseOptions)
  } catch (error) {
    logger.error('Marked parser failed, falling back to legacy parser:', error)
    result = parseMarkdownContentLegacy(preprocessedContent, parseOptions)
  }
  // Return nodes and hashtags (footnotes are already included in nodes)
  return { nodes: result.nodes, hashtagsInContent: result.hashtagsInContent }
}, [
  // `event` already covers `event.pubkey`; the separate pubkey entry was redundant
  preprocessedContent,
  event,
  imageIndexMap,
  openLightbox,
  navigateToHashtag,
  navigateToRelay,
  videoPosterMap,
  imageThumbnailMap,
  getImageIdentifier,
  emojiInfos,
  fullCalendarInvite,
  lazyMedia,
  webPreviewSuppressCleanedSet
])
// Metadata hashtags that the content body didn't already render inline
// (comparison is case-insensitive on the tag)
const leftoverMetadataTags = useMemo(
  () => metadata.tags.filter((tag) => !hashtagsInContent.has(tag.toLowerCase())),
  [metadata.tags, hashtagsInContent]
)
// NOTE(review): the JSX element tags appear to have been stripped from this
// extract (orphaned props and closing parens below) — verify every comment
// placement against the original file before relying on this annotation.
return (
<>
{/* Web preview card for the article URL from the event's "i" tag, unless suppressed */}
{iArticleUrl && !suppressITagArticleWebPreview && (
)}
{/* Metadata */}
{!hideMetadata && metadata.title &&
{metadata.title}
}
{!hideMetadata && metadata.summary && (
{metadata.summary}
)}
{/* When metadata is hidden, still show the title for non-discussion kinds */}
{hideMetadata && metadata.title && event.kind !== ExtendedKind.DISCUSSION && (
{metadata.title}
)}
{/* Metadata image */}
{!hideMetadata && metadata.image && (() => {
const cleanedMetadataImage = cleanUrl(metadata.image)
const parentImageUrlCleaned = parentImageUrl ? cleanUrl(parentImageUrl) : null
// Don't show if already in content (check by URL and by identifier)
if (cleanedMetadataImage) {
if (mediaUrlsInContent.has(cleanedMetadataImage)) return null
const identifier = getImageIdentifier(cleanedMetadataImage)
if (identifier && mediaUrlsInContent.has(`__img_id:${identifier}`)) return null
}
// Don't show if it matches the parent publication's image (to avoid duplicate cover images)
if (parentImageUrlCleaned && cleanedMetadataImage === parentImageUrlCleaned) return null
const metadataImageIndex = imageIndexMap.get(cleanedMetadataImage)
return (
{
e.stopPropagation()
if (metadataImageIndex !== undefined) {
openLightbox(metadataImageIndex)
}
}}
/>
)
})()}
{/* Media from tags (only if not in content) */}
{leftoverTagMedia.length > 0 && (
{leftoverTagMedia.map((media) => {
const cleaned = cleanUrl(media.url)
const mediaIndex = imageIndexMap.get(cleaned)
if (media.type === 'image') {
return (
{
e.stopPropagation()
if (mediaIndex !== undefined) {
openLightbox(mediaIndex)
}
}}
/>
)
} else if (media.type === 'video' || media.type === 'audio') {
return (
)
}
return null
})}
)}
{/* YouTube URLs from tags (only if not in content) */}
{leftoverTagYouTubeUrls.length > 0 && (
{leftoverTagYouTubeUrls.map((url) => {
const cleaned = cleanUrl(url)
return (
)
})}
)}
{/* Parsed content */}
{parsedContent}
{/* Hashtags from metadata (only if not already in content) */}
{leftoverMetadataTags.length > 0 && (
{leftoverMetadataTags.map((tag) => (
{
e.stopPropagation()
push(toNoteList({ hashtag: tag, kinds: [kinds.LongFormArticle] }))
}}
>
#{tag}
))}
)}
{/* WebPreview cards for links from tags (only if not already in content) */}
{/* Note: Links in content are already rendered as green hyperlinks above, so we don't show WebPreview for them */}
{leftoverTagLinks.length > 0 && (
{leftoverTagLinks.map((url, index) => (
))}
)}
{/* Image gallery lightbox */}
{/* Rendered in a portal to document.body; event handlers stop propagation so gestures don't reach the card underneath */}
{allImages.length > 0 && createPortal(
e.stopPropagation()}
onPointerDown={(e) => e.stopPropagation()}
onMouseDown={(e) => e.stopPropagation()}
onTouchStart={(e) => e.stopPropagation()}
>
({
src: preferBlossomPrimalDisplayUrl(url),
alt: alt || url
}))}
plugins={[Zoom]}
open={lightboxOpen}
close={() => setLightboxOpen(false)}
on={{
view: ({ index }) => setLightboxIndex(index)
}}
controller={{
closeOnBackdropClick: false,
closeOnPullUp: true,
closeOnPullDown: true
}}
render={{
buttonPrev: allImages.length <= 1 ? () => null : undefined,
buttonNext: allImages.length <= 1 ? () => null : undefined
}}
styles={{
toolbar: { paddingTop: '2.25rem' }
}}
carousel={{
finite: false
}}
/>
,
document.body
)}
>
)
}