Browse Source

asciidoc images fix

imwald
Silberengel 5 months ago
parent
commit
78ef3319c8
  1. 93
      src/components/Note/AsciidocArticle/AsciidocArticle.tsx
  2. 85
      src/services/content-parser.service.ts

93
src/components/Note/AsciidocArticle/AsciidocArticle.tsx

@@ -10,6 +10,11 @@ import HighlightSourcePreview from '../../UniversalContent/HighlightSourcePrevie
import { Button } from '@/components/ui/button'
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible'
import { ExtendedKind } from '@/constants'
import { createPortal } from 'react-dom'
import Lightbox from 'yet-another-react-lightbox'
import Zoom from 'yet-another-react-lightbox/plugins/zoom'
import { TImetaInfo } from '@/types'
import { useMediaExtraction } from '@/hooks'
export default function AsciidocArticle({
event,
@@ -232,6 +237,67 @@ export default function AsciidocArticle({
return () => clearTimeout(timeoutId)
}, [parsedContent?.html, isArticleType])
// Extract images from content using the unified media extraction service
// This includes images from tags, content, and parsed HTML
const extractedMedia = useMediaExtraction(event, event.content)
// Extract images from parsed HTML (after AsciiDoc processing) for carousel
// This ensures we get images that were rendered in the HTML output
const imagesInContent = useMemo<TImetaInfo[]>(() => {
  if (!parsedContent?.html || !event) return []
  // Index image metadata from the unified extraction service by URL so
  // images found in the rendered HTML can be enriched with it.
  const metadataByUrl = new Map<string, TImetaInfo>()
  for (const media of extractedMedia.all) {
    if (media.m?.startsWith('image/')) {
      metadataByUrl.set(media.url, media)
    }
  }
  // Scan the post-AsciiDoc HTML for <img> tags, deduplicating by src in
  // document order; fall back to a minimal entry when no metadata exists.
  const collected: TImetaInfo[] = []
  const seen = new Set<string>()
  for (const m of parsedContent.html.matchAll(/<img[^>]+src=["']([^"']+)["'][^>]*>/gi)) {
    const src = m[1]
    if (!src || seen.has(src)) continue
    seen.add(src)
    collected.push(metadataByUrl.get(src) ?? { url: src, pubkey: event.pubkey })
  }
  return collected
}, [parsedContent?.html, event, extractedMedia])
// Handle image clicks to open carousel
const [lightboxIndex, setLightboxIndex] = useState(-1)
useEffect(() => {
  const container = contentRef.current
  if (!container || imagesInContent.length === 0) return
  // Delegated click handler: only <img> elements the parser tagged as
  // AsciiDoc images open the lightbox, at their recorded carousel index.
  const onImageClick = (event: MouseEvent) => {
    const el = event.target as HTMLElement
    if (el.tagName !== 'IMG' || !el.hasAttribute('data-asciidoc-image')) return
    event.preventDefault()
    event.stopPropagation()
    const idx = el.getAttribute('data-image-index')
    if (idx !== null) {
      setLightboxIndex(parseInt(idx, 10))
    }
  }
  container.addEventListener('click', onImageClick)
  return () => {
    container.removeEventListener('click', onImageClick)
  }
}, [imagesInContent.length])
if (isLoading) {
return (
@@ -303,6 +369,33 @@ export default function AsciidocArticle({
dangerouslySetInnerHTML={{ __html: parsedContent?.html || '' }}
/>
{/* Image carousel lightbox */}
{imagesInContent.length > 0 && lightboxIndex >= 0 && createPortal(
<div onClick={(e) => e.stopPropagation()}>
<Lightbox
index={lightboxIndex}
slides={imagesInContent.map(({ url }) => ({
src: url,
alt: url
}))}
plugins={[Zoom]}
open={lightboxIndex >= 0}
close={() => setLightboxIndex(-1)}
controller={{
closeOnBackdropClick: true,
closeOnPullUp: true,
closeOnPullDown: true
}}
styles={{
toolbar: { paddingTop: '2.25rem' }
}}
carousel={{
finite: false
}}
/>
</div>,
document.body
)}
{/* Collapsible Article Info - only for article-type events */}
{!hideImagesAndInfo && isArticleType && (parsedContent?.nostrLinks?.length > 0 || parsedContent?.highlightSources?.length > 0 || parsedContent?.hashtags?.length > 0) && (

85
src/services/content-parser.service.ts

@@ -118,11 +118,26 @@ class ContentParserService {
return this.parsePlainText(content)
}
// Check if content starts with level 3+ headers (=== or deeper)
// Asciidoctor article doctype requires level 1 (=) or level 2 (==) before level 3 (===)
// If content starts with level 3+, use book doctype which allows sections at any level
const firstHeaderMatch = content.match(/^(={1,6})\s+/m)
let doctype: 'article' | 'book' = 'article'
if (firstHeaderMatch) {
const firstHeaderLevel = firstHeaderMatch[1].length
// If first header is level 3 or deeper, use book doctype
// Book doctype allows sections at any level without requiring hierarchy
if (firstHeaderLevel >= 3) {
doctype = 'book'
}
}
try {
const result = asciidoctor.convert(content, {
safe: 'safe',
backend: 'html5',
doctype: 'article',
doctype: doctype,
attributes: {
'showtitle': true,
'sectanchors': true,
@@ -169,8 +184,11 @@ class ContentParserService {
// Process wikilinks in the HTML output
const processedHtml = this.processWikilinksInHtml(htmlString)
// Process images: add max-width styling and prepare for carousel
const imagesProcessedHtml = this.processImagesInHtml(processedHtml)
// Clean up any leftover markdown syntax and hide raw ToC text
const cleanedHtml = this.cleanupMarkdown(processedHtml)
const cleanedHtml = this.cleanupMarkdown(imagesProcessedHtml)
// Add proper CSS classes for styling
const styledHtml = this.addStylingClasses(cleanedHtml)
@@ -191,7 +209,17 @@ class ContentParserService {
switch (markupType) {
case 'asciidoc':
asciidoc = content
// For AsciiDoc content, ensure proper formatting
// Convert escaped newlines to actual newlines
asciidoc = content.replace(/\\n/g, '\n')
// Ensure headers are on their own lines with proper spacing
// AsciiDoc requires blank lines before headers when they follow other content
// Fix pattern: non-empty line + newline + header without blank line between
asciidoc = asciidoc.replace(/(\S[^\n]*)\n(={1,6}\s+[^\n]+)/g, (_match, before, header) => {
// Add blank line before header if it follows non-empty content
return `${before}\n\n${header}`
})
break
case 'advanced-markdown':
@@ -543,6 +571,57 @@ class ContentParserService {
return processed
}
/**
 * Process images in HTML output: add max-width styling and data attributes for carousel.
 *
 * Each <img> gets a constrained width plus `data-asciidoc-image` /
 * `data-image-index` attributes so the article component can open the
 * lightbox at the clicked image's carousel position. Indices are assigned
 * by first occurrence of each unique src, matching the component-side
 * dedup order.
 */
private processImagesInHtml(html: string): string {
  // First pass: collect unique image URLs in document order so every
  // duplicate of a URL maps to the same carousel index.
  // Set lookup keeps this O(n) instead of O(n^2) with Array#includes.
  const imageUrls: string[] = []
  const seenUrls = new Set<string>()
  const imageUrlRegex = /<img[^>]+src=["']([^"']+)["'][^>]*>/gi
  let match: RegExpExecArray | null
  while ((match = imageUrlRegex.exec(html)) !== null) {
    const url = match[1]
    if (url && !seenUrls.has(url)) {
      seenUrls.add(url)
      imageUrls.push(url)
    }
  }
  // Second pass: rewrite each <img> tag with styling classes and carousel data.
  // The optional `\/?` keeps a self-closing tag's trailing slash OUT of the
  // captured attribute string (it previously leaked into the rewritten tag).
  return html.replace(/<img([^>]+?)\/?>/gi, (imgTag, attributes: string) => {
    const srcMatch = attributes.match(/src=["']([^"']+)["']/i)
    if (!srcMatch) return imgTag
    const src = srcMatch[1]
    const currentIndex = imageUrls.indexOf(src)
    // Classes applied in both branches — previously the merge branch
    // omitted h-auto, giving inconsistently sized images.
    const addedClasses = 'max-w-[400px] h-auto object-contain cursor-zoom-in'
    let updatedAttributes = attributes
    if (updatedAttributes.match(/class=["']/i)) {
      updatedAttributes = updatedAttributes.replace(/class=["']([^"']*)["']/i, (_match: string, classes: string) => {
        // Drop any pre-existing max-w-* utility before adding ours.
        const cleanedClasses = classes.replace(/max-w-\[?[^\s\]]+\]?/g, '').trim()
        return `class="${cleanedClasses ? `${cleanedClasses} ${addedClasses}` : addedClasses}"`
      })
    } else {
      updatedAttributes += ` class="${addedClasses}"`
    }
    // Data attributes consumed by the article component's click delegation.
    updatedAttributes += ` data-asciidoc-image="true" data-image-index="${currentIndex}" data-image-src="${src.replace(/"/g, '&quot;')}"`
    return `<img${updatedAttributes}>`
  })
}
/**
* Convert plain text to AsciiDoc format
*/

Loading…
Cancel
Save