diff --git a/src/PageManager.tsx b/src/PageManager.tsx
index 5087ac65..b6e79081 100644
--- a/src/PageManager.tsx
+++ b/src/PageManager.tsx
@@ -322,7 +322,7 @@ function restoredPrimaryBrowserUrl(pathname: string, fullUrlForQuery: string): s
function parseNoteUrl(url: string): { noteId: string; context?: string } {
// Match patterns like /discussions/notes/{noteId} or /notes/{noteId}
const contextualMatch = url.match(
- /\/(discussions|search|profile|home|feed|spells|explore)\/notes\/(.+)$/
+ /\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\/(.+)$/
)
if (contextualMatch) {
return { noteId: contextualMatch[2], context: contextualMatch[1] }
@@ -1012,7 +1012,7 @@ export function PageManager({ maxStackSize = 5 }: { maxStackSize?: number }) {
const pathname = window.location.pathname
// Check if this is a note URL - handle both /notes/{id} and /{context}/notes/{id}
- const contextualNoteMatch = pathname.match(/\/(discussions|search|profile|home|feed|spells|explore)\/notes\/(.+)$/)
+ const contextualNoteMatch = pathname.match(/\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\/(.+)$/)
const standardNoteMatch = pathname.match(/\/notes\/(.+)$/)
const noteUrlMatch = contextualNoteMatch || standardNoteMatch
@@ -1216,7 +1216,7 @@ export function PageManager({ maxStackSize = 5 }: { maxStackSize?: number }) {
// Check if pathname matches a primary page name
// First, check if it's a contextual note URL (e.g., /discussions/notes/...)
const contextualNoteMatch = pathname.match(
- /^\/(discussions|search|profile|home|feed|spells|explore)\/notes\//
+ /^\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\//
)
if (contextualNoteMatch) {
const pageContext = contextualNoteMatch[1]
@@ -1281,7 +1281,7 @@ export function PageManager({ maxStackSize = 5 }: { maxStackSize?: number }) {
const urlToCheck = state?.url || window.location.pathname
// Check if it's a note URL (we'll update drawer after stack is synced)
- const noteUrlMatch = urlToCheck.match(/\/(discussions|search|profile|home|feed|spells|explore)\/notes\/(.+)$/) ||
+ const noteUrlMatch = urlToCheck.match(/\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\/(.+)$/) ||
urlToCheck.match(/\/notes\/(.+)$/)
const noteIdToShow = noteUrlMatch ? noteUrlMatch[noteUrlMatch.length - 1].split('?')[0].split('#')[0] : null
@@ -1394,7 +1394,7 @@ export function PageManager({ maxStackSize = 5 }: { maxStackSize?: number }) {
}
// Check if navigating to a note URL (supports both /notes/{id} and /{context}/notes/{id})
- const noteUrlMatch = state.url.match(/\/(discussions|search|profile|home|feed|spells|explore)\/notes\/(.+)$/) ||
+ const noteUrlMatch = state.url.match(/\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\/(.+)$/) ||
state.url.match(/\/notes\/(.+)$/)
if (noteUrlMatch) {
const noteId = noteUrlMatch[noteUrlMatch.length - 1].split('?')[0].split('#')[0]
@@ -1445,7 +1445,7 @@ export function PageManager({ maxStackSize = 5 }: { maxStackSize?: number }) {
// Extract noteId from top item's URL or from state.url
const topItemUrl = newStack[newStack.length - 1]?.url || state?.url
if (topItemUrl) {
- const topNoteUrlMatch = topItemUrl.match(/\/(discussions|search|profile|home|feed|spells|explore)\/notes\/(.+)$/) ||
+ const topNoteUrlMatch = topItemUrl.match(/\/(discussions|search|profile|home|feed|spells|explore|rss)\/notes\/(.+)$/) ||
topItemUrl.match(/\/notes\/(.+)$/)
if (topNoteUrlMatch) {
const topNoteId = topNoteUrlMatch[topNoteUrlMatch.length - 1].split('?')[0].split('#')[0]
diff --git a/src/components/Note/Highlight/index.tsx b/src/components/Note/Highlight/index.tsx
index eecf2da1..403ff812 100644
--- a/src/components/Note/Highlight/index.tsx
+++ b/src/components/Note/Highlight/index.tsx
@@ -10,6 +10,15 @@ import { toNote } from '@/lib/link'
import { useFetchEvent } from '@/hooks'
import { useEffect, useState, useMemo } from 'react'
import { ExtendedKind } from '@/constants'
+import { resolveNip84HighlightDisplay } from '@/lib/nip84-highlight-display'
+
+function stripOuterQuotes(s: string): string {
+ let t = s.trim()
+ if (t.startsWith('"') && t.endsWith('"')) {
+ t = t.slice(1, -1).trim()
+ }
+ return t
+}
/**
* Check if a string is a URL or Nostr address
@@ -295,58 +304,52 @@ export default function Highlight({
}
}, [sourceTag, referencedEventAuthor, hasSpecialCard])
- // Extract the context (the main quote/full text being highlighted from)
- const contextTag = event.tags.find(tag => tag[0] === 'context')
- const context = contextTag?.[1] || event.content // Default to content if no context
-
- // The event.content is the highlighted portion
- const highlightedText = event.content
+ const { fullText, markedSpan } = useMemo(
+ () => resolveNip84HighlightDisplay(event),
+ [event.id, event.content, event.tags]
+ )
+
+ const markClassName =
+ 'bg-green-200 dark:bg-green-600 dark:text-white px-1 rounded font-medium'
+
+ const quotedBody = useMemo(() => {
+ const cleanFull = stripOuterQuotes(fullText)
+ const cleanMark = stripOuterQuotes(markedSpan)
+ if (!cleanFull) return null
+ if (!cleanMark || cleanFull === cleanMark) {
+ return (
+
+ {cleanFull}
+
+ )
+ }
+ const pieces = cleanFull.split(cleanMark)
+ if (pieces.length === 1) {
+ return (
+
+ {cleanFull}
+
+ )
+ }
+ return pieces.map((part, index) => (
+
+ {part}
+ {index < pieces.length - 1 && (
+
+ {cleanMark}
+
+ )}
+
+ ))
+ }, [fullText, markedSpan])
return (
- {/* Full quoted text with highlighted portion */}
- {context && (
+ {/* Full quoted text with highlighted portion (context, textquoteselector, or textpositionselector) */}
+ {quotedBody && (
- {contextTag && highlightedText ? (
- // If we have both context and highlighted text, show the highlight within the context
-
- {(() => {
- // Strip outer quotation marks if present
- let cleanContext = context.trim()
- if (cleanContext.startsWith('"') && cleanContext.endsWith('"')) {
- cleanContext = cleanContext.slice(1, -1).trim()
- }
- // Strip outer quotation marks from highlighted text if present
- let cleanHighlightedText = highlightedText.trim()
- if (cleanHighlightedText.startsWith('"') && cleanHighlightedText.endsWith('"')) {
- cleanHighlightedText = cleanHighlightedText.slice(1, -1).trim()
- }
- return cleanContext.split(cleanHighlightedText).map((part, index) => (
-
- {part}
- {index < cleanContext.split(cleanHighlightedText).length - 1 && (
-
- {cleanHighlightedText}
-
- )}
-
- ))
- })()}
-
- ) : (
- // If no context tag, just show the content as a regular quote
-
- {(() => {
- // Strip outer quotation marks if present
- let cleanContext = context.trim()
- if (cleanContext.startsWith('"') && cleanContext.endsWith('"')) {
- cleanContext = cleanContext.slice(1, -1).trim()
- }
- return cleanContext
- })()}
-
- )}
+
{quotedBody}
)}
diff --git a/src/components/Note/MarkdownArticle/MarkdownArticle.tsx b/src/components/Note/MarkdownArticle/MarkdownArticle.tsx
index 676d08ef..07e7882b 100644
--- a/src/components/Note/MarkdownArticle/MarkdownArticle.tsx
+++ b/src/components/Note/MarkdownArticle/MarkdownArticle.tsx
@@ -10,6 +10,7 @@ import { toNoteList } from '@/lib/link'
import { useMediaExtraction } from '@/hooks'
import { cleanUrl, isImage, isMedia, isVideo, isAudio, isWebsocketUrl } from '@/lib/url'
import { getHttpUrlFromITags, getImetaInfosFromEvent } from '@/lib/event'
+import { canonicalizeRssArticleUrl } from '@/lib/rss-article'
import { Event, kinds } from 'nostr-tools'
import Emoji from '@/components/Emoji'
import { ExtendedKind, WS_URL_REGEX, YOUTUBE_URL_REGEX } from '@/constants'
@@ -428,8 +429,8 @@ function parseMarkdownContent(
emojiInfos?: TEmoji[]
/** When viewing a kind-24 invite, render full calendar card with RSVP instead of EmbeddedNote for this naddr */
fullCalendarInvite?: { naddr: string; event: Event }
- /** If set, a standalone markdown link to this cleaned URL renders as inline link (OG shown separately). */
- suppressStandaloneWebPreviewForCleanedUrl?: string
+ /** Cleaned URL variants: standalone markdown links matching any render as inline (OG elsewhere). */
+ suppressStandaloneWebPreviewCleanedUrls?: ReadonlySet<string>
}
): { nodes: React.ReactNode[]; hashtagsInContent: Set; footnotes: Map; citations: Array<{ id: string; type: string; citationId: string }> } {
const {
@@ -443,7 +444,7 @@ function parseMarkdownContent(
getImageIdentifier,
emojiInfos = [],
fullCalendarInvite,
- suppressStandaloneWebPreviewForCleanedUrl
+ suppressStandaloneWebPreviewCleanedUrls
} = options
const parts: React.ReactNode[] = []
const hashtagsInContent = new Set()
@@ -1833,8 +1834,8 @@ function parseMarkdownContent(
const { url } = pattern.data
const cleanedStandalone = cleanUrl(url) || url
if (
- suppressStandaloneWebPreviewForCleanedUrl &&
- cleanedStandalone === suppressStandaloneWebPreviewForCleanedUrl
+ suppressStandaloneWebPreviewCleanedUrls &&
+ suppressStandaloneWebPreviewCleanedUrls.has(cleanedStandalone)
) {
parts.push(
{ window.location.href = url })
@@ -3231,10 +3235,33 @@ export default function MarkdownArticle({
const { navigateToRelay } = useSmartRelayNavigationOptional()
const metadata = useMemo(() => getLongFormArticleMetadataFromEvent(event), [event])
const iArticleUrl = useMemo(() => getHttpUrlFromITags(event), [event])
- const iArticleCleaned = useMemo(
- () => (iArticleUrl ? cleanUrl(iArticleUrl) || iArticleUrl : ''),
- [iArticleUrl]
- )
+
+ const webPreviewSuppressCleanedSet = useMemo(() => {
+ const s = new Set()
+ const addHint = (raw: string) => {
+ const t = raw.trim()
+ if (!t) return
+ const c = cleanUrl(t)
+ if (c) s.add(c)
+ else s.add(t)
+ if (t.startsWith('http://') || t.startsWith('https://')) {
+ const canon = canonicalizeRssArticleUrl(t)
+ if (canon) s.add(canon)
+ }
+ }
+ if (iArticleUrl) addHint(iArticleUrl)
+ for (const h of duplicateWebPreviewCleanedUrlHints ?? []) addHint(h)
+ return s
+ }, [iArticleUrl, duplicateWebPreviewCleanedUrlHints])
+
+ /** URL-thread OP already shows this link; hide the embedded i-tag card on kind 1111 / scoped replies */
+ const suppressITagArticleWebPreview = useMemo(() => {
+ if (!iArticleUrl || !duplicateWebPreviewCleanedUrlHints?.length) return false
+ const canon = canonicalizeRssArticleUrl(iArticleUrl)
+ return duplicateWebPreviewCleanedUrlHints.some(
+ (h) => canonicalizeRssArticleUrl(h) === canon
+ )
+ }, [iArticleUrl, duplicateWebPreviewCleanedUrlHints])
// Extract all media from event
const extractedMedia = useMediaExtraction(event, event.content)
@@ -3511,10 +3538,16 @@ export default function MarkdownArticle({
return tagLinks.filter((link) => {
const cleaned = cleanUrl(link)
if (!cleaned) return false
- if (iArticleCleaned && cleaned === iArticleCleaned) return false
+ if (webPreviewSuppressCleanedSet.has(cleaned)) return false
+ if (
+ (link.startsWith('http://') || link.startsWith('https://')) &&
+ webPreviewSuppressCleanedSet.has(canonicalizeRssArticleUrl(link))
+ ) {
+ return false
+ }
return !contentLinksSet.has(cleaned)
})
- }, [tagLinks, contentLinks, iArticleCleaned])
+ }, [tagLinks, contentLinks, webPreviewSuppressCleanedSet])
// Preprocess content to convert URLs to markdown syntax
const preprocessedContent = useMemo(() => {
@@ -3586,7 +3619,8 @@ export default function MarkdownArticle({
getImageIdentifier,
emojiInfos,
fullCalendarInvite,
- suppressStandaloneWebPreviewForCleanedUrl: iArticleCleaned || undefined
+ suppressStandaloneWebPreviewCleanedUrls:
+ webPreviewSuppressCleanedSet.size > 0 ? webPreviewSuppressCleanedSet : undefined
})
// Return nodes and hashtags (footnotes are already included in nodes)
return { nodes: result.nodes, hashtagsInContent: result.hashtagsInContent }
@@ -3602,7 +3636,7 @@ export default function MarkdownArticle({
getImageIdentifier,
emojiInfos,
fullCalendarInvite,
- iArticleCleaned
+ webPreviewSuppressCleanedSet
])
// Filter metadata tags to only show what's not already in content
@@ -3698,7 +3732,7 @@ export default function MarkdownArticle({
}
`}
- {iArticleUrl && (
+ {iArticleUrl && !suppressITagArticleWebPreview && (
diff --git a/src/components/NoteOptions/useMenuActions.tsx b/src/components/NoteOptions/useMenuActions.tsx
index c98a85c7..26f79322 100644
--- a/src/components/NoteOptions/useMenuActions.tsx
+++ b/src/components/NoteOptions/useMenuActions.tsx
@@ -637,8 +637,10 @@ export function useMenuActions({
// Contextual URL when on Spells (e.g. discussions faux-spell); plain /notes/{id} otherwise
const path =
currentPrimaryPage === 'spells'
- ? `/spells/notes/${noteId}`
- : `/notes/${noteId}`
+ ? `/spells/notes/${noteId}`
+ : currentPrimaryPage === 'rss'
+ ? `/rss/notes/${noteId}`
+ : `/notes/${noteId}`
const jumbleUrl = `https://jumble.imwald.eu${path}`
navigator.clipboard.writeText(jumbleUrl)
closeDrawer()
diff --git a/src/components/NoteStats/index.tsx b/src/components/NoteStats/index.tsx
index d8d5f075..54e55e50 100644
--- a/src/components/NoteStats/index.tsx
+++ b/src/components/NoteStats/index.tsx
@@ -43,11 +43,11 @@ export default function NoteStats({
// Hide interaction counts if event is in quiet mode
const hideInteractions = shouldHideInteractions(event)
- /** Synthetic RSS article root: only reply + reactions (no boost/quote/zap). */
+ /** Synthetic RSS article root: no boost/quote/zap; still show reaction breakdown (NIP-25 + kind-17 web). */
const isRssArticleRoot = event.kind === ExtendedKind.RSS_THREAD_ROOT
- /** Kind 11 / kind 1111 under kind 11: LikeButton already shows ⬆️/⬇️ counts — skip duplicate pill row. */
- const showLikesPills = !isDiscussion && !isReplyToDiscussion && !isRssArticleRoot
+ /** Emoji reaction pills (aggregated likes). Shown for RSS/Web URL threads so the side panel matches feed rows. */
+ const showLikesPills = !isDiscussion && !isReplyToDiscussion
useEffect(() => {
if (!fetchIfNotExisting) return
diff --git a/src/components/ReplyNote/index.tsx b/src/components/ReplyNote/index.tsx
index 2db06fd5..233de442 100644
--- a/src/components/ReplyNote/index.tsx
+++ b/src/components/ReplyNote/index.tsx
@@ -37,13 +37,15 @@ export default function ReplyNote({
parentEventId,
onClickParent = () => {},
onClickReply,
- highlight = false
+ highlight = false,
+ duplicateWebPreviewCleanedUrlHints
}: {
event: Event
parentEventId?: string
onClickParent?: () => void
onClickReply?: (event: Event) => void
highlight?: boolean
+ duplicateWebPreviewCleanedUrlHints?: string[]
}) {
const { t } = useTranslation()
const { isSmallScreen } = useScreenSize()
@@ -147,7 +149,12 @@ export default function ReplyNote({
{t(notificationReactionSummaryKey(reactionDisplay))}
) : (
-
+
)
) : (
{
+ const out: string[] = [...(duplicateWebPreviewCleanedUrlHints ?? [])]
+ if (rootInfo?.type === 'I') out.push(rootInfo.id)
+ return out.length ? out : undefined
+ }, [duplicateWebPreviewCleanedUrlHints, rootInfo])
+
// Helper function to get vote score for a reply
const getReplyVoteScore = (reply: NEvent) => {
const stats = noteStatsService.getNoteStats(reply.id)
@@ -345,6 +362,59 @@ function ReplyNoteList({
fetchRootEvent()
}, [event])
+ /** When stats saw a URL-thread reply on relays we didn't REQ in the reply list, fetch by id so count matches list. */
+ const rssStatsHydratedReplyIdsRef = useRef<Set<string>>(new Set())
+
+ useEffect(() => {
+ rssStatsHydratedReplyIdsRef.current.clear()
+ }, [event.id])
+
+ useEffect(() => {
+ if (event.kind !== ExtendedKind.RSS_THREAD_ROOT || rootInfo?.type !== 'I') return
+ const fromStats = noteStats?.replies
+ if (!fromStats?.length) return
+
+ const urlKey = canonicalizeRssArticleUrl(rootInfo.id)
+ const inBucket = new Set((repliesMap.get(urlKey)?.events ?? []).map((e) => e.id))
+
+ const candidates = fromStats.filter(
+ (r) => !inBucket.has(r.id) && !rssStatsHydratedReplyIdsRef.current.has(r.id)
+ )
+ if (candidates.length === 0) return
+
+ let cancelled = false
+ ;(async () => {
+ const batch: NEvent[] = []
+ for (const { id } of candidates) {
+ rssStatsHydratedReplyIdsRef.current.add(id)
+ try {
+ const ev = await eventService.fetchEvent(id)
+ if (cancelled) return
+ if (ev && isRssArticleUrlThreadInteraction(ev, rootInfo.id)) {
+ batch.push(ev)
+ } else {
+ rssStatsHydratedReplyIdsRef.current.delete(id)
+ }
+ } catch {
+ rssStatsHydratedReplyIdsRef.current.delete(id)
+ }
+ }
+ if (!cancelled && batch.length > 0) addReplies(batch)
+ })()
+
+ return () => {
+ cancelled = true
+ }
+ }, [
+ event.kind,
+ event.id,
+ rootInfo,
+ noteStats?.replies,
+ noteStats?.updatedAt,
+ repliesMap,
+ addReplies
+ ])
+
const onNewReply = useCallback((evt: NEvent) => {
addReplies([evt])
if (rootInfo) {
@@ -374,7 +444,10 @@ function ReplyNoteList({
const replyFetchGenRef = useRef(0)
useEffect(() => {
- if (!rootInfo || currentIndex !== index) return
+ if (!rootInfo) return
+ // Hidden stack pages pass a numeric index that differs from the top panel's currentIndex.
+ // When index is omitted (edge routes), still fetch so replies are not stuck empty.
+ if (index !== undefined && currentIndex !== index) return
const fetchGeneration = ++replyFetchGenRef.current
@@ -474,16 +547,7 @@ function ReplyNoteList({
finalRelayUrls.push(rootInfo.relay)
}
} else if (rootInfo.type === 'I') {
- filters.push({
- '#i': [rootInfo.id],
- kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT],
- limit: LIMIT
- })
- filters.push({
- '#I': [rootInfo.id],
- kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT],
- limit: LIMIT
- })
+ filters.push(...buildRssArticleUrlThreadInteractionFilters(rootInfo.id, LIMIT))
}
// Use fetchEvents instead of subscribeTimeline for one-time fetching
@@ -491,8 +555,12 @@ function ReplyNoteList({
if (fetchGeneration !== replyFetchGenRef.current) return
- // Filter and add replies
- const regularReplies = allReplies.filter((evt) => isReplyNoteEvent(evt))
+ // Filter and add replies (URL threads include kind 9802 highlights of this page)
+ const regularReplies = allReplies.filter((evt) =>
+ rootInfo.type === 'I'
+ ? isRssArticleUrlThreadInteraction(evt, rootInfo.id)
+ : isReplyNoteEvent(evt)
+ )
// Store in cache (this merges with existing cached replies)
// After this call, the cache contains ALL replies we've ever seen for this thread
@@ -622,12 +690,17 @@ function ReplyNoteList({
// vanishing when wotSet is still empty (all non-self appear untrusted)
if (isTrustLoaded && hideUntrustedInteractions && !isUserTrusted(item.pubkey)) {
if (isQuote) return null
- const repliesForThisReply = repliesMap.get(item.id)
- if (
- !repliesForThisReply ||
- repliesForThisReply.events.every((evt) => !isUserTrusted(evt.pubkey))
- ) {
- return null
+ // URL-scoped comments (NIP-22 / kind 1111) are keyed under the article URL in ReplyProvider,
+ // not under each note id — repliesMap.get(item.id) is usually empty. Skipping the "trusted
+ // children" rule avoids hiding every untrusted URL-thread note.
+ if (rootInfo?.type !== 'I') {
+ const repliesForThisReply = repliesMap.get(item.id)
+ if (
+ !repliesForThisReply ||
+ repliesForThisReply.events.every((evt) => !isUserTrusted(evt.pubkey))
+ ) {
+ return null
+ }
}
}
@@ -671,7 +744,11 @@ function ReplyNoteList({
const replyRootId = getRootEventHexId(reply)
const replyUrlForIThread =
- rootInfo?.type === 'I' ? getArticleUrlFromCommentITags(reply) : undefined
+ rootInfo?.type === 'I'
+ ? reply.kind === kinds.Highlights
+ ? getHighlightSourceHttpUrl(reply)
+ : getArticleUrlFromCommentITags(reply)
+ : undefined
const belongsToSameThread = rootInfo && (
(rootInfo.type === 'E' && replyRootId === rootInfo.id) ||
(rootInfo.type === 'A' && getRootATag(reply)?.[1] === rootInfo.id) ||
@@ -689,6 +766,7 @@ function ReplyNoteList({
{
if (!parentEventHexId) return
if (replies.every((r) => r.id !== parentEventHexId)) {
diff --git a/src/components/RssFeedList/index.tsx b/src/components/RssFeedList/index.tsx
index 2f5f2464..9ca70025 100644
--- a/src/components/RssFeedList/index.tsx
+++ b/src/components/RssFeedList/index.tsx
@@ -8,15 +8,20 @@ import RssFeedItem from '../RssFeedItem'
import RssWebFeedCard from '../RssWebFeedCard'
import { ArticleUrlsSection } from './ArticleUrlsSection'
import { RssEntriesSection } from './RssEntriesSection'
+import { canonicalizeRssArticleUrl, isClawstrDotComHttpUrl } from '@/lib/rss-article'
import {
addManualRssWebUrl,
fetchDiscoveredWebUrlsFromRelays,
loadManualRssWebUrls,
loadRssWebFeedScopePreference,
+ loadRssWebHideUnifiedClutterPreference,
loadRssWebSuppressClawstrPreference,
buildArticleUrlFeedRows,
+ isHttpArticleUrl,
+ isRssWebUnifiedClutterUrl,
mergeDiscoveredRssWebUrls,
saveRssWebFeedScopePreference,
+ saveRssWebHideUnifiedClutterPreference,
saveRssWebSuppressClawstrPreference,
WEB_EXTERNAL_REACTION_PUBLISHED_EVENT,
type ManualRssWebUrlEntry,
@@ -131,6 +136,12 @@ function ManualRssUrlAddRow({
)
}
+function rssFeedItemArticleIsClawstrHost(item: TRssFeedItem): boolean {
+ const l = item.link?.trim()
+ if (!l || (!l.startsWith('http://') && !l.startsWith('https://'))) return false
+ return isClawstrDotComHttpUrl(l) || isClawstrDotComHttpUrl(canonicalizeRssArticleUrl(l))
+}
+
export default function RssFeedList() {
const { t } = useTranslation()
const { pubkey, rssFeedListEvent } = useNostr()
@@ -156,6 +167,8 @@ export default function RssFeedList() {
const [manualWebEntries, setManualWebEntries] = useState([])
/** Latest relay discovery (in-memory); URLs appear as faux cards even before IndexedDB merge. */
const [relayDiscoveredUrls, setRelayDiscoveredUrls] = useState([])
+ const [suppressClawstrLinks, setSuppressClawstrLinks] = useState(true)
+ const [hideUnifiedClutter, setHideUnifiedClutter] = useState(true)
const refreshManualWebUrls = useCallback(() => {
void loadManualRssWebUrls().then(setManualWebEntries)
@@ -515,6 +528,16 @@ export default function RssFeedList() {
return filtered
}, [items, selectedFeeds, timeFilter])
+ /** When “hide clutter” is on, drop those entries from the feed (not only from URL cards). */
+ const rssWebItemsRespectingClutterPref = useMemo(() => {
+ if (!hideUnifiedClutter) return baseFilteredItems
+ return baseFilteredItems.filter((item) => {
+ const link = item.link?.trim()
+ if (!link || !isHttpArticleUrl(link)) return true
+ return !isRssWebUnifiedClutterUrl(link)
+ })
+ }, [baseFilteredItems, hideUnifiedClutter])
+
const rssItemMatchesSearch = useCallback((item: TRssFeedItem, q: string) => {
const query = q.toLowerCase().trim()
if (!query) return true
@@ -530,14 +553,17 @@ export default function RssFeedList() {
/** RSS-only view: flat timeline with full-text search. */
const rssScopeItems = useMemo(() => {
const q = searchQuery.trim()
- let list = baseFilteredItems
+ let list = rssWebItemsRespectingClutterPref
if (q) {
list = list.filter((item) => rssItemMatchesSearch(item, q))
}
+ if (suppressClawstrLinks) {
+ list = list.filter((item) => !rssFeedItemArticleIsClawstrHost(item))
+ }
return [...list].sort(
(a, b) => (b.pubDate?.getTime() ?? 0) - (a.pubDate?.getTime() ?? 0)
)
- }, [baseFilteredItems, searchQuery, rssItemMatchesSearch])
+ }, [rssWebItemsRespectingClutterPref, searchQuery, rssItemMatchesSearch, suppressClawstrLinks])
type CombinedFeedRow =
| { kind: 'web'; canonicalUrl: string; rssItems: TRssFeedItem[]; latestPub: number }
@@ -563,7 +589,8 @@ export default function RssFeedList() {
const discovered = await fetchDiscoveredWebUrlsFromRelays({
accountPubkey: pubkey,
favoriteRelays: favoriteRelays ?? [],
- blockedRelays: blockedRelays ?? []
+ blockedRelays: blockedRelays ?? [],
+ excludeClutterUrls: hideUnifiedClutter
})
if (cancelled) return
setRelayDiscoveredUrls(discovered)
@@ -576,24 +603,44 @@ export default function RssFeedList() {
return () => {
cancelled = true
}
- }, [feedScope, pubkey, favoriteRelays, blockedRelays, refreshManualWebUrls, relayDiscoveryTick])
+ }, [
+ feedScope,
+ pubkey,
+ favoriteRelays,
+ blockedRelays,
+ refreshManualWebUrls,
+ relayDiscoveryTick,
+ hideUnifiedClutter
+ ])
const combinedFeedRows = useMemo((): CombinedFeedRow[] => {
const { webRows, nonHttpItems } = buildArticleUrlFeedRows(
- baseFilteredItems,
+ rssWebItemsRespectingClutterPref,
manualWebEntries,
- relayDiscoveredUrls
+ relayDiscoveredUrls,
+ { excludeClutterLinks: hideUnifiedClutter }
)
const rest: CombinedFeedRow[] = nonHttpItems.map((item) => ({
kind: 'rss' as const,
item
}))
- return [...webRows, ...rest].sort((a, b) => {
+ const merged = [...webRows, ...rest].sort((a, b) => {
const ta = a.kind === 'web' ? a.latestPub : (a.item.pubDate?.getTime() ?? 0)
const tb = b.kind === 'web' ? b.latestPub : (b.item.pubDate?.getTime() ?? 0)
return tb - ta
})
- }, [baseFilteredItems, manualWebEntries, relayDiscoveredUrls])
+ if (!suppressClawstrLinks) return merged
+ return merged.filter((row) => {
+ if (row.kind === 'web') return !isClawstrDotComHttpUrl(row.canonicalUrl)
+ return !rssFeedItemArticleIsClawstrHost(row.item)
+ })
+ }, [
+ rssWebItemsRespectingClutterPref,
+ manualWebEntries,
+ relayDiscoveredUrls,
+ suppressClawstrLinks,
+ hideUnifiedClutter
+ ])
const combinedFeedRowsForSearch = useMemo((): CombinedFeedRow[] => {
const q = searchQuery.trim()
@@ -652,14 +699,18 @@ export default function RssFeedList() {
return { view: 'unified', rows }
}, [feedScope, rssScopeItems, combinedFeedRowsForSearch, urlKeysWithNostrFootprint])
- const [suppressClawstrLinks, setSuppressClawstrLinks] = useState(true)
-
const persistSuppressClawstr = useCallback((checked: boolean) => {
rssWebPrefsUserTouchedRef.current = true
setSuppressClawstrLinks(checked)
void saveRssWebSuppressClawstrPreference(checked)
}, [])
+ const persistHideUnifiedClutter = useCallback((checked: boolean) => {
+ rssWebPrefsUserTouchedRef.current = true
+ setHideUnifiedClutter(checked)
+ void saveRssWebHideUnifiedClutterPreference(checked)
+ }, [])
+
const persistFeedScope = useCallback((scope: RssWebFeedScope) => {
rssWebPrefsUserTouchedRef.current = true
setFeedScope(scope)
@@ -669,12 +720,14 @@ export default function RssFeedList() {
useEffect(() => {
let cancelled = false
void (async () => {
- const [suppressClawstr, scope] = await Promise.all([
+ const [suppressClawstr, hideClutter, scope] = await Promise.all([
loadRssWebSuppressClawstrPreference(),
+ loadRssWebHideUnifiedClutterPreference(),
loadRssWebFeedScopePreference()
])
if (cancelled || rssWebPrefsUserTouchedRef.current) return
setSuppressClawstrLinks(suppressClawstr)
+ setHideUnifiedClutter(hideClutter)
setFeedScope(scope)
})()
return () => {
@@ -690,7 +743,7 @@ export default function RssFeedList() {
// Reset pagination when filters change
useEffect(() => {
setShowRowCount(20)
- }, [selectedFeeds, timeFilter, searchQuery, feedScope, suppressClawstrLinks])
+ }, [selectedFeeds, timeFilter, searchQuery, feedScope, suppressClawstrLinks, hideUnifiedClutter])
const displayedFeed = useMemo(():
| { view: 'rss'; items: TRssFeedItem[] }
@@ -811,18 +864,33 @@ export default function RssFeedList() {
{t('RSS')}
-
-
persistSuppressClawstr(c === true)}
- />
-
- {t('Suppress Clawstr links in RSS previews')}
-
+
+
+ persistSuppressClawstr(c === true)}
+ />
+
+ {t('Suppress Clawstr links in RSS previews')}
+
+
+
+ persistHideUnifiedClutter(c === true)}
+ />
+
+ {t('Hide local, media & feed URLs from URL cards')}
+
+
diff --git a/src/components/RssUrlThreadEventsPreview/index.tsx b/src/components/RssUrlThreadEventsPreview/index.tsx
new file mode 100644
index 00000000..ab5ba036
--- /dev/null
+++ b/src/components/RssUrlThreadEventsPreview/index.tsx
@@ -0,0 +1,87 @@
+import NoteCard from '@/components/NoteCard'
+import { Skeleton } from '@/components/ui/skeleton'
+import { FAST_READ_RELAY_URLS, SEARCHABLE_RELAY_URLS } from '@/constants'
+import { useNoteStatsRelayHints } from '@/hooks/useNoteStatsRelayHints'
+import {
+ buildRssArticleUrlThreadInteractionFilters,
+ isRssArticleUrlThreadInteraction
+} from '@/lib/rss-web-feed'
+import { queryService } from '@/services/client.service'
+import type { Event } from 'nostr-tools'
+import { useEffect, useMemo, useState } from 'react'
+
+const PREVIEW_LIMIT = 5
+const FETCH_LIMIT = 24
+
+/**
+ * Compact Nostr thread rows (comments + highlights) for an article URL card in the RSS+Web feed.
+ */
+export default function RssUrlThreadEventsPreview({ canonicalUrl }: { canonicalUrl: string }) {
+ const { relays, key: relayHintsKey } = useNoteStatsRelayHints()
+ const relayUrls = useMemo(
+ () => [...new Set([...SEARCHABLE_RELAY_URLS, ...FAST_READ_RELAY_URLS, ...relays])],
+ [relays]
+ )
+ const [events, setEvents] = useState<Event[]>([])
+ const [loading, setLoading] = useState(true)
+
+ useEffect(() => {
+ let cancelled = false
+ setLoading(true)
+ const filters = buildRssArticleUrlThreadInteractionFilters(canonicalUrl, FETCH_LIMIT)
+ void queryService
+ .fetchEvents(relayUrls, filters)
+ .then((all) => {
+ if (cancelled) return
+ const seen = new Set()
+ const merged: Event[] = []
+ for (const e of [...all].sort((a, b) => b.created_at - a.created_at)) {
+ if (seen.has(e.id)) continue
+ if (!isRssArticleUrlThreadInteraction(e, canonicalUrl)) continue
+ seen.add(e.id)
+ merged.push(e)
+ }
+ setEvents(merged.slice(0, PREVIEW_LIMIT))
+ })
+ .catch(() => {
+ if (!cancelled) setEvents([])
+ })
+ .finally(() => {
+ if (!cancelled) setLoading(false)
+ })
+ return () => {
+ cancelled = true
+ }
+ }, [canonicalUrl, relayHintsKey, relayUrls])
+
+ if (loading) {
+ return (
+ e.stopPropagation()}
+ onKeyDown={(e) => e.stopPropagation()}
+ >
+
+
+
+ )
+ }
+
+ if (events.length === 0) return null
+
+ return (
+ e.stopPropagation()}
+ onKeyDown={(e) => e.stopPropagation()}
+ >
+
+ {events.map((evt) => (
+
+
+
+ ))}
+
+
+ )
+}
diff --git a/src/components/RssWebFeedCard/index.tsx b/src/components/RssWebFeedCard/index.tsx
index f64fcd44..b7ee63b4 100644
--- a/src/components/RssWebFeedCard/index.tsx
+++ b/src/components/RssWebFeedCard/index.tsx
@@ -1,4 +1,5 @@
import RssFeedItem from '@/components/RssFeedItem'
+import RssUrlThreadEventsPreview from '@/components/RssUrlThreadEventsPreview'
import RssUrlThreadStatsBar from '@/components/RssUrlThreadStatsBar'
import WebPreview from '@/components/WebPreview'
import { cn } from '@/lib/utils'
@@ -100,6 +101,10 @@ export default function RssWebFeedCard({
) : null}
+ {isHttpArticleUrl(canonicalUrl) ? (
+
+ ) : null}
+
)
diff --git a/src/i18n/locales/en.ts b/src/i18n/locales/en.ts
index c227d2e8..c8addc4b 100644
--- a/src/i18n/locales/en.ts
+++ b/src/i18n/locales/en.ts
@@ -1268,7 +1268,9 @@ export default {
'Web URL item label': 'Web URL',
'URL thread activity': 'URL thread activity',
'Suppress Clawstr links in RSS previews':
- 'Hide links to clawstr.com in RSS previews',
+ 'Hide clawstr.com in RSS and URL feed',
+ 'Hide local, media & feed URLs from URL cards':
+ 'Hide local, media, feed, document & XML links from the feed',
'RSS articles': 'RSS articles',
'Web comments': 'Web comments',
'Web highlights': 'Web highlights',
diff --git a/src/lib/nip84-highlight-display.ts b/src/lib/nip84-highlight-display.ts
new file mode 100644
index 00000000..97f4071e
--- /dev/null
+++ b/src/lib/nip84-highlight-display.ts
@@ -0,0 +1,82 @@
+import type { Event } from 'nostr-tools'
+
+/**
+ * NIP-84 / Web Annotation style `textquoteselector` (prefix + exact + suffix).
+ * `exact` is always {@link Event.content}; prefix/suffix are adjacent source text.
+ *
+ * Common shapes:
+ * - `["textquoteselector", prefix, suffix]` (3 items)
+ * - `["textquoteselector", "-", prefix, suffix]` — leading "-" = empty slot (Hypothesis-style)
+ */
+export function parseTextQuoteSelectorParts(tag: readonly string[]): { prefix: string; suffix: string } {
+ if (tag.length < 2 || tag[0] !== 'textquoteselector') {
+ return { prefix: '', suffix: '' }
+ }
+ if (tag.length >= 4 && tag[1] === '-') {
+ return {
+ prefix: (tag[2] ?? '').trim(),
+ suffix: (tag[3] ?? '').trim()
+ }
+ }
+ if (tag.length >= 3) {
+ return {
+ prefix: (tag[1] ?? '').trim(),
+ suffix: (tag[2] ?? '').trim()
+ }
+ }
+ return { prefix: '', suffix: '' }
+}
+
+/** `["textpositionselector", start, end]` — character offsets into a full document string. */
+export function parseTextPositionSelector(tag: readonly string[]): { start: number; end: number } | null {
+ if (tag.length < 3 || tag[0] !== 'textpositionselector') return null
+ const start = parseInt(tag[1] ?? '', 10)
+ const end = parseInt(tag[2] ?? '', 10)
+ if (Number.isNaN(start) || Number.isNaN(end) || start < 0 || end <= start) return null
+ return { start, end }
+}
+
+export type Nip84HighlightDisplay = {
+ /** Full passage to show in the quote box */
+ fullText: string
+ /** Substring of fullText to wrap in */
+ markedSpan: string
+}
+
+/**
+ * Resolve which span to mark inside which full text, using `context`, `textquoteselector`,
+ * and optionally `textpositionselector` (only when offsets fit the base string).
+ */
+export function resolveNip84HighlightDisplay(event: Pick<Event, 'content' | 'tags'>): Nip84HighlightDisplay {
+ const highlightedText = event.content ?? ''
+ const tags = event.tags
+
+ const contextTag = tags.find((t) => t[0] === 'context')
+ const contextBody = contextTag?.[1]?.trim() ? contextTag[1] : undefined
+
+ const posTag = tags.find((t) => t[0] === 'textpositionselector')
+ const pos = posTag ? parseTextPositionSelector(posTag) : null
+
+ if (contextBody && pos) {
+ const { start, end } = pos
+ if (end <= contextBody.length) {
+ const slice = contextBody.slice(start, end)
+ if (slice.length > 0) {
+ return { fullText: contextBody, markedSpan: slice }
+ }
+ }
+ }
+
+ if (contextBody) {
+ return { fullText: contextBody, markedSpan: highlightedText }
+ }
+
+ const tqs = tags.find((t) => t[0] === 'textquoteselector')
+ if (tqs) {
+ const { prefix, suffix } = parseTextQuoteSelectorParts(tqs)
+ const fullText = `${prefix}${highlightedText}${suffix}`
+ return { fullText, markedSpan: highlightedText }
+ }
+
+ return { fullText: highlightedText, markedSpan: highlightedText }
+}
diff --git a/src/lib/rss-article.ts b/src/lib/rss-article.ts
index bab00e81..df2609dc 100644
--- a/src/lib/rss-article.ts
+++ b/src/lib/rss-article.ts
@@ -99,16 +99,16 @@ export function getWebBookmarkArticleUrl(event: Pick): s
return undefined
}
-/** HTTP(S) page URL from kind 9802 `r` tags (`source` marker or bare `r`). */
+/** HTTP(S) page URL from kind 9802 `r` tags. */
 export function getHighlightSourceHttpUrl(event: Pick<Event, 'tags'>): string | undefined {
for (const t of event.tags) {
- if (t[0] !== 'r' || !t[1]) continue
+ if (!t[0] || String(t[0]).toLowerCase() !== 'r' || !t[1]) continue
const u = t[1].trim()
if (!u.startsWith('http://') && !u.startsWith('https://')) continue
const marker = (t[2] ?? '').trim().toLowerCase()
- // NIP-84: non-source URL refs use `mention`; only `source` (any casing) or legacy bare `r` is the page.
+ // NIP-84: only `mention` marks a non-source URL; everything else (bare `r`, `source`, `-`, unknown) is the page.
if (marker === 'mention') continue
- if (marker === 'source' || marker === '') return canonicalizeRssArticleUrl(u)
+ return canonicalizeRssArticleUrl(u)
}
return undefined
}
@@ -138,9 +138,9 @@ export function computeRTagFilterValuesForArticleThread(canonicalUrl: string): s
return [...out]
}
-/** Strip anchors whose href targets https://clawstr.com/… (incl. subdomains, http(s), protocol-relative). */
-export function isClawstrDotComHttpHref(href: string): boolean {
- const t = href.trim()
+/** True for http(s) URLs whose host is clawstr.com (incl. subdomains; supports protocol-relative `//…`). */
+export function isClawstrDotComHttpUrl(url: string): boolean {
+ const t = url.trim()
if (!t) return false
try {
const u = t.startsWith('//') ? new URL(`https:${t}`) : new URL(t)
@@ -152,6 +152,11 @@ export function isClawstrDotComHttpHref(href: string): boolean {
}
}
+/** Same as {@link isClawstrDotComHttpUrl} — use for `href` attributes in HTML. */
+export function isClawstrDotComHttpHref(href: string): boolean {
+ return isClawstrDotComHttpUrl(href)
+}
+
/**
* NIP-25 kind 17 + NIP-73: resolve http(s) target URL for a `k: web` external reaction.
* Stops at the next `k` tag so podcast-style multi-scope reactions are not mis-parsed as web.
diff --git a/src/lib/rss-web-feed.ts b/src/lib/rss-web-feed.ts
index b377ff13..1381c615 100644
--- a/src/lib/rss-web-feed.ts
+++ b/src/lib/rss-web-feed.ts
@@ -1,24 +1,28 @@
import { ExtendedKind, FAST_READ_RELAY_URLS } from '@/constants'
import { buildAccountListRelayUrlsForMerge } from '@/lib/account-list-relay-urls'
import { getFavoritesFeedRelayUrls } from '@/lib/favorites-feed-relays'
+import { isReplyNoteEvent } from '@/lib/event'
import {
canonicalizeRssArticleUrl,
+ computeRTagFilterValuesForArticleThread,
getArticleUrlFromCommentITags,
getHighlightSourceHttpUrl,
getWebBookmarkArticleUrl,
getWebExternalReactionTargetUrl
} from '@/lib/rss-article'
import logger from '@/lib/logger'
-import { normalizeUrl } from '@/lib/url'
+import { isImage, isLocalNetworkUrl, isMedia, isVideo, normalizeUrl } from '@/lib/url'
import { queryService } from '@/services/client.service'
import indexedDb from '@/services/indexed-db.service'
import type { RssFeedItem } from '@/services/rss-feed.service'
-import type { Event } from 'nostr-tools'
-import { kinds } from 'nostr-tools'
+import { kinds, type Event, type Filter } from 'nostr-tools'
-/** IndexedDB: `'1'` (default) = strip <a href> to clawstr.com from RSS HTML in the feed list. */
+/** IndexedDB: `'1'` (default) = hide clawstr.com (strip preview links + drop URL/RSS rows for that host). */
export const RSS_WEB_SUPPRESS_CLAWSTR_SETTING = 'rssWebSuppressClawstrLinks'
+/** IndexedDB: `'1'` (default) = keep local/media/feed XML links as plain RSS rows, not URL cards. */
+export const RSS_WEB_HIDE_UNIFIED_CLUTTER_SETTING = 'rssWebHideUnifiedClutter'
+
/** IndexedDB: feed view — article URL cards, flat RSS timeline, or both interleaved. */
export const RSS_WEB_FEED_SCOPE_SETTING = 'rssWebFeedScope'
@@ -132,19 +136,89 @@ export function isHttpArticleUrl(url: string): boolean {
return t.startsWith('http://') || t.startsWith('https://')
}
+/**
+ * URLs that make poor “article URL” cards: localhost/LAN, direct media files, and common RSS/Atom document paths.
+ * When filtering is on, these stay as normal RSS timeline rows instead of Web URL cards.
+ */
+export function isRssWebUnifiedClutterUrl(url: string): boolean {
+ const t = url.trim()
+ if (!isHttpArticleUrl(t)) return false
+ let parsed: URL
+ try {
+ parsed = new URL(t)
+ } catch {
+ return false
+ }
+ const host = parsed.hostname.toLowerCase()
+ if (host.endsWith('.local')) return true
+ if (isLocalNetworkUrl(t)) return true
+ const ipv4 = host.match(/^(\d+)\.(\d+)\.(\d+)\.(\d+)$/)
+ if (ipv4 && Number(ipv4[1]) === 127) return true
+
+ if (isMedia(t) || isVideo(t) || isImage(t)) return true
+
+ const path = parsed.pathname.toLowerCase()
+ const segments = path.split('/').filter(Boolean)
+ const last = segments[segments.length - 1] || ''
+ // Documents — not article pages
+ if (
+ /\.(pdf|epub|mobi|azw3|doc|docx|xls|xlsx|ppt|pptx|ods|odt|rtf)(\?.*)?$/i.test(path)
+ ) {
+ return true
+ }
+ if (/\.(rss|atom)$/i.test(last)) return true
+ if (last === 'feed.xml' || last === 'rss.xml' || last === 'atom.xml') return true
+ if (last.endsWith('.xml')) return true
+ if (last === 'feed' || last === 'rss' || last === 'atom') return true
+ return false
+}
+
+/** REQ filters for Nostr comments, voice comments, and highlights on one article URL (synthetic RSS thread). */
+export function buildRssArticleUrlThreadInteractionFilters(
+ canonicalArticleUrl: string,
+ limit: number
+): Filter[] {
+ const canonical = canonicalizeRssArticleUrl(canonicalArticleUrl)
+ const rVals = computeRTagFilterValuesForArticleThread(canonical)
+ const filters: Filter[] = [
+ { '#i': [canonical], kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT], limit },
+ { '#I': [canonical], kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT], limit }
+ ]
+ if (rVals.length > 0) {
+ filters.push({ '#r': rVals, kinds: [kinds.Highlights], limit })
+ }
+ return filters
+}
+
+/** Whether `evt` belongs to the URL-scoped article thread (comments / voice / highlight of this page). */
+export function isRssArticleUrlThreadInteraction(evt: Event, canonicalArticleUrl: string): boolean {
+ const key = canonicalizeRssArticleUrl(canonicalArticleUrl)
+ if (evt.kind === kinds.Highlights) {
+ const hu = getHighlightSourceHttpUrl(evt)
+ return !!hu && canonicalizeRssArticleUrl(hu) === key
+ }
+ if (!isReplyNoteEvent(evt)) return false
+ const u = getArticleUrlFromCommentITags(evt)
+ return !!u && canonicalizeRssArticleUrl(u) === key
+}
+
/**
* Group RSS entries by canonical article URL (NIP-22 / web thread key).
*/
export function groupRssItemsByCanonicalUrl(items: RssFeedItem[]): RssUrlGroup[] {
- const { groups } = partitionRssItemsForWebFeed(items)
+ const { groups } = partitionRssItemsForWebFeed(items, { excludeClutterLinks: true })
return groups
}
/** HTTP(S) article groups for combined cards; everything else stays as plain RSS rows. */
-export function partitionRssItemsForWebFeed(items: RssFeedItem[]): {
+export function partitionRssItemsForWebFeed(
+ items: RssFeedItem[],
+ options?: { excludeClutterLinks?: boolean }
+): {
groups: RssUrlGroup[]
nonHttpItems: RssFeedItem[]
} {
+ const excludeClutter = options?.excludeClutterLinks !== false
+  const map = new Map<string, RssFeedItem[]>()
const nonHttpItems: RssFeedItem[] = []
for (const item of items) {
@@ -153,6 +227,10 @@ export function partitionRssItemsForWebFeed(items: RssFeedItem[]): {
nonHttpItems.push(item)
continue
}
+ if (excludeClutter && isRssWebUnifiedClutterUrl(link)) {
+ nonHttpItems.push(item)
+ continue
+ }
const key = canonicalizeRssArticleUrl(link)
const list = map.get(key)
if (list) list.push(item)
@@ -191,9 +269,11 @@ export type ArticleUrlFeedWebRow = {
export function buildArticleUrlFeedRows(
filteredItems: RssFeedItem[],
manualEntries: ManualRssWebUrlEntry[],
- relayDiscoveredEntries: ManualRssWebUrlEntry[]
+ relayDiscoveredEntries: ManualRssWebUrlEntry[],
+ options?: { excludeClutterLinks?: boolean }
): { webRows: ArticleUrlFeedWebRow[]; nonHttpItems: RssFeedItem[] } {
- const { groups, nonHttpItems } = partitionRssItemsForWebFeed(filteredItems)
+ const { groups, nonHttpItems } = partitionRssItemsForWebFeed(filteredItems, options)
+ const excludeClutter = options?.excludeClutterLinks !== false
+  const webByUrl = new Map<string, ArticleUrlFeedWebRow>()
for (const g of groups) {
@@ -214,10 +294,12 @@ export function buildArticleUrlFeedRows(
for (const { url, addedAt } of manualEntries) {
if (!isHttpArticleUrl(url)) continue
+ if (excludeClutter && isRssWebUnifiedClutterUrl(url)) continue
mergeNostrTimestamp(canonicalizeRssArticleUrl(url), addedAt)
}
for (const { url, addedAt } of relayDiscoveredEntries) {
if (!isHttpArticleUrl(url)) continue
+ if (excludeClutter && isRssWebUnifiedClutterUrl(url)) continue
mergeNostrTimestamp(canonicalizeRssArticleUrl(url), addedAt)
}
@@ -306,7 +388,10 @@ export async function fetchDiscoveredWebUrlsFromRelays(options: {
accountPubkey: string | null
favoriteRelays: string[]
blockedRelays: string[]
+ /** When true (default), omit localhost, media files, and feed-document URLs from discovery. */
+ excludeClutterUrls?: boolean
 }): Promise<ManualRssWebUrlEntry[]> {
+ const excludeClutter = options.excludeClutterUrls !== false
const relayUrls = await buildRssWebNostrQueryRelayUrls(options)
if (relayUrls.length === 0) {
logger.info('[RssWebFeed] Relay URL discovery skipped (no relays)')
@@ -323,6 +408,7 @@ export async function fetchDiscoveredWebUrlsFromRelays(options: {
const onEvent = (evt: Event) => {
const url = extractArticleUrlFromWebActivityEvent(evt)
if (!url) return
+ if (excludeClutter && isRssWebUnifiedClutterUrl(url)) return
const key = canonicalizeRssArticleUrl(url)
const prev = latestByUrl.get(key) ?? 0
if (evt.created_at > prev) latestByUrl.set(key, evt.created_at)
@@ -370,6 +456,17 @@ export async function saveRssWebSuppressClawstrPreference(suppress: boolean): Pr
await indexedDb.setSetting(RSS_WEB_SUPPRESS_CLAWSTR_SETTING, suppress ? '1' : '0')
}
+export async function loadRssWebHideUnifiedClutterPreference(): Promise<boolean> {
+ const v = await indexedDb.getSetting(RSS_WEB_HIDE_UNIFIED_CLUTTER_SETTING)
+ if (v === '0' || v === 'false') return false
+ if (v === '1' || v === 'true') return true
+ return true
+}
+
+export async function saveRssWebHideUnifiedClutterPreference(hide: boolean): Promise<void> {
+ await indexedDb.setSetting(RSS_WEB_HIDE_UNIFIED_CLUTTER_SETTING, hide ? '1' : '0')
+}
+
export async function loadRssWebFeedScopePreference(): Promise {
const v = await indexedDb.getSetting(RSS_WEB_FEED_SCOPE_SETTING)
return parseRssWebFeedScope(v)
diff --git a/src/lib/thread-reply-root-match.ts b/src/lib/thread-reply-root-match.ts
index b89131c5..4042c164 100644
--- a/src/lib/thread-reply-root-match.ts
+++ b/src/lib/thread-reply-root-match.ts
@@ -1,6 +1,11 @@
import { getRootATag, getRootEventHexId } from '@/lib/event'
-import { canonicalizeRssArticleUrl, getArticleUrlFromCommentITags } from '@/lib/rss-article'
+import {
+ canonicalizeRssArticleUrl,
+ getArticleUrlFromCommentITags,
+ getHighlightSourceHttpUrl
+} from '@/lib/rss-article'
import type { Event } from 'nostr-tools'
+import { kinds } from 'nostr-tools'
/** Matches `ReplyNoteList` / discussion thread root shapes. */
export type TThreadRootRef =
@@ -12,8 +17,12 @@ export type TThreadRootRef =
export function eventReplyMatchesThreadRoot(evt: Event, root: TThreadRootRef): boolean {
if (root.type === 'I') {
const u = getArticleUrlFromCommentITags(evt)
- if (!u) return false
- return canonicalizeRssArticleUrl(u) === canonicalizeRssArticleUrl(root.id)
+ if (u && canonicalizeRssArticleUrl(u) === canonicalizeRssArticleUrl(root.id)) return true
+ if (evt.kind === kinds.Highlights) {
+ const hu = getHighlightSourceHttpUrl(evt)
+ return !!hu && canonicalizeRssArticleUrl(hu) === canonicalizeRssArticleUrl(root.id)
+ }
+ return false
}
if (root.type === 'A') {
const coord = getRootATag(evt)?.[1]
diff --git a/src/pages/secondary/RssArticlePage/index.tsx b/src/pages/secondary/RssArticlePage/index.tsx
index 2820d61c..6d8d71c5 100644
--- a/src/pages/secondary/RssArticlePage/index.tsx
+++ b/src/pages/secondary/RssArticlePage/index.tsx
@@ -232,7 +232,7 @@ const RssArticlePage = forwardRef(
{syntheticRoot && (
-
+
)}
@@ -301,7 +301,7 @@ const RssArticlePage = forwardRef(
{syntheticRoot && (
-
+
)}
diff --git a/src/providers/ReplyProvider.tsx b/src/providers/ReplyProvider.tsx
index fc6f0d13..5b097ebe 100644
--- a/src/providers/ReplyProvider.tsx
+++ b/src/providers/ReplyProvider.tsx
@@ -1,4 +1,8 @@
-import { canonicalizeRssArticleUrl, getArticleUrlFromCommentITags } from '@/lib/rss-article'
+import {
+ canonicalizeRssArticleUrl,
+ getArticleUrlFromCommentITags,
+ getHighlightSourceHttpUrl
+} from '@/lib/rss-article'
import {
getParentATag,
getParentETag,
@@ -7,7 +11,7 @@ import {
getRootETag,
isNip25ReactionKind
} from '@/lib/event'
-import { Event } from 'nostr-tools'
+import { Event, kinds } from 'nostr-tools'
import { createContext, useCallback, useContext, useState } from 'react'
type TReplyContext = {
@@ -50,6 +54,9 @@ export function ReplyProvider({ children }: { children: React.ReactNode }) {
const articleUrl = getArticleUrlFromCommentITags(reply)
if (articleUrl) {
rootId = canonicalizeRssArticleUrl(articleUrl)
+ } else if (reply.kind === kinds.Highlights) {
+ const hu = getHighlightSourceHttpUrl(reply)
+ if (hu) rootId = canonicalizeRssArticleUrl(hu)
}
}
}
diff --git a/src/routes.tsx b/src/routes.tsx
index 11dee5b4..059e408c 100644
--- a/src/routes.tsx
+++ b/src/routes.tsx
@@ -51,6 +51,7 @@ const ROUTES = [
{ path: '/home/notes/:id', element: SR(NotePageLazy) },
{ path: '/feed/notes/:id', element: SR(NotePageLazy) },
{ path: '/spells/notes/:id', element: SR(NotePageLazy) },
+ { path: '/rss/notes/:id', element: SR(NotePageLazy) },
{ path: '/rss-item/:articleKey', element: SR(RssArticlePageLazy) },
{ path: '/rss/rss-item/:articleKey', element: SR(RssArticlePageLazy) },
{ path: '/feed/rss-item/:articleKey', element: SR(RssArticlePageLazy) },
diff --git a/src/services/note-stats.service.ts b/src/services/note-stats.service.ts
index 9115ba1d..94cc8ee1 100644
--- a/src/services/note-stats.service.ts
+++ b/src/services/note-stats.service.ts
@@ -303,11 +303,21 @@ class NoteStatsService {
kinds: [ExtendedKind.EXTERNAL_REACTION],
limit: reactionLimit
},
+ {
+ '#I': [canonical],
+ kinds: [ExtendedKind.EXTERNAL_REACTION],
+ limit: reactionLimit
+ },
{
'#i': [canonical],
kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT],
limit: interactionLimit
},
+ {
+ '#I': [canonical],
+ kinds: [ExtendedKind.COMMENT, ExtendedKind.VOICE_COMMENT],
+ limit: interactionLimit
+ },
{
'#r': computeRTagFilterValuesForArticleThread(canonical),
kinds: [kinds.Highlights],