Browse Source

bug-fixes

master
Silberengel 4 weeks ago
parent
commit
3f9b45643f
  1. 13
      internal/generator/html.go
  2. 427
      internal/nostr/client.go
  3. 28
      internal/nostr/ebooks.go
  4. 75
      internal/nostr/feed.go
  5. 3
      internal/nostr/kinds.go
  6. 9
      internal/nostr/profile.go
  7. 145
      internal/nostr/wiki.go
  8. 14
      static/css/main.css
  9. 227
      static/css/responsive.css
  10. 5
      templates/articles.html
  11. 5
      templates/blog.html
  12. 86
      templates/landing.html

13
internal/generator/html.go

@@ -309,14 +309,9 @@ func (g *HTMLGenerator) ProcessMarkdown(markdownContent string) (string, error)
}
// GenerateLandingPage generates the static landing page
func (g *HTMLGenerator) GenerateLandingPage(wikiPages []WikiPageInfo, feedItems []FeedItemInfo, newestBlogItem *BlogItemInfo, newestArticleItem *ArticleItemInfo, allArticleItems []ArticleItemInfo, allEBooks []EBookInfo) (string, error) {
// Collect pubkeys from feed items
pubkeys := make([]string, 0, len(feedItems))
for _, item := range feedItems {
if item.Author != "" {
pubkeys = append(pubkeys, item.Author)
}
}
func (g *HTMLGenerator) GenerateLandingPage(wikiPages []WikiPageInfo, newestBlogItem *BlogItemInfo, newestArticleItem *ArticleItemInfo, allArticleItems []ArticleItemInfo, allEBooks []EBookInfo) (string, error) {
// Collect pubkeys from blog and article items
pubkeys := make([]string, 0)
// Add blog and article author pubkeys if available
if newestBlogItem != nil && newestBlogItem.Author != "" {
@@ -375,7 +370,7 @@ func (g *HTMLGenerator) GenerateLandingPage(wikiPages []WikiPageInfo, feedItems []FeedItemInfo, ...)
SiteURL: g.siteURL,
CurrentYear: time.Now().Year(),
WikiPages: wikiPages,
FeedItems: feedItems,
FeedItems: []FeedItemInfo{}, // Empty - feed only on feed page
Profiles: profiles,
}

427
internal/nostr/client.go

@ -65,6 +65,11 @@ func (c *Client) ConnectToRelay(ctx context.Context, url string) (*nostr.Relay, @@ -65,6 +65,11 @@ func (c *Client) ConnectToRelay(ctx context.Context, url string) (*nostr.Relay,
// Returns the newest event (highest created_at) if multiple events are found
// Rate-limited to prevent overwhelming relays
func (c *Client) FetchEvent(ctx context.Context, filter nostr.Filter) (*nostr.Event, error) {
return c.FetchEventFromRelays(ctx, filter, c.relays)
}
// FetchEventFromRelays fetches a single event from specific relays
func (c *Client) FetchEventFromRelays(ctx context.Context, filter nostr.Filter, relays []string) (*nostr.Event, error) {
// Acquire semaphore to limit concurrent requests
c.requestSem <- struct{}{}
defer func() { <-c.requestSem }()
@ -74,16 +79,16 @@ func (c *Client) FetchEvent(ctx context.Context, filter nostr.Filter) (*nostr.Ev @@ -74,16 +79,16 @@ func (c *Client) FetchEvent(ctx context.Context, filter nostr.Filter) (*nostr.Ev
defer cancel()
logger.WithFields(map[string]interface{}{
"relays": c.relays,
"relays": relays,
"kinds": filter.Kinds,
"authors": filter.Authors,
"ids": filter.IDs,
"tags": filter.Tags,
}).Debug("Querying relays using SimplePool")
// Use SimplePool's SubManyEose to query all relays in parallel
// Use SimplePool's SubManyEose to query specified relays in parallel
// It automatically handles connection pooling, failover, and deduplication
eventChan := c.pool.SubManyEose(queryCtx, c.relays, nostr.Filters{filter})
eventChan := c.pool.SubManyEose(queryCtx, relays, nostr.Filters{filter})
// Collect all events from all relays
var allEvents []*nostr.Event
@ -121,7 +126,12 @@ func (c *Client) FetchEvent(ctx context.Context, filter nostr.Filter) (*nostr.Ev @@ -121,7 +126,12 @@ func (c *Client) FetchEvent(ctx context.Context, filter nostr.Filter) (*nostr.Ev
// Returns deduplicated events, keeping the newest version of each event
// Rate-limited to prevent overwhelming relays
func (c *Client) FetchEvents(ctx context.Context, filter nostr.Filter) ([]*nostr.Event, error) {
return c.FetchEventsBatch(ctx, []nostr.Filter{filter})
return c.FetchEventsFromRelays(ctx, filter, c.relays)
}
// FetchEventsFromRelays fetches multiple events from specific relays
func (c *Client) FetchEventsFromRelays(ctx context.Context, filter nostr.Filter, relays []string) ([]*nostr.Event, error) {
return c.FetchEventsBatchFromRelays(ctx, []nostr.Filter{filter}, relays)
}
// FetchEventsBatch fetches multiple events using multiple filters in a single batched query
@ -129,6 +139,11 @@ func (c *Client) FetchEvents(ctx context.Context, filter nostr.Filter) ([]*nostr @@ -129,6 +139,11 @@ func (c *Client) FetchEvents(ctx context.Context, filter nostr.Filter) ([]*nostr
// Returns deduplicated events, keeping the newest version of each event
// Rate-limited to prevent overwhelming relays
func (c *Client) FetchEventsBatch(ctx context.Context, filters []nostr.Filter) ([]*nostr.Event, error) {
return c.FetchEventsBatchFromRelays(ctx, filters, c.relays)
}
// FetchEventsBatchFromRelays fetches multiple events from specific relays using multiple filters
func (c *Client) FetchEventsBatchFromRelays(ctx context.Context, filters []nostr.Filter, relays []string) ([]*nostr.Event, error) {
if len(filters) == 0 {
return nil, fmt.Errorf("no filters provided")
}
@ -142,13 +157,13 @@ func (c *Client) FetchEventsBatch(ctx context.Context, filters []nostr.Filter) ( @@ -142,13 +157,13 @@ func (c *Client) FetchEventsBatch(ctx context.Context, filters []nostr.Filter) (
defer cancel()
logger.WithFields(map[string]interface{}{
"relays": c.relays,
"relays": relays,
"filters": len(filters),
}).Debug("Querying relays using SimplePool with batched filters")
// Use SimplePool's SubManyEose to query all relays in parallel with all filters
// Use SimplePool's SubManyEose to query specified relays in parallel with all filters
// It automatically handles connection pooling, failover, and deduplication
eventChan := c.pool.SubManyEose(queryCtx, c.relays, nostr.Filters(filters))
eventChan := c.pool.SubManyEose(queryCtx, relays, nostr.Filters(filters))
// Collect all events from all relays, deduplicating by ID and keeping newest
eventMap := make(map[string]*nostr.Event)
@ -208,6 +223,25 @@ func (c *Client) GetRelays() []string { @@ -208,6 +223,25 @@ func (c *Client) GetRelays() []string {
return c.relays
}
// GetPrimaryRelay returns the primary relay (theforest) for main event fetching.
// Returns the empty string when no relays are configured.
func (c *Client) GetPrimaryRelay() string {
	// The primary relay is, by convention, the first entry in the relay list.
	if len(c.relays) == 0 {
		return ""
	}
	return c.relays[0]
}
// GetProfileRelays returns fallback relays for profile fetching (excludes primary/theforest).
// The first configured relay is treated as the primary and is never included; the
// result is a fresh slice so callers cannot mutate the client's relay list.
func (c *Client) GetProfileRelays() []string {
	// With zero or one relay there are no fallbacks (shouldn't happen, but handle gracefully).
	if len(c.relays) <= 1 {
		return []string{}
	}
	fallbacks := make([]string, 0, len(c.relays)-1)
	fallbacks = append(fallbacks, c.relays[1:]...)
	return fallbacks
}
// GetPool returns the underlying SimplePool (for services that need direct access)
func (c *Client) GetPool() *nostr.SimplePool {
return c.pool
@ -228,3 +262,382 @@ func (c *Client) HealthCheck(ctx context.Context, timeout time.Duration) error { @@ -228,3 +262,382 @@ func (c *Client) HealthCheck(ctx context.Context, timeout time.Duration) error {
_, err := c.FetchEvents(ctx, filter)
return err
}
// FetchDeletionEvents fetches kind 5 (NIP-09) deletion events for the given authors
// from the primary relay only, and returns a map of deleted event IDs
// (event ID -> the newest deletion event that targets it).
//
// An empty author list, or one containing only blank pubkeys, yields an empty map
// and no error. Returns an error when the primary relay is not configured or the
// relay query fails.
func (c *Client) FetchDeletionEvents(ctx context.Context, authors []string) (map[string]*nostr.Event, error) {
	if len(authors) == 0 {
		return make(map[string]*nostr.Event), nil
	}
	// Deduplicate authors and drop empty pubkeys so the relay filter stays minimal.
	authorSet := make(map[string]bool, len(authors))
	uniqueAuthors := make([]string, 0, len(authors))
	for _, author := range authors {
		if author != "" && !authorSet[author] {
			authorSet[author] = true
			uniqueAuthors = append(uniqueAuthors, author)
		}
	}
	if len(uniqueAuthors) == 0 {
		return make(map[string]*nostr.Event), nil
	}
	// Fetch kind 5 deletion events from theforest only (primary relay).
	primaryRelay := c.GetPrimaryRelay()
	if primaryRelay == "" {
		return nil, fmt.Errorf("primary relay not configured")
	}
	filter := nostr.Filter{
		Kinds:   []int{KindDelete},
		Authors: uniqueAuthors,
		// No limit - fetch all deletion events
	}
	logger.WithFields(map[string]interface{}{
		"authors": len(uniqueAuthors),
	}).Debug("Fetching deletion events")
	deletionEvents, err := c.FetchEventsFromRelays(ctx, filter, []string{primaryRelay})
	if err != nil {
		return nil, fmt.Errorf("failed to fetch deletion events: %w", err)
	}
	// Parse deletion events - each kind 5 event lists the IDs it deletes in "e" tags.
	deletedEventIDs := make(map[string]*nostr.Event)
	for _, deletionEvent := range deletionEvents {
		for _, tag := range deletionEvent.Tags {
			// A usable "e" tag needs at least two elements: the marker and the event ID.
			// (The original double bounds check len(tag) > 0 && ... && len(tag) > 1
			// collapses to a single >= 2 test.)
			if len(tag) >= 2 && tag[0] == "e" {
				eventID := tag[1]
				// Keep the newest deletion event if multiple deletions target the same ID.
				existing, exists := deletedEventIDs[eventID]
				if !exists || deletionEvent.CreatedAt > existing.CreatedAt {
					deletedEventIDs[eventID] = deletionEvent
				}
			}
		}
	}
	logger.WithFields(map[string]interface{}{
		"deletion_events": len(deletionEvents),
		"deleted_ids":     len(deletedEventIDs),
	}).Debug("Parsed deletion events")
	return deletedEventIDs, nil
}
// FilterDeletedEvents removes events that have been deleted (kind 5)
// Returns the filtered list of events
func FilterDeletedEvents(events []*nostr.Event, deletedEventIDs map[string]*nostr.Event) []*nostr.Event {
	// No deletions known: return the input slice unchanged.
	if len(deletedEventIDs) == 0 {
		return events
	}
	kept := make([]*nostr.Event, 0, len(events))
	for _, ev := range events {
		if _, deleted := deletedEventIDs[ev.ID]; deleted {
			logger.WithFields(map[string]interface{}{
				"event_id": ev.ID,
				"kind":     ev.Kind,
			}).Debug("Filtering out deleted event")
			continue
		}
		kept = append(kept, ev)
	}
	logger.WithFields(map[string]interface{}{
		"original": len(events),
		"filtered": len(kept),
		"removed":  len(events) - len(kept),
	}).Debug("Filtered deleted events")
	return kept
}
// ProcessEventsWithCacheResult contains the processed events and their profiles.
// It is the return value of Client.ProcessEventsWithCache.
type ProcessEventsWithCacheResult struct {
	// Events is the final list: merged with cache, deduplicated, deletion-filtered,
	// sorted newest-first, and capped at the display limit.
	Events []*nostr.Event
	// Profiles maps author pubkey -> profile for the authors of Events.
	Profiles map[string]*Profile
}
// ProcessEventsWithCache is the standard process for fetching and processing events
// for kinds 1, 30023, 30040, 30041, 30818. It:
// 1. If indexEventID is provided, fetches the index event and only queries for events referenced in it
// 2. Fetches 2x the display limit (or all index-referenced events if indexEventID is provided)
// 3. Merges with existing cache map
// 4. Deduplicates (keeping newest)
// 5. Fetches deletion events
// 6. Removes deleted events
// 7. Sorts newest-first
// 8. Applies display limit
// 9. Fetches profiles for displayed events
// 10. Returns final events and profiles ready for display
func (c *Client) ProcessEventsWithCache(
	ctx context.Context,
	kind int,
	displayLimit int,
	existingEvents map[string]*nostr.Event, // Existing cache map (event ID -> event)
	relayURL string, // Relay to fetch from (empty = use primary relay)
	indexEventID string, // Optional: index event ID to fetch and filter by
	indexKind int, // Optional: kind of the index event (required if indexEventID is provided)
) (*ProcessEventsWithCacheResult, error) {
	// Calculate fetch limit (2x display limit, minimum 50).
	// NOTE(review): fetchLimit is only used by the non-index branch below; the
	// index branch queries all events per author with no limit.
	fetchLimit := displayLimit * 2
	if fetchLimit < 50 {
		fetchLimit = 50
	}
	// Determine which relay to use: an explicit relayURL wins, otherwise the primary.
	relays := []string{}
	if relayURL != "" {
		relays = []string{relayURL}
	} else {
		primaryRelay := c.GetPrimaryRelay()
		if primaryRelay == "" {
			return nil, fmt.Errorf("primary relay not configured")
		}
		relays = []string{primaryRelay}
	}
	var fetchedEvents []*nostr.Event
	var err error
	var indexItems []IndexItem
	// Step 1: If indexEventID is provided, fetch the index event and extract referenced items
	if indexEventID != "" {
		// Fetch the index event by its ID.
		indexFilter := nostr.Filter{
			IDs: []string{indexEventID},
		}
		// NOTE(review): this `err` (and the ones below inside the branch) shadow the
		// outer `var err error`, which is intentional here — the outer err is only
		// assigned in the else branch.
		indexEvents, err := c.FetchEventsFromRelays(ctx, indexFilter, relays)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch index event: %w", err)
		}
		if len(indexEvents) == 0 {
			return nil, fmt.Errorf("index event not found: %s", indexEventID)
		}
		// Parse the index event into its referenced items.
		index, err := ParseIndexEvent(indexEvents[0], indexKind)
		if err != nil {
			return nil, fmt.Errorf("failed to parse index event: %w", err)
		}
		// Extract items of the target kind from the index.
		indexItems = make([]IndexItem, 0)
		for _, item := range index.Items {
			if item.Kind == kind {
				indexItems = append(indexItems, item)
			}
		}
		if len(indexItems) == 0 {
			// No items of this kind in the index, return empty result (not an error).
			return &ProcessEventsWithCacheResult{
				Events:   []*nostr.Event{},
				Profiles: make(map[string]*Profile),
			}, nil
		}
		logger.WithFields(map[string]interface{}{
			"kind":           kind,
			"index_items":    len(indexItems),
			"index_event_id": indexEventID,
		}).Debug("Fetched index event, querying referenced events")
		// Build filters for events referenced in the index.
		// Group by author to create efficient filters.
		authorDTags := make(map[string][]string) // author -> list of d tags
		for _, item := range indexItems {
			if item.Pubkey != "" && item.DTag != "" {
				authorDTags[item.Pubkey] = append(authorDTags[item.Pubkey], item.DTag)
			}
		}
		// Fetch events for each author (query by kind, author, and d tags).
		// NOTE(review): one relay round-trip per author; could be batched via
		// FetchEventsBatchFromRelays if this becomes a hot path — confirm before changing.
		allFetchedEvents := make([]*nostr.Event, 0)
		for author, dTags := range authorDTags {
			// Query for events by this author with any of the d tags.
			// Note: Nostr filters don't support OR for d tags, so we query each d tag separately
			// or query all events by author and filter locally.
			filter := nostr.Filter{
				Kinds:   []int{kind},
				Authors: []string{author},
				// We'll filter by d tags locally after fetching
			}
			authorEvents, err := c.FetchEventsFromRelays(ctx, filter, relays)
			if err != nil {
				// Best-effort: a failing author query is logged and skipped, not fatal.
				logger.WithFields(map[string]interface{}{
					"author": author,
					"error":  err,
				}).Warn("Failed to fetch events for author, continuing")
				continue
			}
			// Filter by d tags locally.
			dTagSet := make(map[string]bool)
			for _, dTag := range dTags {
				dTagSet[dTag] = true
			}
			for _, event := range authorEvents {
				// Extract the first "d" tag value from the event.
				var eventDTag string
				for _, tag := range event.Tags {
					if len(tag) > 0 && tag[0] == "d" && len(tag) > 1 {
						eventDTag = tag[1]
						break
					}
				}
				if eventDTag != "" && dTagSet[eventDTag] {
					allFetchedEvents = append(allFetchedEvents, event)
				}
			}
		}
		fetchedEvents = allFetchedEvents
		logger.WithFields(map[string]interface{}{
			"kind":           kind,
			"fetched":        len(fetchedEvents),
			"index_items":    len(indexItems),
			"index_event_id": indexEventID,
		}).Debug("Fetched events from index")
	} else {
		// Step 1 (no index): Fetch 2x display limit (standard process).
		filter := nostr.Filter{
			Kinds: []int{kind},
			Limit: fetchLimit,
		}
		logger.WithFields(map[string]interface{}{
			"kind":          kind,
			"fetch_limit":   fetchLimit,
			"display_limit": displayLimit,
			"relay":         relays[0],
		}).Debug("Fetching events for processing")
		// Use client's FetchEventsFromRelays (works with both specific relay and primary).
		fetchedEvents, err = c.FetchEventsFromRelays(ctx, filter, relays)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch events: %w", err)
		}
	}
	logger.WithFields(map[string]interface{}{
		"kind":    kind,
		"fetched": len(fetchedEvents),
	}).Debug("Fetched events from relay")
	// Step 2: Merge with existing cache map.
	// NOTE(review): despite the parameter comment, a fetched event with the same ID
	// and a newer CreatedAt replaces the cached one — fetched takes precedence, not
	// "existing takes precedence"; confirm which behavior is intended.
	eventMap := make(map[string]*nostr.Event)
	// Add existing events first.
	for id, event := range existingEvents {
		eventMap[id] = event
	}
	// Add/update with fetched events (keep newest if duplicate).
	for _, event := range fetchedEvents {
		existing, exists := eventMap[event.ID]
		if !exists || event.CreatedAt > existing.CreatedAt {
			eventMap[event.ID] = event
		}
	}
	logger.WithFields(map[string]interface{}{
		"existing": len(existingEvents),
		"fetched":  len(fetchedEvents),
		"merged":   len(eventMap),
	}).Debug("Merged events with cache")
	// Step 3: Convert map to slice (already deduplicated by event ID).
	allEvents := make([]*nostr.Event, 0, len(eventMap))
	for _, event := range eventMap {
		allEvents = append(allEvents, event)
	}
	// Step 4: Fetch deletion events for all authors of the merged set.
	authors := make([]string, 0, len(allEvents))
	authorSet := make(map[string]bool)
	for _, event := range allEvents {
		if !authorSet[event.PubKey] {
			authors = append(authors, event.PubKey)
			authorSet[event.PubKey] = true
		}
	}
	deletedEventIDs, err := c.FetchDeletionEvents(ctx, authors)
	if err != nil {
		// Best-effort: if deletion lookup fails we show potentially-deleted events
		// rather than failing the whole pipeline.
		logger.WithField("error", err).Warn("Failed to fetch deletion events, continuing without filtering")
		deletedEventIDs = make(map[string]*nostr.Event)
	}
	// Step 5: Remove deleted events.
	allEvents = FilterDeletedEvents(allEvents, deletedEventIDs)
	// Step 6: Sort newest-first (by created_at descending).
	// NOTE(review): this is an O(n^2) selection-style sort; with displayLimit up to
	// 1000 (so ~2000 merged events) sort.Slice would be preferable if "sort" is
	// imported elsewhere in this file — confirm before changing.
	for i := 0; i < len(allEvents)-1; i++ {
		for j := i + 1; j < len(allEvents); j++ {
			if allEvents[i].CreatedAt < allEvents[j].CreatedAt {
				allEvents[i], allEvents[j] = allEvents[j], allEvents[i]
			}
		}
	}
	logger.WithFields(map[string]interface{}{
		"after_deletion": len(allEvents),
		"sorted":         true,
	}).Debug("Removed deletions and sorted events")
	// Step 7: Apply display limit (0 or negative means "no limit").
	if displayLimit > 0 && len(allEvents) > displayLimit {
		allEvents = allEvents[:displayLimit]
		logger.WithFields(map[string]interface{}{
			"limited": displayLimit,
		}).Debug("Applied display limit")
	}
	// NOTE(review): this block only logs; it cannot "ensure" anything because no
	// additional fetching happens here.
	if len(allEvents) < displayLimit && len(allEvents) > 0 {
		logger.WithFields(map[string]interface{}{
			"available": len(allEvents),
			"requested": displayLimit,
		}).Debug("Fewer events available than display limit")
	}
	logger.WithFields(map[string]interface{}{
		"kind":          kind,
		"final_count":   len(allEvents),
		"display_limit": displayLimit,
	}).Info("Processed events with cache")
	// Step 8: Fetch profiles for the authors of the events that will be displayed.
	profileAuthors := make([]string, 0, len(allEvents))
	profileAuthorSet := make(map[string]bool)
	for _, event := range allEvents {
		if !profileAuthorSet[event.PubKey] {
			profileAuthors = append(profileAuthors, event.PubKey)
			profileAuthorSet[event.PubKey] = true
		}
	}
	profiles, err := c.FetchProfilesBatch(ctx, profileAuthors)
	if err != nil {
		// Best-effort: missing profiles should not fail the pipeline.
		logger.WithField("error", err).Warn("Failed to fetch profiles, continuing without profiles")
		profiles = make(map[string]*Profile)
	}
	// NOTE(review): "authors" here is the pre-limit deletion-author count, not
	// len(profileAuthors) — the logged number can exceed the profiles requested.
	logger.WithFields(map[string]interface{}{
		"profiles_fetched": len(profiles),
		"authors":          len(authors),
	}).Debug("Fetched profiles for displayed events")
	return &ProcessEventsWithCacheResult{
		Events:   allEvents,
		Profiles: profiles,
	}, nil
}

28
internal/nostr/ebooks.go

@ -40,30 +40,26 @@ type EBookInfo struct { @@ -40,30 +40,26 @@ type EBookInfo struct {
// FetchTopLevelIndexEvents fetches all top-level 30040 events from the specified relay
// Top-level means the event is not referenced in any other 30040 event's 'a' tags
// Uses the standard ProcessEventsWithCache process, then filters for top-level events
func (es *EBooksService) FetchTopLevelIndexEvents(ctx context.Context) ([]EBookInfo, error) {
// Connect to the specific relay
relay, err := es.client.ConnectToRelay(ctx, es.relayURL)
// For e-books, we want to fetch a large number to ensure we get all top-level events
// Use a high display limit (1000) to get all events, then filter for top-level
displayLimit := 1000
// Use standard process: fetch 2x limit, merge cache, deduplicate, filter deletions, sort, limit, fetch profiles
// For e-books, use a high display limit (1000) to get all events, then filter for top-level
// This means we'll fetch 2000 events, which should be enough for most cases
result, err := es.client.ProcessEventsWithCache(ctx, es.indexKind, displayLimit, make(map[string]*nostr.Event), es.relayURL, "", 0)
if err != nil {
return nil, fmt.Errorf("failed to connect to relay %s: %w", es.relayURL, err)
return nil, fmt.Errorf("failed to process index events: %w", err)
}
defer relay.Close()
// Fetch all 30040 events (limit 1000 for e-books)
filter := nostr.Filter{
Kinds: []int{es.indexKind},
Limit: 1000,
}
logFilter(filter, fmt.Sprintf("all index events (kind %d) from %s", es.indexKind, es.relayURL))
events, err := relay.QuerySync(ctx, filter)
if err != nil {
return nil, fmt.Errorf("failed to query events: %w", err)
}
events := result.Events
logger.WithFields(map[string]interface{}{
"events": len(events),
"relay": es.relayURL,
}).Debug("Fetched index events")
}).Debug("Processed index events using standard process")
// Build a set of all referenced kind:pubkey:dtag from 'a' tags
referencedSet := make(map[string]bool)

75
internal/nostr/feed.go

@ -24,64 +24,45 @@ func NewFeedService(client *Client, feedKind int) *FeedService { @@ -24,64 +24,45 @@ func NewFeedService(client *Client, feedKind int) *FeedService {
}
}
// FetchFeedItems fetches recent feed events from the configured feed relay using SimplePool
// FetchFeedItems fetches recent feed events from the configured feed relay
// Uses the standard ProcessEventsWithCache process
func (fs *FeedService) FetchFeedItems(ctx context.Context, feedRelay string, maxEvents int) ([]FeedItem, error) {
if feedRelay == "" {
return nil, fmt.Errorf("feed relay not configured")
}
filter := nostr.Filter{
Kinds: []int{fs.feedKind},
Limit: 50,
// Use standard process: fetch 2x limit, merge cache, deduplicate, filter deletions, sort, limit, fetch profiles
result, err := fs.client.ProcessEventsWithCache(ctx, fs.feedKind, maxEvents, make(map[string]*nostr.Event), feedRelay, "", 0)
if err != nil {
return nil, fmt.Errorf("failed to process feed events: %w", err)
}
logFilter(filter, fmt.Sprintf("feed (kind %d)", fs.feedKind))
// Create context with 30-second timeout
queryCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
logger.WithFields(map[string]interface{}{
"relay": feedRelay,
"kind": fs.feedKind,
"limit": 50,
}).Debug("Fetching feed items using SimplePool")
// Use the client's pool to query the feed relay
// SimplePool handles connection pooling and reuse automatically
eventChan := fs.client.GetPool().SubManyEose(queryCtx, []string{feedRelay}, nostr.Filters{filter})
// Collect events and convert to feed items
items := make([]FeedItem, 0, 50)
for incomingEvent := range eventChan {
if incomingEvent.Event != nil {
item := FeedItem{
EventID: incomingEvent.Event.ID,
Author: incomingEvent.Event.PubKey,
Content: incomingEvent.Event.Content,
Time: time.Unix(int64(incomingEvent.Event.CreatedAt), 0),
Link: fmt.Sprintf("https://alexandria.gitcitadel.eu/events?id=nevent1%s", incomingEvent.Event.ID),
}
// Convert events to feed items
items := make([]FeedItem, 0, len(result.Events))
for _, event := range result.Events {
item := FeedItem{
EventID: event.ID,
Author: event.PubKey,
Content: event.Content,
Time: time.Unix(int64(event.CreatedAt), 0),
Link: fmt.Sprintf("https://alexandria.gitcitadel.eu/events?id=nevent1%s", event.ID),
}
// Extract title, summary, and image tags
for _, tag := range incomingEvent.Event.Tags {
if len(tag) > 0 && len(tag) > 1 {
switch tag[0] {
case "title":
item.Title = tag[1]
case "summary":
item.Summary = tag[1]
case "image":
item.Image = tag[1]
}
// Extract title, summary, and image tags
for _, tag := range event.Tags {
if len(tag) > 0 && len(tag) > 1 {
switch tag[0] {
case "title":
item.Title = tag[1]
case "summary":
item.Summary = tag[1]
case "image":
item.Image = tag[1]
}
}
items = append(items, item)
logger.WithFields(map[string]interface{}{
"relay": incomingEvent.Relay.URL,
"event_id": incomingEvent.Event.ID,
}).Debug("Received feed event")
}
items = append(items, item)
}
logger.WithFields(map[string]interface{}{

3
internal/nostr/kinds.go

@ -26,6 +26,9 @@ const ( @@ -26,6 +26,9 @@ const (
// KindRepoAnnouncement is kind 30617 - repository announcement events
KindRepoAnnouncement = 30617
// KindDelete is kind 5 - event deletion (NIP-09)
KindDelete = 5
)
// SupportedWikiKinds returns the list of supported wiki kinds

9
internal/nostr/profile.go

@ -118,8 +118,13 @@ func (c *Client) FetchProfilesBatch(ctx context.Context, pubkeys []string) (map[ @@ -118,8 +118,13 @@ func (c *Client) FetchProfilesBatch(ctx context.Context, pubkeys []string) (map[
"pubkeys": len(uniquePubkeys),
}).Debug("Batch fetching profiles")
// Fetch all profile events
events, err := c.FetchEvents(ctx, filter)
// Fetch all profile events from fallback relays only (not theforest)
profileRelays := c.GetProfileRelays()
if len(profileRelays) == 0 {
// Fallback: if no profile relays configured, use all relays
profileRelays = c.GetRelays()
}
events, err := c.FetchEventsFromRelays(ctx, filter, profileRelays)
if err != nil {
return nil, fmt.Errorf("failed to fetch profile events: %w", err)
}

145
internal/nostr/wiki.go

@ -57,8 +57,12 @@ func (ws *WikiService) FetchWikiIndex(ctx context.Context, naddrStr string) (*In @@ -57,8 +57,12 @@ func (ws *WikiService) FetchWikiIndex(ctx context.Context, naddrStr string) (*In
filter := naddr.ToFilter()
logFilter(filter, fmt.Sprintf("wiki index (kind %d)", ws.indexKind))
// Fetch the event
event, err := ws.client.FetchEvent(ctx, filter)
// Fetch the event from theforest only (primary relay)
primaryRelay := ws.client.GetPrimaryRelay()
if primaryRelay == "" {
return nil, fmt.Errorf("primary relay not configured")
}
event, err := ws.client.FetchEventFromRelays(ctx, filter, []string{primaryRelay})
if err != nil {
return nil, fmt.Errorf("failed to fetch index event: %w", err)
}
@ -73,7 +77,7 @@ func (ws *WikiService) FetchWikiIndex(ctx context.Context, naddrStr string) (*In @@ -73,7 +77,7 @@ func (ws *WikiService) FetchWikiIndex(ctx context.Context, naddrStr string) (*In
}
// FetchWikiEvents fetches all wiki events referenced in an index
// Queries by kind only, then filters locally and batch-fetches profiles
// Uses ProcessEventsWithCache for the initial fetch, then filters by index items
func (ws *WikiService) FetchWikiEvents(ctx context.Context, index *IndexEvent) ([]*WikiEvent, error) {
// Build a map of expected items (kind:pubkey:dtag) for fast lookup
expectedItems := make(map[string]IndexItem)
@ -88,31 +92,37 @@ func (ws *WikiService) FetchWikiEvents(ctx context.Context, index *IndexEvent) ( @@ -88,31 +92,37 @@ func (ws *WikiService) FetchWikiEvents(ctx context.Context, index *IndexEvent) (
return []*WikiEvent{}, nil
}
// Query ALL events of this kind - simple query by kind only
filter := nostr.Filter{
Kinds: []int{ws.wikiKind},
Limit: 50,
// Use ProcessEventsWithCache to fetch events of this kind
// Use a high display limit (1000) to ensure we get all events referenced in the index
// This means we'll fetch 2000 events, which should be enough for most cases
displayLimit := 1000
primaryRelay := ws.client.GetPrimaryRelay()
if primaryRelay == "" {
return nil, fmt.Errorf("primary relay not configured")
}
logger.WithFields(map[string]interface{}{
"kind": ws.wikiKind,
"items": len(expectedItems),
}).Debug("Querying all events of kind from relays")
"kind": ws.wikiKind,
"items": len(expectedItems),
"index_event_id": index.Event.ID,
}).Debug("Fetching wiki events using ProcessEventsWithCache with index")
// Fetch all events of this kind
allEvents, err := ws.client.FetchEvents(ctx, filter)
// Use standard process with index event ID: fetch index, query only referenced events, merge cache, deduplicate, filter deletions, sort, limit, fetch profiles
result, err := ws.client.ProcessEventsWithCache(ctx, ws.wikiKind, displayLimit, make(map[string]*nostr.Event), primaryRelay, index.Event.ID, ws.indexKind)
if err != nil {
logger.WithField("error", err).Warn("Failed to fetch events by kind")
logger.WithField("error", err).Warn("Failed to fetch wiki events using ProcessEventsWithCache")
return nil, err
}
allEvents := result.Events
logger.WithFields(map[string]interface{}{
"fetched": len(allEvents),
"expected": len(expectedItems),
}).Debug("Fetched events, filtering locally")
}).Debug("Fetched wiki events using ProcessEventsWithCache with index")
// Filter events locally by matching against index items
eventMap := make(map[string]*nostr.Event) // Map by kind:pubkey:dtag
// Build event map by kind:pubkey:dtag for matching
eventMap := make(map[string]*nostr.Event)
for _, event := range allEvents {
// Extract d-tag from event
var dTag string
@ -128,12 +138,10 @@ func (ws *WikiService) FetchWikiEvents(ctx context.Context, index *IndexEvent) ( @@ -128,12 +138,10 @@ func (ws *WikiService) FetchWikiEvents(ctx context.Context, index *IndexEvent) (
}
key := fmt.Sprintf("%d:%s:%s", event.Kind, event.PubKey, dTag)
if _, expected := expectedItems[key]; expected {
// Keep the newest version if we have multiple
existing, exists := eventMap[key]
if !exists || event.CreatedAt > existing.CreatedAt {
eventMap[key] = event
}
// Keep the newest version if we have multiple
existing, exists := eventMap[key]
if !exists || event.CreatedAt > existing.CreatedAt {
eventMap[key] = event
}
}
@ -181,50 +189,22 @@ func (ws *WikiService) GetLongformKind() int { @@ -181,50 +189,22 @@ func (ws *WikiService) GetLongformKind() int {
}
// FetchLongformArticles fetches the newest longform articles (kind 30023) from a specific relay
// Queries by kind only, sorted by newest first, limit 50
// Uses the standard ProcessEventsWithCache process
func (ws *WikiService) FetchLongformArticles(ctx context.Context, relayURL string, longformKind int, limit int) ([]*nostr.Event, error) {
// Connect to the specific relay
relay, err := ws.client.ConnectToRelay(ctx, relayURL)
if err != nil {
return nil, fmt.Errorf("failed to connect to relay %s: %w", relayURL, err)
}
defer relay.Close()
// Query ALL events of this kind, sorted by newest first
filter := nostr.Filter{
Kinds: []int{longformKind},
Limit: limit,
}
logFilter(filter, fmt.Sprintf("longform articles (kind %d) from %s", longformKind, relayURL))
events, err := relay.QuerySync(ctx, filter)
// Use standard process: fetch 2x limit, merge cache, deduplicate, filter deletions, sort, limit, fetch profiles
result, err := ws.client.ProcessEventsWithCache(ctx, longformKind, limit, make(map[string]*nostr.Event), relayURL, "", 0)
if err != nil {
return nil, fmt.Errorf("failed to query events: %w", err)
return nil, fmt.Errorf("failed to process longform articles: %w", err)
}
// Sort by created_at descending (newest first)
// Note: go-nostr may already return sorted, but we'll ensure it
for i := 0; i < len(events)-1; i++ {
for j := i + 1; j < len(events); j++ {
if events[i].CreatedAt < events[j].CreatedAt {
events[i], events[j] = events[j], events[i]
}
}
}
logger.WithFields(map[string]interface{}{
"events": len(events),
"relay": relayURL,
"kind": longformKind,
}).Debug("Fetched longform articles")
return events, nil
// Note: Profiles are available in result.Profiles but not returned here
// Callers should fetch profiles separately if needed, or we could return both
return result.Events, nil
}
// FetchIndexEvents fetches all events of a specific kind referenced in an index
// Only supports article kinds configured in the service
// Queries by kind only, then filters locally
// Uses ProcessEventsWithCache for the initial fetch, then filters by index items
func (ws *WikiService) FetchIndexEvents(ctx context.Context, index *IndexEvent, targetKind int) ([]*nostr.Event, error) {
// Check if the target kind is in the allowed article kinds
allowed := false
@ -251,31 +231,37 @@ func (ws *WikiService) FetchIndexEvents(ctx context.Context, index *IndexEvent, @@ -251,31 +231,37 @@ func (ws *WikiService) FetchIndexEvents(ctx context.Context, index *IndexEvent,
return []*nostr.Event{}, nil
}
// Query ALL events of this kind - simple query by kind only
filter := nostr.Filter{
Kinds: []int{targetKind},
Limit: 50,
// Use ProcessEventsWithCache to fetch events of this kind
// Use a high display limit (1000) to ensure we get all events referenced in the index
// This means we'll fetch 2000 events, which should be enough for most cases
displayLimit := 1000
primaryRelay := ws.client.GetPrimaryRelay()
if primaryRelay == "" {
return nil, fmt.Errorf("primary relay not configured")
}
logger.WithFields(map[string]interface{}{
"kind": targetKind,
"items": len(expectedItems),
}).Debug("Querying all events of kind from relays")
"kind": targetKind,
"items": len(expectedItems),
"index_event_id": index.Event.ID,
}).Debug("Fetching events using ProcessEventsWithCache with index")
// Fetch all events of this kind
allEvents, err := ws.client.FetchEvents(ctx, filter)
// Use standard process with index event ID: fetch index, query only referenced events, merge cache, deduplicate, filter deletions, sort, limit, fetch profiles
result, err := ws.client.ProcessEventsWithCache(ctx, targetKind, displayLimit, make(map[string]*nostr.Event), primaryRelay, index.Event.ID, ws.indexKind)
if err != nil {
logger.WithField("error", err).Warn("Failed to fetch events by kind")
logger.WithField("error", err).Warn("Failed to fetch events using ProcessEventsWithCache")
return nil, err
}
allEvents := result.Events
logger.WithFields(map[string]interface{}{
"fetched": len(allEvents),
"expected": len(expectedItems),
}).Debug("Fetched events, filtering locally")
}).Debug("Fetched events using ProcessEventsWithCache with index")
// Filter events locally by matching against index items
eventMap := make(map[string]*nostr.Event) // Map by kind:pubkey:dtag
// Build event map by kind:pubkey:dtag for matching
eventMap := make(map[string]*nostr.Event)
for _, event := range allEvents {
// Extract d-tag from event
var dTag string
@@ -291,12 +277,10 @@ func (ws *WikiService) FetchIndexEvents(ctx context.Context, index *IndexEvent,
}
key := fmt.Sprintf("%d:%s:%s", event.Kind, event.PubKey, dTag)
if _, expected := expectedItems[key]; expected {
// Keep the newest version if we have multiple
existing, exists := eventMap[key]
if !exists || event.CreatedAt > existing.CreatedAt {
eventMap[key] = event
}
// Keep the newest version if we have multiple
existing, exists := eventMap[key]
if !exists || event.CreatedAt > existing.CreatedAt {
eventMap[key] = event
}
}
@@ -342,7 +326,12 @@ func (ws *WikiService) FetchWikiEventByDTag(ctx context.Context, pubkey, dTag st
}
logFilter(filter, fmt.Sprintf("wiki by d-tag %s", dTag))
event, err := ws.client.FetchEvent(ctx, filter)
// Fetch from theforest only (primary relay)
primaryRelay := ws.client.GetPrimaryRelay()
if primaryRelay == "" {
return nil, fmt.Errorf("primary relay not configured")
}
event, err := ws.client.FetchEventFromRelays(ctx, filter, []string{primaryRelay})
if err != nil {
return nil, fmt.Errorf("failed to fetch wiki event: %w", err)
}

14
static/css/main.css

@@ -1025,6 +1025,20 @@ footer {
border-bottom: 1px solid var(--border-color);
}
.article-image {
margin: 1.5rem 0;
width: 100%;
}
.article-image img {
width: 100%;
max-width: 100%;
height: auto;
border-radius: 8px;
object-fit: cover;
display: block;
}
.article-title {
font-size: 2.5rem;
margin: 0 0 0.5rem 0;

227
static/css/responsive.css

@@ -207,7 +207,8 @@
.blog-content {
order: 1;
padding: 1.5rem;
padding: 1rem;
margin-bottom: 1rem;
}
.blog-header {
@@ -223,27 +224,54 @@
max-width: 150px;
}
.article-header {
margin-bottom: 1.5rem;
padding-bottom: 1rem;
}
.article-title {
font-size: 1.75rem;
font-size: 1.5rem;
line-height: 1.3;
word-wrap: break-word;
overflow-wrap: break-word;
}
.article-subtitle {
font-size: 0.9rem;
}
.article-summary {
font-size: 1rem;
font-size: 0.95rem;
padding: 0.75rem;
margin: 1rem 0;
word-wrap: break-word;
overflow-wrap: break-word;
}
.article-link {
padding: 0.75rem;
min-height: 44px;
}
.article-link-title {
font-size: 0.95rem;
word-wrap: break-word;
overflow-wrap: break-word;
}
.article-link-meta {
font-size: 0.8rem;
flex-wrap: wrap;
gap: 0.5rem;
}
.article-link-meta .article-date {
white-space: nowrap;
}
.article-link-meta .article-author {
flex: 1;
min-width: 0;
}
/* Contact Page */
@@ -417,9 +445,130 @@
.page-content {
line-height: 1.7;
font-size: 0.95rem;
word-wrap: break-word;
overflow-wrap: break-word;
}
.page-content p {
margin-bottom: 1rem;
}
.page-content h1,
.page-content h2,
.page-content h3,
.page-content h4,
.page-content h5,
.page-content h6 {
margin-top: 1.5rem;
margin-bottom: 0.75rem;
word-wrap: break-word;
overflow-wrap: break-word;
}
.page-content ul,
.page-content ol {
margin-left: 1.25rem;
margin-bottom: 1rem;
padding-left: 0.5rem;
}
.page-content li {
margin-bottom: 0.5rem;
}
.page-content blockquote {
margin: 1rem 0;
padding: 0.75rem 1rem;
border-left: 3px solid var(--accent-color);
background: var(--bg-secondary);
font-size: 0.9rem;
}
.page-content table {
display: block;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
margin: 1rem 0;
font-size: 0.85rem;
}
.page-content table thead,
.page-content table tbody,
.page-content table tr {
display: table;
width: 100%;
table-layout: fixed;
}
.page-content table th,
.page-content table td {
padding: 0.5rem;
word-break: break-word;
}
.table-of-contents {
margin-top: 2rem;
padding: 1rem;
background: var(--bg-secondary);
border-radius: 8px;
border: 1px solid var(--border-color);
font-size: 0.9rem;
}
.table-of-contents h2 {
font-size: 1.1rem;
margin-bottom: 0.75rem;
}
.table-of-contents ul {
margin-left: 1rem;
}
.table-of-contents li {
margin-bottom: 0.5rem;
}
.table-of-contents a {
color: var(--link-color);
text-decoration: none;
word-break: break-word;
}
.table-of-contents a:hover {
text-decoration: underline;
}
/* Feed */
.feed-page {
padding: 1rem;
}
.feed-about-blurb {
padding: 1rem;
margin-bottom: 1.5rem;
}
.feed-about-blurb h2 {
font-size: 1.25rem;
margin-bottom: 0.75rem;
}
.feed-about-blurb p {
font-size: 0.9rem;
margin-bottom: 0.75rem;
}
.feed-about-blurb ul {
margin-left: 1.25rem;
font-size: 0.9rem;
}
.feed-about-blurb code {
font-size: 0.8rem;
padding: 0.15rem 0.3rem;
word-break: break-all;
}
.feed-container {
padding: 1rem;
}
@@ -436,12 +585,84 @@
.feed-content {
font-size: 0.85rem;
word-wrap: break-word;
overflow-wrap: break-word;
}
.feed-time {
font-size: 0.8rem;
}
.feed-link {
word-break: break-all;
overflow-wrap: anywhere;
}
/* Article Content */
.article-content {
word-wrap: break-word;
overflow-wrap: break-word;
line-height: 1.7;
font-size: 0.95rem;
}
.article-content p {
margin-bottom: 1rem;
}
.article-content h1,
.article-content h2,
.article-content h3,
.article-content h4,
.article-content h5,
.article-content h6 {
margin-top: 1.5rem;
margin-bottom: 0.75rem;
word-wrap: break-word;
overflow-wrap: break-word;
}
.article-content ul,
.article-content ol {
margin-left: 1.25rem;
margin-bottom: 1rem;
padding-left: 0.5rem;
}
.article-content li {
margin-bottom: 0.5rem;
}
.article-content blockquote {
margin: 1rem 0;
padding: 0.75rem 1rem;
border-left: 3px solid var(--accent-color);
background: var(--bg-secondary);
font-size: 0.9rem;
}
.article-content table {
display: block;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
margin: 1rem 0;
font-size: 0.85rem;
}
.article-content table thead,
.article-content table tbody,
.article-content table tr {
display: table;
width: 100%;
table-layout: fixed;
}
.article-content table th,
.article-content table td {
padding: 0.5rem;
word-break: break-word;
}
/* Error Pages */
.error-page {
padding: 2rem 1rem;

5
templates/articles.html

@@ -45,6 +45,11 @@
<h1 class="article-title">{{$item.Title}}</h1>
<p class="article-subtitle">Longform article</p>
</header>
{{if and $item.Image (ne $item.Image "")}}
<div class="article-image">
<img src="{{$item.Image}}" alt="{{$item.Title}}" />
</div>
{{end}}
{{if $item.Summary}}<p class="article-summary">{{$item.Summary}}</p>{{end}}
<div class="article-content">
{{$item.Content}}

5
templates/blog.html

@@ -58,6 +58,11 @@
<h1 class="article-title">{{$item.Title}}</h1>
<p class="article-subtitle">This entry originally appeared in this blog.</p>
</header>
{{if and $item.Image (ne $item.Image "")}}
<div class="article-image">
<img src="{{$item.Image}}" alt="{{$item.Title}}" />
</div>
{{end}}
{{if $item.Summary}}<p class="article-summary">{{$item.Summary}}</p>{{end}}
<div class="article-content">
{{$item.Content}}

86
templates/landing.html

@@ -5,12 +5,6 @@
<p class="lead">Your gateway to decentralized knowledge and community-driven content.</p>
</section>
{{if .FeedItems}}
<section class="feed-section">
{{template "feed" .}}
</section>
{{end}}
<section class="features">
<h2>Explore Our Content</h2>
<div class="feature-grid">
@@ -113,83 +107,5 @@
</div>
</div>
</section>
{{if .AllArticleItems}}
<section class="articles-section">
<h2>All Articles</h2>
<div class="feature-grid">
{{range .AllArticleItems}}
<div class="feature-card">
<div class="feature-image-container">
{{$item := .}}
{{$profile := index $.Profiles $item.Author}}
{{$image := "/static/GitCitadel_Icon_Gradient.svg"}}
{{if and $item.Image (ne $item.Image "")}}
{{$image = $item.Image}}
{{else if and $profile $profile.Picture (ne $profile.Picture "")}}
{{$image = $profile.Picture}}
{{end}}
<div class="feature-image-wrapper">
<img src="{{$image}}" alt="{{$item.Title}}" class="feature-image">
<div class="feature-image-overlay">
<h4 class="feature-image-title">{{$item.Title}}</h4>
{{if $item.Summary}}
<p class="feature-image-summary">{{truncate $item.Summary 250}}</p>
{{end}}
</div>
</div>
</div>
<div class="feature-card-content">
<h3>{{$item.Title}}</h3>
{{if $item.Summary}}
<p>{{truncate $item.Summary 150}}</p>
{{end}}
<a href="/articles#{{$item.DTag}}" class="btn"><span class="icon-inline">{{icon "arrow-right"}}</span> Read Article</a>
</div>
</div>
{{end}}
</div>
</section>
{{end}}
{{if .AllEBooks}}
<section class="ebooks-section">
<h2>All E-Books</h2>
<div class="feature-grid">
{{range .AllEBooks}}
<div class="feature-card">
<div class="feature-image-container">
{{$ebook := .}}
{{$profile := index $.Profiles $ebook.Author}}
{{$image := "/static/GitCitadel_Icon_Gradient.svg"}}
{{if and $ebook.Image (ne $ebook.Image "")}}
{{$image = $ebook.Image}}
{{else if and $profile $profile.Picture (ne $profile.Picture "")}}
{{$image = $profile.Picture}}
{{end}}
<div class="feature-image-wrapper">
<img src="{{$image}}" alt="{{$ebook.Title}}" class="feature-image">
<div class="feature-image-overlay">
<h4 class="feature-image-title">{{$ebook.Title}}</h4>
{{if $ebook.Summary}}
<p class="feature-image-summary">{{truncate $ebook.Summary 250}}</p>
{{end}}
</div>
</div>
</div>
<div class="feature-card-content">
<h3>{{$ebook.Title}}</h3>
{{if $ebook.Summary}}
<p>{{truncate $ebook.Summary 150}}</p>
{{end}}
<a href="https://alexandria.gitcitadel.eu/publication/naddr/{{$ebook.Naddr}}" target="_blank" rel="noopener noreferrer" class="btn"><span class="icon-inline">{{icon "external-link"}}</span> View on Alexandria</a>
</div>
</div>
{{end}}
</div>
</section>
{{end}}
</article>
{{end}}
{{/* Feed is defined in components.html */}}
{{end}}
Loading…
Cancel
Save