
Unignore internal/cache dir

master
Silberengel, 2 weeks ago
commit 3437759fd5
  1. .gitignore (4)
  2. internal/cache/cache.go (89)
  3. internal/cache/feed_cache.go (54)
  4. internal/cache/media_cache.go (198)
  5. internal/cache/page.go (29)
  6. internal/cache/rewarm.go (408)

.gitignore

@@ -75,9 +75,9 @@ Thumbs.db
 .AppleDouble
 .LSOverride
-# Cache directories
+# Cache directories (runtime cache, not source code)
 .cache/
-cache/
+/cache/
 # Build artifacts
 *.a
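The anchoring change is what the commit title refers to: an unanchored cache/ pattern ignores any directory named cache at any depth, which was also hiding the new internal/cache source directory. The leading slash in /cache/ restricts the pattern to the repository root, so only the top-level runtime cache is ignored.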

internal/cache/cache.go

@@ -0,0 +1,89 @@
package cache

import (
	"bytes"
	"compress/gzip"
	"sync"
	"time"
)

// Cache stores generated HTML pages
type Cache struct {
	pages map[string]*CachedPage
	mu    sync.RWMutex
}

// NewCache creates a new cache
func NewCache() *Cache {
	return &Cache{
		pages: make(map[string]*CachedPage),
	}
}

// Get retrieves a page from cache
func (c *Cache) Get(path string) (*CachedPage, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	page, exists := c.pages[path]
	return page, exists
}

// Set stores a page in cache
func (c *Cache) Set(path string, content string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Generate ETag
	etag := GenerateETag(content)

	// Pre-compress content
	var compressed bytes.Buffer
	writer := gzip.NewWriter(&compressed)
	if _, err := writer.Write([]byte(content)); err != nil {
		return err
	}
	if err := writer.Close(); err != nil {
		return err
	}

	c.pages[path] = &CachedPage{
		Content:     content,
		ETag:        etag,
		LastUpdated: time.Now(),
		Compressed:  compressed.Bytes(),
	}
	return nil
}

// Delete removes a page from cache
func (c *Cache) Delete(path string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.pages, path)
}

// Clear clears all cached pages
func (c *Cache) Clear() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.pages = make(map[string]*CachedPage)
}

// Size returns the number of cached pages
func (c *Cache) Size() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return len(c.pages)
}

// GetAllPaths returns all cached page paths
func (c *Cache) GetAllPaths() []string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	paths := make([]string, 0, len(c.pages))
	for path := range c.pages {
		paths = append(paths, path)
	}
	return paths
}
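For context, a minimal sketch of an HTTP handler that could serve from this cache; servePage and its wiring are illustrative assumptions, not part of this commit, but the ETag and Compressed fields are used exactly as defined above.

package main

import (
	"net/http"
	"strings"

	"gitcitadel-online/internal/cache"
)

// servePage is a hypothetical handler: it answers conditional GETs with
// 304 via the precomputed ETag and serves the pre-gzipped bytes when the
// client accepts gzip.
func servePage(c *cache.Cache) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		page, ok := c.Get(r.URL.Path)
		if !ok {
			http.NotFound(w, r)
			return
		}
		if r.Header.Get("If-None-Match") == page.ETag {
			w.WriteHeader(http.StatusNotModified)
			return
		}
		w.Header().Set("ETag", page.ETag)
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			w.Header().Set("Content-Encoding", "gzip")
			w.Write(page.Compressed)
			return
		}
		w.Write([]byte(page.Content))
	}
}

func main() {
	c := cache.NewCache()
	_ = c.Set("/", "<html>hello</html>")
	http.ListenAndServe(":8080", servePage(c))
}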

internal/cache/feed_cache.go

@@ -0,0 +1,54 @@
package cache

import (
	"sync"
	"time"
)

// FeedItem represents a cached feed item
type FeedItem struct {
	EventID string
	Author  string
	Content string
	Time    time.Time
	Link    string
	Title   string
	Summary string
	Image   string
}

// FeedCache stores the kind 1 feed
type FeedCache struct {
	items       []FeedItem
	mu          sync.RWMutex
	lastUpdated time.Time
}

// NewFeedCache creates a new feed cache
func NewFeedCache() *FeedCache {
	return &FeedCache{
		items: make([]FeedItem, 0),
	}
}

// Set updates the feed cache
func (fc *FeedCache) Set(items []FeedItem) {
	fc.mu.Lock()
	defer fc.mu.Unlock()
	fc.items = items
	fc.lastUpdated = time.Now()
}

// Get retrieves feed items.
// The returned slice is shared; callers must treat it as read-only.
func (fc *FeedCache) Get() []FeedItem {
	fc.mu.RLock()
	defer fc.mu.RUnlock()
	return fc.items
}

// GetLastUpdated returns when the feed was last updated
func (fc *FeedCache) GetLastUpdated() time.Time {
	fc.mu.RLock()
	defer fc.mu.RUnlock()
	return fc.lastUpdated
}
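A brief illustration of the snapshot semantics (illustrative values, not from the commit): because Set swaps the whole slice rather than mutating it in place, a reader iterating the result of Get is never raced by a concurrent rewarm.

package main

import (
	"fmt"

	"gitcitadel-online/internal/cache"
)

func main() {
	fc := cache.NewFeedCache()
	fc.Set([]cache.FeedItem{{EventID: "abc123", Title: "hello"}})

	// Get returns the current snapshot; a concurrent Set would replace
	// fc.items wholesale without touching this slice.
	for _, item := range fc.Get() {
		fmt.Println(item.EventID, item.Title)
	}
	fmt.Println("last updated:", fc.GetLastUpdated())
}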

internal/cache/media_cache.go

@@ -0,0 +1,198 @@
package cache

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"gitcitadel-online/internal/logger"
)

// MediaCache handles caching of images and other media from events
type MediaCache struct {
	cacheDir     string
	activeEvents map[string]time.Time // eventID -> last seen time
	mu           sync.RWMutex
	httpClient   *http.Client
}

// NewMediaCache creates a new media cache
func NewMediaCache(cacheDir string) (*MediaCache, error) {
	// Create cache directory if it doesn't exist
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create media cache directory: %w", err)
	}
	mc := &MediaCache{
		cacheDir:     cacheDir,
		activeEvents: make(map[string]time.Time),
		httpClient: &http.Client{
			Timeout: 30 * time.Second,
		},
	}
	// Start cleanup goroutine
	go mc.cleanupLoop(context.Background())
	return mc, nil
}

// CacheMedia downloads and caches a media file from a URL.
// Returns the local path to the cached file, or the original URL if caching fails.
func (mc *MediaCache) CacheMedia(ctx context.Context, url string, eventID string) (string, error) {
	if url == "" {
		return "", fmt.Errorf("empty URL")
	}

	// Mark event as active
	mc.mu.Lock()
	mc.activeEvents[eventID] = time.Now()
	mc.mu.Unlock()

	// Generate cache filename from URL hash.
	// Derive the extension from the URL itself, dropping any query string or
	// fragment (filepath.Ext on the raw URL would include them, producing
	// filenames and served paths containing '?' or '#').
	hash := sha256.Sum256([]byte(url))
	ext := filepath.Ext(url)
	if i := strings.IndexAny(ext, "?#"); i >= 0 {
		ext = ext[:i]
	}
	filename := hex.EncodeToString(hash[:]) + ext
	cachePath := filepath.Join(mc.cacheDir, filename)

	// Check if already cached
	if _, err := os.Stat(cachePath); err == nil {
		return "/cache/media/" + filename, nil
	}

	// Download the media
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return url, fmt.Errorf("failed to create request: %w", err)
	}
	// Set user agent
	req.Header.Set("User-Agent", "GitCitadel-Online/1.0")

	resp, err := mc.httpClient.Do(req)
	if err != nil {
		logger.WithFields(map[string]interface{}{
			"url":     url,
			"eventID": eventID,
			"error":   err,
		}).Warn("Failed to download media")
		return url, fmt.Errorf("failed to download: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return url, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	// Check content type - only cache images
	contentType := resp.Header.Get("Content-Type")
	if !isImageContentType(contentType) {
		logger.WithFields(map[string]interface{}{
			"url":         url,
			"contentType": contentType,
		}).Debug("Skipping non-image media")
		return url, nil
	}

	// Create cache file
	file, err := os.Create(cachePath)
	if err != nil {
		return url, fmt.Errorf("failed to create cache file: %w", err)
	}
	defer file.Close()

	// Copy response to file
	_, err = io.Copy(file, resp.Body)
	if err != nil {
		os.Remove(cachePath) // Clean up on error
		return url, fmt.Errorf("failed to write cache file: %w", err)
	}

	logger.WithFields(map[string]interface{}{
		"url":       url,
		"eventID":   eventID,
		"cachePath": cachePath,
	}).Debug("Cached media file")
	return "/cache/media/" + filename, nil
}

// GetCacheDir returns the cache directory path
func (mc *MediaCache) GetCacheDir() string {
	return mc.cacheDir
}

// isImageContentType checks if a content type is an image
func isImageContentType(contentType string) bool {
	imageTypes := []string{
		"image/jpeg",
		"image/jpg",
		"image/png",
		"image/gif",
		"image/webp",
		"image/svg+xml",
		"image/bmp",
		"image/x-icon",
	}
	for _, imgType := range imageTypes {
		if contentType == imgType {
			return true
		}
	}
	return false
}

// MarkEventActive marks an event as currently active (displayed)
func (mc *MediaCache) MarkEventActive(eventID string) {
	mc.mu.Lock()
	defer mc.mu.Unlock()
	mc.activeEvents[eventID] = time.Now()
}

// cleanupLoop periodically prunes tracking for events that are no longer active
func (mc *MediaCache) cleanupLoop(ctx context.Context) {
	ticker := time.NewTicker(1 * time.Hour) // Run cleanup every hour
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			mc.cleanup()
		}
	}
}

// cleanup removes tracking entries for events that haven't been seen in 24 hours
func (mc *MediaCache) cleanup() {
	mc.mu.Lock()
	defer mc.mu.Unlock()

	cutoff := time.Now().Add(-24 * time.Hour)
	var toRemove []string

	// Find events that are no longer active
	for eventID, lastSeen := range mc.activeEvents {
		if lastSeen.Before(cutoff) {
			toRemove = append(toRemove, eventID)
		}
	}

	// Remove inactive events from tracking
	for _, eventID := range toRemove {
		delete(mc.activeEvents, eventID)
	}

	// Note: We don't delete the actual files here because multiple events might
	// use the same image. If an event is no longer displayed, its media simply
	// won't be accessed. A more sophisticated cleanup would track which files
	// are used by which events.
	logger.WithField("removed_events", len(toRemove)).Debug("Cleaned up inactive events from media cache")
}
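A short usage sketch, assuming a caller that rewrites image URLs in rendered events; the directory and URL values here are hypothetical.

package main

import (
	"context"
	"fmt"
	"log"

	"gitcitadel-online/internal/cache"
)

func main() {
	mc, err := cache.NewMediaCache("cache/media")
	if err != nil {
		log.Fatal(err)
	}
	// On most failures CacheMedia returns the original URL alongside the
	// error, so the result is always usable as an image src.
	src, err := mc.CacheMedia(context.Background(), "https://example.com/pic.png", "event123")
	if err != nil {
		log.Printf("falling back to remote URL: %v", err)
	}
	fmt.Println(src) // "/cache/media/<sha256 hex>.png" once cached
}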

internal/cache/page.go

@@ -0,0 +1,29 @@
package cache

import (
	"fmt"
	"time"
)

// CachedPage represents a cached HTML page
type CachedPage struct {
	Content     string
	ETag        string
	LastUpdated time.Time
	Compressed  []byte // Pre-compressed gzip content
}

// IsStale checks if the cached page is stale based on maxAge
func (cp *CachedPage) IsStale(maxAge time.Duration) bool {
	return time.Since(cp.LastUpdated) > maxAge
}

// GenerateETag generates an ETag for the content
func GenerateETag(content string) string {
	// Simple ETag based on content hash
	hash := 0
	for _, b := range []byte(content) {
		hash = hash*31 + int(b)
	}
	return fmt.Sprintf(`"%x"`, hash)
}
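A quick illustration of the ETag and staleness behavior (the content and timestamps are hypothetical values, not from the commit).

package main

import (
	"fmt"
	"time"

	"gitcitadel-online/internal/cache"
)

func main() {
	content := "<html><body>hi</body></html>"
	page := &cache.CachedPage{
		Content:     content,
		ETag:        cache.GenerateETag(content),
		LastUpdated: time.Now().Add(-10 * time.Minute),
	}
	// Identical content always yields an identical ETag.
	fmt.Println(page.ETag == cache.GenerateETag(content)) // true
	fmt.Println(page.IsStale(5 * time.Minute))            // true: older than maxAge
	fmt.Println(page.IsStale(30 * time.Minute))           // false: still fresh
}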

internal/cache/rewarm.go

@@ -0,0 +1,408 @@
package cache

import (
	"context"
	"html/template"
	"time"

	"gitcitadel-online/internal/generator"
	"gitcitadel-online/internal/logger"
	"gitcitadel-online/internal/nostr"
)

// Rewarmer handles cache rewarming
type Rewarmer struct {
	cache         *Cache
	feedCache     *FeedCache
	wikiService   *nostr.WikiService
	feedService   *nostr.FeedService
	ebooksService *nostr.EBooksService
	htmlGenerator *generator.HTMLGenerator
	wikiIndex     string
	blogIndex     string
	feedRelay     string
	maxFeedEvents int
	interval      time.Duration
	feedInterval  time.Duration
}

// NewRewarmer creates a new cache rewarming service
func NewRewarmer(
	cache *Cache,
	feedCache *FeedCache,
	wikiService *nostr.WikiService,
	feedService *nostr.FeedService,
	ebooksService *nostr.EBooksService,
	htmlGenerator *generator.HTMLGenerator,
	wikiIndex, blogIndex, feedRelay string,
	maxFeedEvents int,
	interval, feedInterval time.Duration,
) *Rewarmer {
	return &Rewarmer{
		cache:         cache,
		feedCache:     feedCache,
		wikiService:   wikiService,
		feedService:   feedService,
		ebooksService: ebooksService,
		htmlGenerator: htmlGenerator,
		wikiIndex:     wikiIndex,
		blogIndex:     blogIndex,
		feedRelay:     feedRelay,
		maxFeedEvents: maxFeedEvents,
		interval:      interval,
		feedInterval:  feedInterval,
	}
}

// Start starts the rewarming goroutines
func (r *Rewarmer) Start(ctx context.Context) {
	// Initial population
	go r.rewarmPages(ctx)
	go r.rewarmFeed(ctx)
	// Periodic rewarming
	go r.periodicRewarmPages(ctx)
	go r.periodicRewarmFeed(ctx)
}

// rewarmPages rewarms the page cache
func (r *Rewarmer) rewarmPages(ctx context.Context) {
	logger.Info("Starting page cache rewarming...")

	// Initialize wikiPages as empty - will be populated if the wiki fetch succeeds
	wikiPages := make([]generator.WikiPageInfo, 0)

	// Fetch wiki index (non-blocking - the landing page can still be generated).
	// If theforest fails, leave pages as-is (don't remove existing events).
	wikiIndex, err := r.wikiService.FetchWikiIndex(ctx, r.wikiIndex)
	if err != nil {
		logger.Warnf("Error fetching wiki index from theforest: %v - keeping existing pages", err)
		// Don't update cache - leave existing pages as-is.
		// Continue to generate the landing page even if the wiki fetch fails.
	} else {
		// Fetch wiki events.
		// If theforest fails, leave pages as-is (don't remove existing events).
		wikiEvents, err := r.wikiService.FetchWikiEvents(ctx, wikiIndex)
		if err != nil {
			logger.Warnf("Error fetching wiki events from theforest: %v - keeping existing pages", err)
			// Don't update cache - leave existing pages as-is.
		} else {
			// Build wiki page info for navigation
			wikiPages = make([]generator.WikiPageInfo, 0, len(wikiEvents))
			for _, event := range wikiEvents {
				wikiPages = append(wikiPages, generator.WikiPageInfo{
					DTag:  event.DTag,
					Title: event.Title,
				})
			}

			// Generate and cache the wiki index page
			wikiIndexHTML, err := r.htmlGenerator.GenerateWikiIndexPage(wikiIndex, wikiPages, []generator.FeedItemInfo{})
			if err != nil {
				logger.Errorf("Error generating wiki index page: %v", err)
			} else {
				if err := r.cache.Set("/wiki", wikiIndexHTML); err != nil {
					logger.Errorf("Error caching wiki index page: %v", err)
				} else {
					logger.WithField("pages", len(wikiPages)).Info("Wiki index page cached successfully")
				}
			}

			// Generate and cache wiki pages
			for _, event := range wikiEvents {
				html, err := r.htmlGenerator.GenerateWikiPage(event, wikiPages, []generator.FeedItemInfo{})
				if err != nil {
					logger.WithField("dtag", event.DTag).Errorf("Error generating wiki page: %v", err)
					continue
				}
				if err := r.cache.Set("/wiki/"+event.DTag, html); err != nil {
					logger.WithField("dtag", event.DTag).Errorf("Error caching wiki page: %v", err)
				}
			}
		}
	}

	// Fetch the blog index if configured (needed for the landing page).
	// If theforest fails, leave pages as-is (don't remove existing events).
	var newestBlogItem *generator.BlogItemInfo
	if r.blogIndex != "" {
		blogIndex, err := r.wikiService.FetchWikiIndex(ctx, r.blogIndex)
		if err != nil {
			logger.Warnf("Error fetching blog index from theforest: %v - keeping existing pages", err)
			// Don't update cache - leave existing pages as-is.
		} else {
			// Fetch blog events using the generic FetchIndexEvents function.
			// If theforest fails, leave pages as-is (don't remove existing events).
			blogKind := r.wikiService.GetBlogKind()
			blogEventList, err := r.wikiService.FetchIndexEvents(ctx, blogIndex, blogKind)
			if err != nil {
				logger.Warnf("Error fetching blog events from theforest: %v - keeping existing pages", err)
				// Don't update cache - leave existing pages as-is.
			} else {
				logger.WithFields(map[string]interface{}{
					"events": len(blogEventList),
					"kind":   blogKind,
				}).Debug("Fetched blog events")

				blogItems := make([]generator.BlogItemInfo, 0, len(blogEventList))
				for _, event := range blogEventList {
					// Parse the blog event
					blog, err := nostr.ParseBlogEvent(event, blogKind)
					if err != nil {
						logger.WithField("event_id", event.ID).Warnf("Error parsing blog event: %v", err)
						continue
					}
					html, err := r.htmlGenerator.ProcessAsciiDoc(blog.Content)
					if err != nil {
						logger.WithField("dtag", blog.DTag).Warnf("Error processing blog content: %v", err)
						html = blog.Content // Fallback to raw content
					}
					blogItems = append(blogItems, generator.BlogItemInfo{
						DTag:      blog.DTag,
						Title:     blog.Title,
						Summary:   blog.Summary,
						Content:   template.HTML(html),
						Author:    event.PubKey,
						Image:     blog.Image,
						CreatedAt: int64(event.CreatedAt),
					})
				}
				logger.WithField("items", len(blogItems)).Debug("Generated blog items")

				// Get the newest blog item for the landing page
				if len(blogItems) > 0 {
					newestBlogItem = &blogItems[0]
				}

				// Generate the blog page without feed items (feed only on the landing page)
				blogHTML, err := r.htmlGenerator.GenerateBlogPage(blogIndex, blogItems, []generator.FeedItemInfo{})
				if err != nil {
					logger.Errorf("Error generating blog page: %v", err)
				} else {
					if err := r.cache.Set("/blog", blogHTML); err != nil {
						logger.Errorf("Error caching blog page: %v", err)
					} else {
						logger.WithField("items", len(blogItems)).Info("Blog page cached successfully")
					}
				}
			}
		}
	}

	// Fetch and cache the articles page (longform articles) - needed for the landing page.
	// If theforest fails, leave pages as-is (don't remove existing events).
	var allArticleItems []generator.ArticleItemInfo
	var newestArticleItem *generator.ArticleItemInfo
	longformKind := r.wikiService.GetLongformKind()
	if longformKind > 0 {
		articleEvents, err := r.wikiService.FetchLongformArticles(ctx, "wss://theforest.nostr1.com", longformKind, 50)
		if err != nil {
			logger.Warnf("Error fetching longform articles from theforest: %v - keeping existing pages", err)
			// Don't update cache - leave existing pages as-is.
		} else {
			articleItems := make([]generator.ArticleItemInfo, 0, len(articleEvents))
			for _, event := range articleEvents {
				// Parse the longform article
				article, err := nostr.ParseLongformEvent(event, longformKind)
				if err != nil {
					logger.WithField("event_id", event.ID).Warnf("Error parsing longform article: %v", err)
					continue
				}
				// Process markdown content
				html, err := r.htmlGenerator.ProcessMarkdown(article.Content)
				if err != nil {
					logger.WithField("dtag", article.DTag).Warnf("Error processing markdown content: %v", err)
					html = article.Content // Fallback to raw content
				}
				articleItems = append(articleItems, generator.ArticleItemInfo{
					DTag:      article.DTag,
					Title:     article.Title,
					Summary:   article.Summary,
					Content:   template.HTML(html),
					Author:    event.PubKey,
					Image:     article.Image,
					CreatedAt: int64(event.CreatedAt),
				})
			}
			logger.WithField("items", len(articleItems)).Debug("Generated article items")

			// Store all article items for the landing page
			allArticleItems = articleItems

			// Get the newest article item for the landing page
			if len(articleItems) > 0 {
				newestArticleItem = &articleItems[0]
			}

			// Generate the articles page
			articlesHTML, err := r.htmlGenerator.GenerateArticlesPage(articleItems, []generator.FeedItemInfo{})
			if err != nil {
				logger.Errorf("Error generating articles page: %v", err)
			} else {
				if err := r.cache.Set("/articles", articlesHTML); err != nil {
					logger.Errorf("Error caching articles page: %v", err)
				} else {
					logger.WithField("items", len(articleItems)).Info("Articles page cached successfully")
				}
			}
		}
	}

	// Fetch and cache the e-books page (needed for the landing page).
	// If theforest fails, leave pages as-is (don't remove existing events).
	var allEBooks []generator.EBookInfo
	if r.ebooksService != nil {
		ebooks, err := r.ebooksService.FetchTopLevelIndexEvents(ctx)
		if err != nil {
			logger.Warnf("Error fetching e-books from theforest: %v - keeping existing pages", err)
			// Don't update cache - leave existing pages as-is.
		} else {
			// Convert to generator.EBookInfo
			generatorEBooks := make([]generator.EBookInfo, 0, len(ebooks))
			for _, ebook := range ebooks {
				generatorEBooks = append(generatorEBooks, generator.EBookInfo{
					EventID:   ebook.EventID,
					Title:     ebook.Title,
					DTag:      ebook.DTag,
					Author:    ebook.Author,
					Summary:   ebook.Summary,
					Image:     ebook.Image,
					Type:      ebook.Type,
					CreatedAt: ebook.CreatedAt,
					Naddr:     ebook.Naddr,
				})
			}
			// Store all e-books for the landing page
			allEBooks = generatorEBooks

			ebooksHTML, err := r.htmlGenerator.GenerateEBooksPage(generatorEBooks, []generator.FeedItemInfo{})
			if err != nil {
				logger.Errorf("Error generating e-books page: %v", err)
			} else {
				if err := r.cache.Set("/ebooks", ebooksHTML); err != nil {
					logger.Errorf("Error caching e-books page: %v", err)
				} else {
					logger.WithField("ebooks", len(generatorEBooks)).Info("E-books page cached successfully")
				}
			}
		}
	}

	// Always generate the landing page AFTER blog, articles, and e-books are
	// fetched and cached, so all the data the landing page needs is available.
	landingHTML, err := r.htmlGenerator.GenerateLandingPage(wikiPages, newestBlogItem, newestArticleItem, allArticleItems, allEBooks)
	if err != nil {
		logger.Errorf("Error generating landing page: %v", err)
	} else {
		if err := r.cache.Set("/", landingHTML); err != nil {
			logger.Errorf("Error caching landing page: %v", err)
		} else {
			logger.WithField("pages", len(wikiPages)).Info("Landing page cached successfully")
		}
	}

	// Generate and cache the feed page (using feed items from the cache)
	feedItems := r.convertFeedItemsToInfo(r.feedCache.Get())
	feedHTML, err := r.htmlGenerator.GenerateFeedPage(feedItems)
	if err != nil {
		logger.Errorf("Error generating feed page: %v", err)
	} else {
		if err := r.cache.Set("/feed", feedHTML); err != nil {
			logger.Errorf("Error caching feed page: %v", err)
		} else {
			logger.WithField("items", len(feedItems)).Info("Feed page cached successfully")
		}
	}

	logger.Info("Page cache rewarming completed")
}

// rewarmFeed rewarms the feed cache
func (r *Rewarmer) rewarmFeed(ctx context.Context) {
	logger.WithFields(map[string]interface{}{
		"relay":      r.feedRelay,
		"max_events": r.maxFeedEvents,
	}).Info("Starting feed cache rewarming")

	nostrItems, err := r.feedService.FetchFeedItems(ctx, r.feedRelay, r.maxFeedEvents)
	if err != nil {
		logger.WithField("relay", r.feedRelay).Warnf("Error fetching feed: %v", err)
		// Don't clear the cache on error - keep old items
		return
	}
	if len(nostrItems) == 0 {
		logger.WithField("relay", r.feedRelay).Warn("No feed items fetched")
		// Don't clear the cache - keep old items
		return
	}

	// Convert nostr.FeedItem to cache.FeedItem
	items := make([]FeedItem, 0, len(nostrItems))
	for _, item := range nostrItems {
		items = append(items, FeedItem{
			EventID: item.EventID,
			Author:  item.Author,
			Content: item.Content,
			Time:    item.Time,
			Link:    item.Link,
			Title:   item.Title,
			Summary: item.Summary,
			Image:   item.Image,
		})
	}
	r.feedCache.Set(items)

	logger.WithFields(map[string]interface{}{
		"items": len(items),
		"relay": r.feedRelay,
	}).Info("Feed cache rewarmed successfully")
}

// periodicRewarmPages periodically rewarms pages
func (r *Rewarmer) periodicRewarmPages(ctx context.Context) {
	ticker := time.NewTicker(r.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			r.rewarmPages(ctx)
		}
	}
}

// convertFeedItemsToInfo converts cache.FeedItem to generator.FeedItemInfo
func (r *Rewarmer) convertFeedItemsToInfo(items []FeedItem) []generator.FeedItemInfo {
	feedItems := make([]generator.FeedItemInfo, 0, len(items))
	for _, item := range items {
		feedItems = append(feedItems, generator.FeedItemInfo{
			EventID: item.EventID,
			Author:  item.Author,
			Content: item.Content,
			Time:    item.Time.Format("2006-01-02 15:04:05"),
			TimeISO: item.Time.Format(time.RFC3339),
			Link:    item.Link,
		})
	}
	return feedItems
}

// periodicRewarmFeed periodically rewarms the feed
func (r *Rewarmer) periodicRewarmFeed(ctx context.Context) {
	ticker := time.NewTicker(r.feedInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			r.rewarmFeed(ctx)
		}
	}
}
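For orientation, a minimal sketch of how the Rewarmer might be wired up at startup; the service placeholders and configuration values below are assumptions, since the constructors for those services are not shown in this commit.

package main

import (
	"context"
	"time"

	"gitcitadel-online/internal/cache"
	"gitcitadel-online/internal/generator"
	"gitcitadel-online/internal/nostr"
)

func main() {
	c := cache.NewCache()
	fc := cache.NewFeedCache()

	// The concrete constructors for these services are not part of this
	// commit; placeholder variables are used here purely to show the wiring.
	var (
		wikiSvc   *nostr.WikiService
		feedSvc   *nostr.FeedService
		ebooksSvc *nostr.EBooksService
		gen       *generator.HTMLGenerator
	)

	rw := cache.NewRewarmer(
		c, fc,
		wikiSvc, feedSvc, ebooksSvc, gen,
		"wiki-index-dtag",            // wikiIndex (hypothetical value)
		"blog-index-dtag",            // blogIndex (hypothetical value)
		"wss://theforest.nostr1.com", // feedRelay (the relay hardcoded above)
		100,                          // maxFeedEvents (hypothetical value)
		15*time.Minute,               // page rewarm interval (hypothetical)
		5*time.Minute,                // feed rewarm interval (hypothetical)
	)
	rw.Start(context.Background())

	select {} // block forever; Start only spawns goroutines
}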