Browse Source

Merges pull request #56

Feature/text entry
master
Michael J 4 months ago
parent
commit
166736031b
No known key found for this signature in database
GPG Key ID: 962BEC8725790894
  1. 304
      deno.lock
  2. 173
      doc/compose_tree.md
  3. 3
      import_map.json
  4. 2163
      package-lock.json
  5. 7
      package.json
  6. 1547
      src/lib/components/ZettelEditor.svelte
  7. 221
      src/lib/services/publisher.ts
  8. 273
      src/lib/utils/asciidoc_ast_parser.ts
  9. 375
      src/lib/utils/asciidoc_metadata.ts
  10. 577
      src/lib/utils/asciidoc_parser.ts
  11. 148
      src/lib/utils/asciidoc_publication_parser.ts
  12. 4
      src/lib/utils/event_input_utils.ts
  13. 377
      src/lib/utils/publication_tree_factory.ts
  14. 1091
      src/lib/utils/publication_tree_processor.ts
  15. 277
      src/routes/new/compose/+page.svelte
  16. 4
      tests/unit/metadataExtraction.test.ts
  17. 284
      tests/unit/publication_tree_processor.test.ts
  18. 560
      tests/zettel-publisher-tdd.test.ts

304
deno.lock

@ -3,6 +3,11 @@ @@ -3,6 +3,11 @@
"specifiers": {
"jsr:@std/internal@^1.0.9": "1.0.10",
"jsr:@std/path@*": "1.1.1",
"npm:@codemirror/basic-setup@0.20": "0.20.0",
"npm:@codemirror/lang-markdown@^6.3.4": "6.5.0",
"npm:@codemirror/state@^6.5.2": "6.5.2",
"npm:@codemirror/theme-one-dark@^6.1.3": "6.1.3",
"npm:@codemirror/view@^6.38.1": "6.38.6",
"npm:@lucide/svelte@0.539": "0.539.0_svelte@5.38.2__acorn@8.15.0",
"npm:@noble/curves@^1.9.4": "1.9.7",
"npm:@noble/hashes@^1.8.0": "1.8.0",
@ -27,6 +32,7 @@ @@ -27,6 +32,7 @@
"npm:autoprefixer@^10.4.21": "10.4.21_postcss@8.5.6",
"npm:bech32@2": "2.0.0",
"npm:class-variance-authority@~0.7.1": "0.7.1",
"npm:codemirror@^6.0.2": "6.0.2",
"npm:d3@^7.9.0": "7.9.0_d3-selection@3.0.0",
"npm:eslint-plugin-svelte@^3.11.0": "3.11.0_eslint@9.33.0_svelte@5.38.2__acorn@8.15.0_postcss@8.5.6",
"npm:flowbite-svelte-icons@2.1": "2.1.1_svelte@5.38.2__acorn@8.15.0_tailwind-merge@3.3.1",
@ -42,6 +48,7 @@ @@ -42,6 +48,7 @@
"npm:plantuml-encoder@^1.4.0": "1.4.0",
"npm:playwright@^1.50.1": "1.54.1",
"npm:playwright@^1.54.1": "1.54.1",
"npm:postcss-import@^16.1.1": "16.1.1_postcss@8.5.6",
"npm:postcss-load-config@6": "6.0.1_postcss@8.5.6_yaml@2.8.1",
"npm:postcss@^8.5.6": "8.5.6",
"npm:prettier-plugin-svelte@^3.4.0": "3.4.0_prettier@3.6.2_svelte@5.38.2__acorn@8.15.0",
@ -114,6 +121,192 @@ @@ -114,6 +121,192 @@
"@babel/helper-validator-identifier"
]
},
"@codemirror/autocomplete@0.20.3": {
"integrity": "sha512-lYB+NPGP+LEzAudkWhLfMxhTrxtLILGl938w+RcFrGdrIc54A+UgmCoz+McE3IYRFp4xyQcL4uFJwo+93YdgHw==",
"dependencies": [
"@codemirror/language@0.20.2",
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7",
"@lezer/common@0.16.1"
]
},
"@codemirror/autocomplete@6.19.1": {
"integrity": "sha512-q6NenYkEy2fn9+JyjIxMWcNjzTL/IhwqfzOut1/G3PrIFkrbl4AL7Wkse5tLrQUUyqGoAKU5+Pi5jnnXxH5HGw==",
"dependencies": [
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0"
]
},
"@codemirror/basic-setup@0.20.0": {
"integrity": "sha512-W/ERKMLErWkrVLyP5I8Yh8PXl4r+WFNkdYVSzkXYPQv2RMPSkWpr2BgggiSJ8AHF/q3GuApncDD8I4BZz65fyg==",
"dependencies": [
"@codemirror/autocomplete@0.20.3",
"@codemirror/commands@0.20.0",
"@codemirror/language@0.20.2",
"@codemirror/lint@0.20.3",
"@codemirror/search@0.20.1",
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7"
],
"deprecated": true
},
"@codemirror/commands@0.20.0": {
"integrity": "sha512-v9L5NNVA+A9R6zaFvaTbxs30kc69F6BkOoiEbeFw4m4I0exmDEKBILN6mK+GksJtvTzGBxvhAPlVFTdQW8GB7Q==",
"dependencies": [
"@codemirror/language@0.20.2",
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7",
"@lezer/common@0.16.1"
]
},
"@codemirror/commands@6.10.0": {
"integrity": "sha512-2xUIc5mHXQzT16JnyOFkh8PvfeXuIut3pslWGfsGOhxP/lpgRm9HOl/mpzLErgt5mXDovqA0d11P21gofRLb9w==",
"dependencies": [
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0"
]
},
"@codemirror/lang-css@6.3.1": {
"integrity": "sha512-kr5fwBGiGtmz6l0LSJIbno9QrifNMUusivHbnA1H6Dmqy4HZFte3UAICix1VuKo0lMPKQr2rqB+0BkKi/S3Ejg==",
"dependencies": [
"@codemirror/autocomplete@6.19.1",
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@lezer/common@1.3.0",
"@lezer/css"
]
},
"@codemirror/lang-html@6.4.11": {
"integrity": "sha512-9NsXp7Nwp891pQchI7gPdTwBuSuT3K65NGTHWHNJ55HjYcHLllr0rbIZNdOzas9ztc1EUVBlHou85FFZS4BNnw==",
"dependencies": [
"@codemirror/autocomplete@6.19.1",
"@codemirror/lang-css",
"@codemirror/lang-javascript",
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0",
"@lezer/css",
"@lezer/html"
]
},
"@codemirror/lang-javascript@6.2.4": {
"integrity": "sha512-0WVmhp1QOqZ4Rt6GlVGwKJN3KW7Xh4H2q8ZZNGZaP6lRdxXJzmjm4FqvmOojVj6khWJHIb9sp7U/72W7xQgqAA==",
"dependencies": [
"@codemirror/autocomplete@6.19.1",
"@codemirror/language@6.11.3",
"@codemirror/lint@6.9.1",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0",
"@lezer/javascript"
]
},
"@codemirror/lang-markdown@6.5.0": {
"integrity": "sha512-0K40bZ35jpHya6FriukbgaleaqzBLZfOh7HuzqbMxBXkbYMJDxfF39c23xOgxFezR+3G+tR2/Mup+Xk865OMvw==",
"dependencies": [
"@codemirror/autocomplete@6.19.1",
"@codemirror/lang-html",
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0",
"@lezer/markdown"
]
},
"@codemirror/language@0.20.2": {
"integrity": "sha512-WB3Bnuusw0xhVvhBocieYKwJm04SOk5bPoOEYksVHKHcGHFOaYaw+eZVxR4gIqMMcGzOIUil0FsCmFk8yrhHpw==",
"dependencies": [
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7",
"@lezer/common@0.16.1",
"@lezer/highlight@0.16.0",
"@lezer/lr@0.16.3",
"style-mod"
]
},
"@codemirror/language@6.11.3": {
"integrity": "sha512-9HBM2XnwDj7fnu0551HkGdrUrrqmYq/WC5iv6nbY2WdicXdGbhR/gfbZOH73Aqj4351alY1+aoG9rCNfiwS1RA==",
"dependencies": [
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/common@1.3.0",
"@lezer/highlight@1.2.3",
"@lezer/lr@1.4.2",
"style-mod"
]
},
"@codemirror/lint@0.20.3": {
"integrity": "sha512-06xUScbbspZ8mKoODQCEx6hz1bjaq9m8W8DxdycWARMiiX1wMtfCh/MoHpaL7ws/KUMwlsFFfp2qhm32oaCvVA==",
"dependencies": [
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7",
"crelt"
]
},
"@codemirror/lint@6.9.1": {
"integrity": "sha512-te7To1EQHePBQQzasDKWmK2xKINIXpk+xAiSYr9ZN+VB4KaT+/Hi2PEkeErTk5BV3PTz1TLyQL4MtJfPkKZ9sw==",
"dependencies": [
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"crelt"
]
},
"@codemirror/search@0.20.1": {
"integrity": "sha512-ROe6gRboQU5E4z6GAkNa2kxhXqsGNbeLEisbvzbOeB7nuDYXUZ70vGIgmqPu0tB+1M3F9yWk6W8k2vrFpJaD4Q==",
"dependencies": [
"@codemirror/state@0.20.1",
"@codemirror/view@0.20.7",
"crelt"
]
},
"@codemirror/search@6.5.11": {
"integrity": "sha512-KmWepDE6jUdL6n8cAAqIpRmLPBZ5ZKnicE8oGU/s3QrAVID+0VhLFrzUucVKHG5035/BSykhExDL/Xm7dHthiA==",
"dependencies": [
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"crelt"
]
},
"@codemirror/state@0.20.1": {
"integrity": "sha512-ms0tlV5A02OK0pFvTtSUGMLkoarzh1F8mr6jy1cD7ucSC2X/VLHtQCxfhdSEGqTYlQF2hoZtmLv+amqhdgbwjQ=="
},
"@codemirror/state@6.5.2": {
"integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==",
"dependencies": [
"@marijn/find-cluster-break"
]
},
"@codemirror/theme-one-dark@6.1.3": {
"integrity": "sha512-NzBdIvEJmx6fjeremiGp3t/okrLPYT0d9orIc7AFun8oZcRk58aejkqhv6spnz4MLAevrKNPMQYXEWMg4s+sKA==",
"dependencies": [
"@codemirror/language@6.11.3",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6",
"@lezer/highlight@1.2.3"
]
},
"@codemirror/view@0.20.7": {
"integrity": "sha512-pqEPCb9QFTOtHgAH5XU/oVy9UR/Anj6r+tG5CRmkNVcqSKEPmBU05WtN/jxJCFZBXf6HumzWC9ydE4qstO3TxQ==",
"dependencies": [
"@codemirror/state@0.20.1",
"style-mod",
"w3c-keyname"
]
},
"@codemirror/view@6.38.6": {
"integrity": "sha512-qiS0z1bKs5WOvHIAC0Cybmv4AJSkAXgX5aD6Mqd2epSLlVJsQl8NG23jCVouIgkh4All/mrbdsf2UOLFnJw0tw==",
"dependencies": [
"@codemirror/state@6.5.2",
"crelt",
"style-mod",
"w3c-keyname"
]
},
"@emnapi/core@1.4.5": {
"integrity": "sha512-XsLw1dEOpkSX/WucdqUhPWP7hDxSvZiY+fsUC14h+FtQ2Ifni4znbBt8punRX+Uj2JG/uDb8nEHVKvrVlvdZ5Q==",
"dependencies": [
@ -515,12 +708,76 @@ @@ -515,12 +708,76 @@
"@jridgewell/sourcemap-codec"
]
},
"@lezer/common@0.16.1": {
"integrity": "sha512-qPmG7YTZ6lATyTOAWf8vXE+iRrt1NJd4cm2nJHK+v7X9TsOF6+HtuU/ctaZy2RCrluxDb89hI6KWQ5LfQGQWuA=="
},
"@lezer/common@1.3.0": {
"integrity": "sha512-L9X8uHCYU310o99L3/MpJKYxPzXPOS7S0NmBaM7UO/x2Kb2WbmMLSkfvdr1KxRIFYOpbY0Jhn7CfLSUDzL8arQ=="
},
"@lezer/css@1.3.0": {
"integrity": "sha512-pBL7hup88KbI7hXnZV3PQsn43DHy6TWyzuyk2AO9UyoXcDltvIdqWKE1dLL/45JVZ+YZkHe1WVHqO6wugZZWcw==",
"dependencies": [
"@lezer/common@1.3.0",
"@lezer/highlight@1.2.3",
"@lezer/lr@1.4.2"
]
},
"@lezer/highlight@0.16.0": {
"integrity": "sha512-iE5f4flHlJ1g1clOStvXNLbORJoiW4Kytso6ubfYzHnaNo/eo5SKhxs4wv/rtvwZQeZrK3we8S9SyA7OGOoRKQ==",
"dependencies": [
"@lezer/common@0.16.1"
]
},
"@lezer/highlight@1.2.3": {
"integrity": "sha512-qXdH7UqTvGfdVBINrgKhDsVTJTxactNNxLk7+UMwZhU13lMHaOBlJe9Vqp907ya56Y3+ed2tlqzys7jDkTmW0g==",
"dependencies": [
"@lezer/common@1.3.0"
]
},
"@lezer/html@1.3.12": {
"integrity": "sha512-RJ7eRWdaJe3bsiiLLHjCFT1JMk8m1YP9kaUbvu2rMLEoOnke9mcTVDyfOslsln0LtujdWespjJ39w6zo+RsQYw==",
"dependencies": [
"@lezer/common@1.3.0",
"@lezer/highlight@1.2.3",
"@lezer/lr@1.4.2"
]
},
"@lezer/javascript@1.5.4": {
"integrity": "sha512-vvYx3MhWqeZtGPwDStM2dwgljd5smolYD2lR2UyFcHfxbBQebqx8yjmFmxtJ/E6nN6u1D9srOiVWm3Rb4tmcUA==",
"dependencies": [
"@lezer/common@1.3.0",
"@lezer/highlight@1.2.3",
"@lezer/lr@1.4.2"
]
},
"@lezer/lr@0.16.3": {
"integrity": "sha512-pau7um4eAw94BEuuShUIeQDTf3k4Wt6oIUOYxMmkZgDHdqtIcxWND4LRxi8nI9KuT4I1bXQv67BCapkxt7Ywqw==",
"dependencies": [
"@lezer/common@0.16.1"
]
},
"@lezer/lr@1.4.2": {
"integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==",
"dependencies": [
"@lezer/common@1.3.0"
]
},
"@lezer/markdown@1.5.1": {
"integrity": "sha512-F3ZFnIfNAOy/jPSk6Q0e3bs7e9grfK/n5zerkKoc5COH6Guy3Zb0vrJwXzdck79K16goBhYBRAvhf+ksqe0cMg==",
"dependencies": [
"@lezer/common@1.3.0",
"@lezer/highlight@1.2.3"
]
},
"@lucide/svelte@0.539.0_svelte@5.38.2__acorn@8.15.0": {
"integrity": "sha512-OWhw4BhHO+owmOE/ijSNLnw/flbW2/DsLzMHAeM8oEjLsO0xE6glX0ADCDwxKItTs5ZJYssfyGNXxMXrea173w==",
"dependencies": [
"svelte"
]
},
"@marijn/find-cluster-break@1.0.2": {
"integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g=="
},
"@napi-rs/wasm-runtime@0.2.12": {
"integrity": "sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==",
"dependencies": [
@ -1519,6 +1776,18 @@ @@ -1519,6 +1776,18 @@
"clsx@2.1.1": {
"integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="
},
"codemirror@6.0.2": {
"integrity": "sha512-VhydHotNW5w1UGK0Qj96BwSk/Zqbp9WbnyK2W/eVMv4QyF41INRGpjUhFJY7/uDNuudSc33a/PKr4iDqRduvHw==",
"dependencies": [
"@codemirror/autocomplete@6.19.1",
"@codemirror/commands@6.10.0",
"@codemirror/language@6.11.3",
"@codemirror/lint@6.9.1",
"@codemirror/search@6.5.11",
"@codemirror/state@6.5.2",
"@codemirror/view@6.38.6"
]
},
"color-convert@2.0.1": {
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": [
@ -1550,6 +1819,9 @@ @@ -1550,6 +1819,9 @@
"cookie@0.6.0": {
"integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw=="
},
"crelt@1.0.6": {
"integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g=="
},
"cross-spawn@7.0.6": {
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dependencies": [
@ -2675,6 +2947,9 @@ @@ -2675,6 +2947,9 @@
"picomatch@4.0.3": {
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="
},
"pify@2.3.0": {
"integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog=="
},
"plantuml-encoder@1.4.0": {
"integrity": "sha512-sxMwpDw/ySY1WB2CE3+IdMuEcWibJ72DDOsXLkSmEaSzwEUaYBT6DWgOfBiHGCux4q433X6+OEFWjlVqp7gL6g=="
},
@ -2695,6 +2970,15 @@ @@ -2695,6 +2970,15 @@
"pngjs@5.0.0": {
"integrity": "sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw=="
},
"postcss-import@16.1.1_postcss@8.5.6": {
"integrity": "sha512-2xVS1NCZAfjtVdvXiyegxzJ447GyqCeEI5V7ApgQVOWnros1p5lGNovJNapwPpMombyFBfqDwt7AD3n2l0KOfQ==",
"dependencies": [
"postcss",
"postcss-value-parser",
"read-cache",
"resolve"
]
},
"postcss-load-config@3.1.4_postcss@8.5.6": {
"integrity": "sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==",
"dependencies": [
@ -2875,6 +3159,12 @@ @@ -2875,6 +3159,12 @@
],
"bin": true
},
"read-cache@1.0.0": {
"integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
"dependencies": [
"pify"
]
},
"readdirp@4.1.2": {
"integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="
},
@ -3012,6 +3302,9 @@ @@ -3012,6 +3302,9 @@
"js-tokens"
]
},
"style-mod@4.1.3": {
"integrity": "sha512-i/n8VsZydrugj3Iuzll8+x/00GH2vnYsk1eomD8QiRrSAeW6ItbCQDtfXCeJHd0iwiNagqjQkvpvREEPtW3IoQ=="
},
"supports-color@7.2.0": {
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dependencies": [
@ -3302,6 +3595,9 @@ @@ -3302,6 +3595,9 @@
"void-elements@3.1.0": {
"integrity": "sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w=="
},
"w3c-keyname@2.2.8": {
"integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ=="
},
"which-module@2.0.1": {
"integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ=="
},
@ -3558,6 +3854,7 @@ @@ -3558,6 +3854,7 @@
"npm:node-emoji@^2.2.0",
"npm:nostr-tools@2.15",
"npm:plantuml-encoder@^1.4.0",
"npm:postcss-import@^16.1.1",
"npm:qrcode@^1.5.4",
"npm:svelte@^5.36.8",
"npm:tailwind-merge@^3.3.1",
@ -3565,6 +3862,11 @@ @@ -3565,6 +3862,11 @@
],
"packageJson": {
"dependencies": [
"npm:@codemirror/basic-setup@0.20",
"npm:@codemirror/lang-markdown@^6.3.4",
"npm:@codemirror/state@^6.5.2",
"npm:@codemirror/theme-one-dark@^6.1.3",
"npm:@codemirror/view@^6.38.1",
"npm:@lucide/svelte@0.539",
"npm:@noble/curves@^1.9.4",
"npm:@noble/hashes@^1.8.0",
@ -3589,6 +3891,7 @@ @@ -3589,6 +3891,7 @@
"npm:autoprefixer@^10.4.21",
"npm:bech32@2",
"npm:class-variance-authority@~0.7.1",
"npm:codemirror@^6.0.2",
"npm:d3@^7.9.0",
"npm:eslint-plugin-svelte@^3.11.0",
"npm:flowbite-svelte-icons@2.1",
@ -3601,6 +3904,7 @@ @@ -3601,6 +3904,7 @@
"npm:nostr-tools@2.15",
"npm:plantuml-encoder@^1.4.0",
"npm:playwright@^1.50.1",
"npm:postcss-import@^16.1.1",
"npm:postcss-load-config@6",
"npm:postcss@^8.5.6",
"npm:prettier-plugin-svelte@^3.4.0",

173
doc/compose_tree.md

@ -0,0 +1,173 @@ @@ -0,0 +1,173 @@
# NKBIP-01 Hierarchical Parsing Technical Plan
## Overview
This document outlines the complete restart plan for implementing NKBIP-01 compliant hierarchical AsciiDoc parsing using proper Asciidoctor tree processor extensions.
## Current State Analysis
### Problems Identified
1. **Dual Architecture Conflict**: Two competing parsing implementations exist:
- `publication_tree_factory.ts` - AST-first approach (currently used)
- `publication_tree_extension.ts` - Extension approach (incomplete)
2. **Missing Proper Extension Registration**: Current code doesn't follow the officially documented Asciidoctor extension pattern
3. **Incomplete NKBIP-01 Compliance**: Testing with `deep_hierarchy_test.adoc` may not produce the exact structures shown in `docreference.md`
## NKBIP-01 Specification Summary
From `test_data/AsciidocFiles/docreference.md`:
### Event Types
- **30040**: Index events (collections/hierarchical containers)
- **30041**: Content events (actual article sections)
### Parse Level Behaviors
- **Level 2**: Only `==` sections → 30041 events (subsections included in content)
- **Level 3**: `==` → 30040 indices, `===` → 30041 content events
- **Level 4+**: Full hierarchy with each level becoming separate events
### Key Rules
1. If a section has subsections at target level → becomes 30040 index
2. If no subsections at target level → becomes 30041 content event
3. Content inclusion: 30041 events include all content below parse level
4. Hierarchical references: Parent indices use `a` tags to reference children
## Proposed Architecture
### Core Pattern: Asciidoctor Tree Processor Extension
Following the officially documented Asciidoctor extension pattern:
```javascript
// Extension registration pattern
module.exports = function (registry) {
registry.treeProcessor(function () {
var self = this
self.process(function (doc) {
// Process document and build PublicationTree
return doc
})
})
}
```
### Implementation Components
1. **PublicationTreeProcessor** (`src/lib/utils/publication_tree_processor.ts`)
- Implements the tree processor extension
- Registers with Asciidoctor during document processing
- Builds PublicationTree with NDK events during AST traversal
- Returns result via closure to avoid Ruby compatibility issues
2. **Unified Parser Interface** (`src/lib/utils/asciidoc_publication_parser.ts`)
- Single entry point for all parsing operations
- Manages extension registration and cleanup
- Provides clean API for ZettelEditor integration
3. **Enhanced ZettelEditor Integration**
- Replace `publication_tree_factory.ts` usage
- Use proper extension-based parsing
- Maintain current preview and publishing workflow
## Technical Implementation Plan
### Phase 1: Core Tree Processor (`publication_tree_processor.ts`)
```typescript
export function registerPublicationTreeProcessor(
registry: Registry,
ndk: NDK,
parseLevel: number,
options?: ProcessorOptions
): { getResult: () => ProcessorResult | null }
```
**Key Features:**
- Follows Asciidoctor extension pattern exactly
- Builds events during AST traversal (not after)
- Preserves original AsciiDoc content in events
- Handles all parse levels (2-7) with proper NKBIP-01 compliance
- Uses closure pattern to return results safely
### Phase 2: Unified Parser Interface (`asciidoc_publication_parser.ts`)
```typescript
export async function parseAsciiDocWithTree(
content: string,
ndk: NDK,
parseLevel: number = 2
): Promise<PublicationTreeResult>
```
**Responsibilities:**
- Create Asciidoctor instance
- Register tree processor extension
- Execute parsing with extension
- Return PublicationTree and events
- Clean up resources
### Phase 3: ZettelEditor Integration
**Changes to `ZettelEditor.svelte`:**
- Replace `createPublicationTreeFromContent()` calls
- Use new `parseAsciiDocWithTree()` function
- Maintain existing preview/publishing interface
- No changes to component props or UI
### Phase 4: Validation Testing
**Test Suite:**
1. Parse `deep_hierarchy_test.adoc` at levels 2-7
2. Verify event structures match `docreference.md` examples
3. Validate content preservation and tag inheritance
4. Test publish workflow end-to-end
## File Organization
### Files to Create
1. `src/lib/utils/publication_tree_processor.ts` - Core tree processor extension
2. `src/lib/utils/asciidoc_publication_parser.ts` - Unified parser interface
3. `tests/unit/publication_tree_processor.test.ts` - Comprehensive test suite
### Files to Modify
1. `src/lib/components/ZettelEditor.svelte` - Update parsing calls
2. `src/routes/new/compose/+page.svelte` - Verify integration works
### Files to Remove (After Validation)
1. `src/lib/utils/publication_tree_factory.ts` - Replace with processor
2. `src/lib/utils/publication_tree_extension.ts` - Merge concepts into processor
## Success Criteria
1. **NKBIP-01 Compliance**: All parse levels produce structures exactly matching `docreference.md`
2. **Content Preservation**: Original AsciiDoc content preserved in events (not converted to HTML)
3. **Proper Extension Pattern**: Uses official Asciidoctor tree processor registration
4. **Zero Regression**: Current ZettelEditor functionality unchanged
5. **Performance**: No degradation in parsing or preview speed
6. **Test Coverage**: Comprehensive validation with `deep_hierarchy_test.adoc`
## Development Sequence
1. **Study & Plan** ✓ (Current phase)
2. **Implement Core Processor** - Create `publication_tree_processor.ts`
3. **Build Unified Interface** - Create `asciidoc_publication_parser.ts`
4. **Integrate with ZettelEditor** - Update parsing calls
5. **Validate with Test Documents** - Verify NKBIP-01 compliance
6. **Clean Up Legacy Code** - Remove old implementations
7. **Documentation & Testing** - Comprehensive test suite
## Risk Mitigation
- **Incremental Integration**: Keep old code until new implementation validated
- **Extensive Testing**: Use both test documents for validation
- **Performance Monitoring**: Ensure no degradation in user experience
- **Rollback Plan**: Can revert to `publication_tree_factory.ts` if needed
## References
- NKBIP-01 Specification: `test_data/AsciidocFiles/docreference.md`
- Test Document: `test_data/AsciidocFiles/deep_hierarchy_test.adoc`
- Asciidoctor Extensions: [Official Documentation](https://docs.asciidoctor.org/asciidoctor.js/latest/extend/extensions/)
- Current Implementation: `src/lib/components/ZettelEditor.svelte:64`

3
import_map.json

@ -27,6 +27,7 @@ @@ -27,6 +27,7 @@
"qrcode": "npm:qrcode@^1.5.4",
"child_process": "node:child_process",
"process": "node:process",
"tailwindcss": "npm:tailwindcss@^4.1.11"
"tailwindcss": "npm:tailwindcss@^4.1.11",
"postcss-import": "npm:postcss-import@^16.1.1"
}
}

2163
package-lock.json generated

File diff suppressed because it is too large Load Diff

7
package.json

@ -17,6 +17,11 @@ @@ -17,6 +17,11 @@
"tokens": "node src/lib/theme/build-tokens.mjs"
},
"dependencies": {
"@codemirror/basic-setup": "^0.20.0",
"@codemirror/lang-markdown": "^6.3.4",
"@codemirror/state": "^6.5.2",
"@codemirror/theme-one-dark": "^6.1.3",
"@codemirror/view": "^6.38.1",
"@lucide/svelte": "^0.539.0",
"@noble/curves": "^1.9.4",
"@noble/hashes": "^1.8.0",
@ -29,6 +34,7 @@ @@ -29,6 +34,7 @@
"asciidoctor": "3.0.x",
"bech32": "^2.0.0",
"class-variance-authority": "^0.7.1",
"codemirror": "^6.0.2",
"d3": "^7.9.0",
"he": "1.2.x",
"highlight.js": "^11.11.1",
@ -58,6 +64,7 @@ @@ -58,6 +64,7 @@
"flowbite-typography": "^1.0.5",
"playwright": "^1.50.1",
"postcss": "^8.5.6",
"postcss-import": "^16.1.1",
"postcss-load-config": "6.x",
"prettier": "^3.6.2",
"prettier-plugin-svelte": "^3.4.0",

1547
src/lib/components/ZettelEditor.svelte

File diff suppressed because it is too large Load Diff

221
src/lib/services/publisher.ts

@ -1,8 +1,6 @@ @@ -1,8 +1,6 @@
import { getMimeTags } from "../utils/mime.ts";
import {
metadataToTags,
parseAsciiDocWithMetadata,
} from "../utils/asciidoc_metadata.ts";
import { metadataToTags } from "../utils/asciidoc_metadata.ts";
import { parseAsciiDocWithMetadata } from "../utils/asciidoc_parser.ts";
import NDK, { NDKEvent, NDKRelaySet } from "@nostr-dev-kit/ndk";
import { nip19 } from "nostr-tools";
@ -12,6 +10,14 @@ export interface PublishResult { @@ -12,6 +10,14 @@ export interface PublishResult {
error?: string;
}
/**
 * Aggregated summary of a batch publish run, derived from the raw
 * per-event PublishResult list (see processPublishResults).
 */
export interface ProcessedPublishResults {
// Count of results that reported success.
successCount: number;
// Total number of publish attempts (successes + failures).
total: number;
// Error messages collected from the failed results.
errors: string[];
// Successfully published events, each with a resolved display title.
successfulEvents: Array<{ eventId: string; title: string }>;
// Failed events: display title, error message, and position in the batch.
failedEvents: Array<{ title: string; error: string; sectionIndex: number }>;
}
export interface PublishOptions {
content: string;
kind?: number;
@ -96,9 +102,103 @@ export async function publishZettel( @@ -96,9 +102,103 @@ export async function publishZettel(
throw new Error("Failed to publish to any relays");
}
} catch (error) {
const errorMessage = error instanceof Error
? error.message
: "Unknown error";
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
onError?.(errorMessage);
return { success: false, error: errorMessage };
}
}
/**
* Publishes a single Nostr event directly
* @param options - Publishing options for a single event
* @returns Promise resolving to publish result
*/
export async function publishSingleEvent(
options: {
content: string;
kind: number;
tags: string[][];
onError?: (error: string) => void;
},
ndk: NDK,
): Promise<PublishResult> {
const { content, kind, tags, onError } = options;
if (!ndk?.activeUser) {
const error = "Please log in first";
onError?.(error);
return { success: false, error };
}
try {
const allRelayUrls = Array.from(ndk.pool?.relays.values() || []).map(
(r) => r.url,
);
if (allRelayUrls.length === 0) {
throw new Error("No relays available in NDK pool");
}
const relaySet = NDKRelaySet.fromRelayUrls(allRelayUrls, ndk);
// Fix a-tags that have placeholder "pubkey" with actual pubkey
const fixedTags = tags.map((tag) => {
if (
tag[0] === "a" &&
tag[1] &&
tag[1].includes(":pubkey:") &&
ndk.activeUser
) {
// Replace "pubkey" placeholder with actual pubkey
const fixedATag = tag[1].replace(
":pubkey:",
`:${ndk.activeUser.pubkey}:`,
);
return [tag[0], fixedATag, tag[2] || "", tag[3] || ""];
}
return tag;
});
// Auto-add author identity if not publishing on behalf of others
const hasAuthorTag = fixedTags.some((tag) => tag[0] === "author");
const hasPTag = fixedTags.some((tag) => tag[0] === "p");
const finalTags = [...fixedTags];
if (!hasAuthorTag && ndk.activeUser) {
// Add display name as author
const displayName =
ndk.activeUser.profile?.displayName ||
ndk.activeUser.profile?.name ||
"Anonymous";
finalTags.push(["author", displayName]);
}
if (!hasPTag && ndk.activeUser) {
// Add pubkey as p-tag
finalTags.push(["p", ndk.activeUser.pubkey]);
}
// Create and sign NDK event
const ndkEvent = new NDKEvent(ndk);
ndkEvent.kind = kind;
ndkEvent.created_at = Math.floor(Date.now() / 1000);
ndkEvent.tags = finalTags;
ndkEvent.content = content;
ndkEvent.pubkey = ndk.activeUser.pubkey;
await ndkEvent.sign();
// Publish to relays
const publishedToRelays = await ndkEvent.publish(relaySet);
if (publishedToRelays.size > 0) {
return { success: true, eventId: ndkEvent.id };
} else {
throw new Error("Failed to publish to any relays");
}
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
console.error(`Error publishing event: ${errorMessage}`);
onError?.(errorMessage);
return { success: false, error: errorMessage };
}
@ -133,8 +233,8 @@ export async function publishMultipleZettels( @@ -133,8 +233,8 @@ export async function publishMultipleZettels(
throw new Error("No valid sections found in content");
}
const allRelayUrls = Array.from(ndk.pool?.relays.values() || []).map((r) =>
r.url
const allRelayUrls = Array.from(ndk.pool?.relays.values() || []).map(
(r) => r.url,
);
if (allRelayUrls.length === 0) {
throw new Error("No relays available in NDK pool");
@ -172,41 +272,94 @@ export async function publishMultipleZettels( @@ -172,41 +272,94 @@ export async function publishMultipleZettels(
});
}
} catch (err) {
const errorMessage = err instanceof Error
? err.message
: "Unknown error";
const errorMessage =
err instanceof Error ? err.message : "Unknown error";
results.push({ success: false, error: errorMessage });
}
}
// Debug: extract and log 'e' and 'a' tags from all published events
publishedEvents.forEach((ev) => {
// Extract d-tag from tags
const dTagEntry = ev.tags.find((t) => t[0] === "d");
const dTag = dTagEntry ? dTagEntry[1] : "";
const aTag = `${ev.kind}:${ev.pubkey}:${dTag}`;
console.log(`Event ${ev.id} tags:`);
console.log(" e:", ev.id);
console.log(" a:", aTag);
// Print nevent and naddr using nip19
const nevent = nip19.neventEncode({ id: ev.id });
const naddr = nip19.naddrEncode({
kind: ev.kind,
pubkey: ev.pubkey,
identifier: dTag,
});
console.log(" nevent:", nevent);
console.log(" naddr:", naddr);
});
return results;
} catch (error) {
const errorMessage = error instanceof Error
? error.message
: "Unknown error";
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
onError?.(errorMessage);
return [{ success: false, error: errorMessage }];
}
}
/**
 * Processes publish results and extracts success/failure information.
 *
 * @param results - Publish results, in the same order the events were published
 * @param events - Event objects containing content and metadata
 * @param hasIndexEvent - Whether results[0] corresponds to an index event
 * @returns Processed results with counts and per-event details
 */
export function processPublishResults(
  results: PublishResult[],
  events: { indexEvent?: any; contentEvents: any[] },
  hasIndexEvent: boolean = false,
): ProcessedPublishResults {
  // Resolves a human-readable title for the event at `index` in `results`.
  // Position 0 is the index event when one was published; every other
  // position maps onto `events.contentEvents` (shifted by one when an
  // index event is present).
  const titleForIndex = (index: number): string => {
    if (index === 0 && hasIndexEvent && events.indexEvent) {
      return "Article Index";
    }
    const contentIndex = hasIndexEvent ? index - 1 : index;
    const contentEvent = events.contentEvents[contentIndex];
    return (
      contentEvent?.title ||
      contentEvent?.tags?.find((t: any) => t[0] === "title")?.[1] ||
      `Note ${contentIndex + 1}`
    );
  };

  const successCount = results.filter((r) => r.success).length;
  const errors = results
    .filter((r) => !r.success && r.error)
    .map((r) => r.error!);

  // Pair each result with its ORIGINAL position before filtering.
  // (Previously the successful-events branch used the position within the
  // already-filtered array, which mislabeled titles — and the index-event
  // check — whenever an earlier event in the batch had failed.)
  const successfulEvents = results
    .map((result, index) => ({ result, index }))
    .filter(({ result }) => result.success && result.eventId)
    .map(({ result, index }) => ({
      eventId: result.eventId!,
      title: titleForIndex(index),
    }));

  const failedEvents = results
    .map((result, index) => ({ result, index }))
    .filter(({ result }) => !result.success)
    .map(({ result, index }) => ({
      title: titleForIndex(index),
      error: result.error || "Unknown error",
      sectionIndex: index,
    }));

  return {
    successCount,
    total: results.length,
    errors,
    successfulEvents,
    failedEvents,
  };
}
function generateDTag(title: string): string {
return title
.toLowerCase()

273
src/lib/utils/asciidoc_ast_parser.ts

@ -0,0 +1,273 @@ @@ -0,0 +1,273 @@
/**
* AST-based AsciiDoc parsing using Asciidoctor's native document structure
*
* This replaces the manual regex parsing in asciidoc_metadata.ts with proper
* AST traversal, leveraging Asciidoctor's built-in parsing capabilities.
*/
import Processor from "asciidoctor";
import type { Document } from "asciidoctor";
import { PublicationTree } from "../data_structures/publication_tree";
import { NDKEvent } from "@nostr-dev-kit/ndk";
import type NDK from "@nostr-dev-kit/ndk";
import { getMimeTags } from "./mime";
/**
 * A single section lifted out of the Asciidoctor document tree.
 */
export interface ASTSection {
// Section heading text ('' when absent).
title: string;
// Section content as returned by Asciidoctor's getContent().
content: string;
// Application-level heading depth (== is level 2, === is level 3, ...).
level: number;
// Section attributes as a plain key/value map.
attributes: Record<string, string>;
// Nested child sections (left empty by the flattening parser below).
subsections: ASTSection[];
}
/**
 * Result of parsing a whole AsciiDoc document via the Asciidoctor AST.
 */
export interface ASTParsedDocument {
// Document title ('' when absent).
title: string;
// Document body as returned by Asciidoctor's getContent().
content: string;
// Document-level attributes.
attributes: Record<string, string>;
// Sections extracted up to the requested parse level.
sections: ASTSection[];
}
/**
* Parse AsciiDoc content using Asciidoctor's AST instead of manual regex
*/
export function parseAsciiDocAST(content: string, parseLevel: number = 2): ASTParsedDocument {
const asciidoctor = Processor();
const document = asciidoctor.load(content, { standalone: false }) as Document;
return {
title: document.getTitle() || '',
content: document.getContent() || '',
attributes: document.getAttributes(),
sections: extractSectionsFromAST(document, parseLevel)
};
}
/**
 * Flatten the Asciidoctor section tree into a list of sections whose
 * application-level depth does not exceed `parseLevel`.
 *
 * Asciidoctor numbers `==` as level 1, `===` as level 2, and so on; the
 * application counts `==` as level 2, so levels are shifted by one before
 * comparison. Qualifying sections are emitted in pre-order with their
 * `subsections` left empty (the tree is flattened, not nested).
 */
function extractSectionsFromAST(document: Document, parseLevel: number): ASTSection[] {
  const collected: ASTSection[] = [];

  const visit = (node: any): void => {
    // Shift Asciidoctor's level (== is 1) to the app's convention (== is 2).
    const appLevel = node.getLevel() + 1;
    if (appLevel <= parseLevel) {
      collected.push({
        title: node.getTitle() || '',
        content: node.getContent() || '',
        level: appLevel,
        attributes: node.getAttributes() || {},
        subsections: [],
      });
    }
    // Always descend: deeper sections may still fit within parseLevel.
    for (const child of node.getSections?.() || []) {
      visit(child);
    }
  };

  for (const top of document.getSections()) {
    visit(top);
  }
  return collected;
}
/**
 * Recursively build the nested ASTSection list for one section's children,
 * keeping only children whose application-level depth is within parseLevel.
 * (Counterpart to the flat extractSectionsFromAST walk.)
 */
function extractSubsections(section: any, parseLevel: number): ASTSection[] {
  const children: any[] = section.getSections?.() || [];
  const nested: ASTSection[] = [];
  for (const child of children) {
    const appLevel = child.getLevel() + 1; // convert to app-level numbering
    if (appLevel > parseLevel) {
      continue;
    }
    nested.push({
      title: child.getTitle() || '',
      content: child.getContent() || '',
      level: appLevel,
      attributes: child.getAttributes() || {},
      subsections: extractSubsections(child, parseLevel),
    });
  }
  return nested;
}
/**
 * Create a PublicationTree directly from AsciiDoc source.
 *
 * The document header becomes the root 30040 index event; each parsed
 * section becomes a 30041 content event attached under that root.
 * Sections are added sequentially so insertion order follows document order.
 * This integrates with Michael's PublicationTree architecture.
 */
export async function createPublicationTreeFromAST(
  content: string,
  ndk: NDK,
  parseLevel: number = 2
): Promise<PublicationTree> {
  const parsedDoc = parseAsciiDocAST(content, parseLevel);
  const root = createIndexEventFromAST(parsedDoc, ndk);
  const tree = new PublicationTree(root, ndk);
  for (const sec of parsedDoc.sections) {
    await tree.addEvent(createContentEventFromSection(sec, ndk), root);
  }
  return tree;
}
/**
 * Build the kind-30040 index event for a parsed document: d-tag derived from
 * the title, MIME tags, document attributes as tags, and one a-tag pointing
 * at each section's 30041 content event.
 */
function createIndexEventFromAST(parsed: ASTParsedDocument, ndk: NDK): NDKEvent {
  const indexEvent = new NDKEvent(ndk);
  indexEvent.kind = 30040;
  indexEvent.created_at = Math.floor(Date.now() / 1000);

  const [mTag, MTag] = getMimeTags(30040);
  const eventTags: string[][] = [
    ["d", generateDTag(parsed.title)],
    mTag,
    MTag,
    ["title", parsed.title],
  ];

  // Document attributes become tags (system attributes filtered inside).
  addAttributesAsTags(eventTags, parsed.attributes);

  // One a-tag per section; falls back to the literal 'pubkey' when no user
  // is active (placeholder — presumably replaced at signing time).
  for (const section of parsed.sections) {
    eventTags.push([
      "a",
      `30041:${ndk.activeUser?.pubkey || 'pubkey'}:${generateDTag(section.title)}`,
    ]);
  }

  indexEvent.tags = eventTags;
  indexEvent.content = parsed.content;
  return indexEvent;
}
/**
 * Build a kind-30041 content event from one parsed section: d-tag derived
 * from the section title, MIME tags, title tag, and the section's attributes
 * mapped to tags.
 */
function createContentEventFromSection(section: ASTSection, ndk: NDK): NDKEvent {
  const contentEvent = new NDKEvent(ndk);
  contentEvent.kind = 30041;
  contentEvent.created_at = Math.floor(Date.now() / 1000);

  const [mTag, MTag] = getMimeTags(30041);
  const eventTags: string[][] = [
    ["d", generateDTag(section.title)],
    mTag,
    MTag,
    ["title", section.title],
  ];
  addAttributesAsTags(eventTags, section.attributes);

  contentEvent.tags = eventTags;
  contentEvent.content = section.content;
  return contentEvent;
}
/**
 * Derive a deterministic, URL-safe d-tag slug from a title: lowercased,
 * every run of non-letter/non-digit characters collapsed to a single
 * hyphen, with no leading or trailing hyphen.
 */
function generateDTag(title: string): string {
  return title
    .toLowerCase()
    .split(/[^\p{L}\p{N}]+/u)
    .filter((part) => part.length > 0)
    .join("-");
}
/**
 * Append AsciiDoc attributes to a Nostr tag list.
 *
 * Standard metadata attributes get their conventional Nostr tag names
 * (author, version, description -> summary, tags -> one "t" tag each);
 * all remaining attributes are copied through verbatim, except
 * Asciidoctor's own system attributes, which are filtered out.
 *
 * Fix: keys already emitted by the standard mapping are skipped in the
 * pass-through loop. Previously `author`/`version` were duplicated,
 * `description` was emitted both as "summary" and "description", and
 * `tags` was emitted both as t-tags and as a raw comma-joined "tags" tag.
 *
 * @param tags       Tag list mutated in place.
 * @param attributes Attribute map from Asciidoctor.
 */
function addAttributesAsTags(tags: string[][], attributes: Record<string, string>) {
  const systemAttributes = [
    'attribute-undefined', 'attribute-missing', 'appendix-caption', 'appendix-refsig',
    'caution-caption', 'chapter-refsig', 'example-caption', 'figure-caption',
    'important-caption', 'last-update-label', 'manname-title', 'note-caption',
    'part-refsig', 'preface-title', 'section-refsig', 'table-caption',
    'tip-caption', 'toc-title', 'untitled-label', 'version-label', 'warning-caption',
    'asciidoctor', 'asciidoctor-version', 'safe-mode-name', 'backend', 'doctype',
    'basebackend', 'filetype', 'outfilesuffix', 'stylesdir', 'iconsdir',
    'localdate', 'localyear', 'localtime', 'localdatetime', 'docdate',
    'docyear', 'doctime', 'docdatetime', 'doctitle', 'embedded', 'notitle'
  ];
  // Keys handled by the explicit mapping below; excluded from pass-through.
  const mappedKeys = new Set(["author", "version", "description", "tags"]);

  // Standard metadata tags.
  if (attributes.author) tags.push(["author", attributes.author]);
  if (attributes.version) tags.push(["version", attributes.version]);
  if (attributes.description) tags.push(["summary", attributes.description]);
  if (attributes.tags) {
    attributes.tags.split(',').forEach(tag =>
      tags.push(["t", tag.trim()])
    );
  }

  // Custom (non-system, non-mapped) attributes pass through verbatim.
  Object.entries(attributes).forEach(([key, value]) => {
    if (!systemAttributes.includes(key) && !mappedKeys.has(key) && value) {
      tags.push([key, value]);
    }
  });
}
/**
 * Build an Asciidoctor tree-processor extension that constructs a
 * PublicationTree while a document is parsed.
 *
 * The returned function is intended for Asciidoctor's extension registry;
 * it registers a treeProcessor whose `process` hook runs per parsed
 * document and stashes the tree on the document under the
 * 'publicationTree' attribute for later retrieval.
 *
 * NOTE(review): createPublicationTreeFromDocument is async, so the value
 * stored in the attribute is a pending Promise, not a resolved tree —
 * consumers must await it, and a rejection is currently unhandled.
 * Confirm this is intentional.
 */
export function createPublicationTreeProcessor(ndk: NDK, parseLevel: number = 2) {
  return function(extensions: any) {
    extensions.treeProcessor(function(this: any) {
      const dsl = this;
      dsl.process(function(this: any, document: Document) {
        // Create PublicationTree and store on document for later retrieval
        const publicationTree = createPublicationTreeFromDocument(document, ndk, parseLevel);
        document.setAttribute('publicationTree', publicationTree);
      });
    });
  };
}
/**
 * Build a PublicationTree from an already-loaded Asciidoctor Document.
 * Mirrors createPublicationTreeFromAST, but starts from a Document object
 * rather than raw source text.
 */
async function createPublicationTreeFromDocument(
  document: Document,
  ndk: NDK,
  parseLevel: number
): Promise<PublicationTree> {
  // Assemble the same flat model parseAsciiDocAST would produce.
  const model: ASTParsedDocument = {
    title: document.getTitle() || '',
    content: document.getContent() || '',
    attributes: document.getAttributes(),
    sections: extractSectionsFromAST(document, parseLevel),
  };

  const root = createIndexEventFromAST(model, ndk);
  const tree = new PublicationTree(root, ndk);
  for (const sec of model.sections) {
    await tree.addEvent(createContentEventFromSection(sec, ndk), root);
  }
  return tree;
}

375
src/lib/utils/asciidoc_metadata.ts

@ -2,7 +2,6 @@ @@ -2,7 +2,6 @@
* AsciiDoc Metadata Extraction Service using Asciidoctor
*
* Thin wrapper around Asciidoctor's built-in metadata extraction capabilities.
* Leverages the existing Pharos parser to avoid duplication.
*/
// @ts-ignore
@ -24,45 +23,36 @@ export interface AsciiDocMetadata { @@ -24,45 +23,36 @@ export interface AsciiDocMetadata {
publishedBy?: string;
type?: string;
autoUpdate?: "yes" | "ask" | "no";
customAttributes?: Record<string, string>;
}
export type SectionMetadata = AsciiDocMetadata;
export interface ParsedAsciiDoc {
metadata: AsciiDocMetadata;
content: string;
sections: Array<{
metadata: SectionMetadata;
content: string;
title: string;
}>;
}
// Shared attribute mapping based on Asciidoctor standard attributes
const ATTRIBUTE_MAP: Record<string, keyof AsciiDocMetadata> = {
// Standard Asciidoctor attributes
"author": "authors",
"description": "summary",
"keywords": "tags",
"revnumber": "version",
"revdate": "publicationDate",
"revremark": "edition",
"title": "title",
author: "authors",
description: "summary",
keywords: "tags",
revnumber: "version",
revdate: "publicationDate",
revremark: "edition",
title: "title",
// Custom attributes for Alexandria
"published_by": "publishedBy",
"publisher": "publisher",
"summary": "summary",
"image": "coverImage",
"cover": "coverImage",
"isbn": "isbn",
"source": "source",
"type": "type",
published_by: "publishedBy",
publisher: "publisher",
summary: "summary",
image: "coverImage",
cover: "coverImage",
isbn: "isbn",
source: "source",
type: "type",
"auto-update": "autoUpdate",
"version": "version",
"edition": "edition",
"published_on": "publicationDate",
"date": "publicationDate",
version: "version",
edition: "edition",
published_on: "publicationDate",
date: "publicationDate",
"version-label": "version",
};
@ -73,6 +63,30 @@ function createProcessor() { @@ -73,6 +63,30 @@ function createProcessor() {
return Processor();
}
/**
 * Decodes a small fixed set of HTML entities (smart quotes, angle
 * brackets, ampersand, apostrophes) back to their literal characters.
 *
 * Entities are substituted one full pass at a time, in declaration order,
 * preserving the original pass-by-pass semantics (e.g. "&amp;lt;" decodes
 * all the way to "<" because "&amp;" is handled before "&lt;").
 */
function decodeHtmlEntities(text: string): string {
  const entities: Record<string, string> = {
    "&#8217;": "'",
    "&#8216;": "'",
    "&#8220;": '"',
    "&#8221;": '"',
    "&amp;": "&",
    "&lt;": "<",
    "&gt;": ">",
    "&quot;": '"',
    "&#39;": "'",
    "&apos;": "'",
  };
  let decoded = text;
  for (const [entity, literal] of Object.entries(entities)) {
    decoded = decoded.split(entity).join(literal);
  }
  return decoded;
}
/**
* Extracts tags from attributes, combining tags and keywords
*/
@ -125,6 +139,16 @@ function mapAttributesToMetadata( @@ -125,6 +139,16 @@ function mapAttributesToMetadata(
} else {
(metadata as any)[metadataKey] = value;
}
} else if (
value &&
typeof value === "string" &&
!systemAttributes.includes(key)
) {
// Handle unknown/custom attributes - but only if they're not system attributes
if (!metadata.customAttributes) {
metadata.customAttributes = {};
}
metadata.customAttributes[key] = value;
}
}
}
@ -237,19 +261,44 @@ function extractSectionAuthors(sectionContent: string): string[] { @@ -237,19 +261,44 @@ function extractSectionAuthors(sectionContent: string): string[] {
return authors;
}
// System attributes to filter out when adding custom attributes as tags.
// These are Asciidoctor's intrinsic caption/label/refsig defaults, which
// every document carries; copying them into Nostr tags would add noise.
// NOTE(review): this list is shorter than the one in asciidoc_ast_parser.ts
// (no backend/doctype/localdate entries) — confirm whether they should match.
const systemAttributes = [
  "attribute-undefined",
  "attribute-missing",
  "appendix-caption",
  "appendix-refsig",
  "caution-caption",
  "chapter-refsig",
  "example-caption",
  "figure-caption",
  "important-caption",
  "last-update-label",
  "manname-title",
  "note-caption",
  "part-refsig",
  "preface-title",
  "section-refsig",
  "table-caption",
  "tip-caption",
  "toc-title",
  "untitled-label",
  "version-label",
  "warning-caption",
];
/**
* Strips document header and attribute lines from content
* Strips section header and attribute lines from content
*/
function stripDocumentHeader(content: string): string {
const lines = content.split(/\r?\n/);
function stripSectionHeader(sectionContent: string): string {
const lines = sectionContent.split(/\r?\n/);
let contentStart = 0;
// Find where the document header ends
// Find where the section header ends
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
// Skip title line, author line, revision line, and attribute lines
// Skip section title line and attribute lines
if (
!line.match(/^=\s+/) &&
!line.match(/^=+\s+/) &&
!line.includes("<") &&
!line.match(/^.+,\s*.+:\s*.+$/) &&
!line.match(/^:[^:]+:\s*.+$/) &&
@ -260,92 +309,132 @@ function stripDocumentHeader(content: string): string { @@ -260,92 +309,132 @@ function stripDocumentHeader(content: string): string {
}
}
// Filter out all attribute lines and author lines from the content
const contentLines = lines.slice(contentStart);
const filteredLines = contentLines.filter((line) => {
// Skip attribute lines
const processedLines: string[] = [];
let lastWasEmpty = false;
for (let i = contentStart; i < lines.length; i++) {
const line = lines[i];
// Skip attribute lines within content
if (line.match(/^:[^:]+:\s*.+$/)) {
return false;
continue;
}
// Handle empty lines - don't add more than one consecutive empty line
if (line.trim() === "") {
if (!lastWasEmpty) {
processedLines.push("");
}
lastWasEmpty = true;
} else {
processedLines.push(line);
lastWasEmpty = false;
}
}
return true;
});
// Remove extra blank lines and normalize newlines
return filteredLines.join("\n").replace(/\n\s*\n\s*\n/g, "\n\n").replace(
/\n\s*\n/g,
"\n",
).trim();
return processedLines
.join("\n")
.replace(/\n\s*\n\s*\n/g, "\n\n")
.trim();
}
/**
* Strips section header and attribute lines from content
* Strips document header and attribute lines from content
*/
function stripSectionHeader(sectionContent: string): string {
const lines = sectionContent.split(/\r?\n/);
function stripDocumentHeader(content: string): string {
const lines = content.split(/\r?\n/);
let contentStart = 0;
// Find where the section header ends
// Find the first line that is actual content (not header, author, or attribute)
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
// Skip section title line, author line, and attribute lines
// Skip title line, author line, revision line, and attribute lines
if (
!line.match(/^==\s+/) &&
!line.match(/^=\s+/) &&
!line.includes("<") &&
!line.match(/^.+,\s*.+:\s*.+$/) &&
!line.match(/^:[^:]+:\s*.+$/) &&
line.trim() !== "" &&
!(line.match(/^[A-Za-z\s]+$/) && line.trim() !== "" &&
line.trim().split(/\s+/).length <= 2)
line.trim() !== ""
) {
contentStart = i;
break;
}
}
// Filter out all attribute lines, author lines, and section headers from the content
// Filter out all attribute lines and author lines from the content
const contentLines = lines.slice(contentStart);
const filteredLines = contentLines.filter((line) => {
// Skip attribute lines
if (line.match(/^:[^:]+:\s*.+$/)) {
return false;
}
// Skip author lines (simple names without email)
if (
line.match(/^[A-Za-z\s]+$/) &&
line.trim() !== "" &&
line.trim().split(/\s+/).length <= 2
) {
return false;
}
// Skip section headers
if (line.match(/^==\s+/)) {
return false;
}
return true;
});
// Ensure deeper headers (====) have proper newlines around them
const processedLines = [];
for (let i = 0; i < filteredLines.length; i++) {
const line = filteredLines[i];
const prevLine = i > 0 ? filteredLines[i - 1] : "";
const nextLine = i < filteredLines.length - 1 ? filteredLines[i + 1] : "";
// If this is a deeper header (====+), ensure it has newlines around it
if (line.match(/^====+\s+/)) {
// Add newline before if previous line isn't blank
if (prevLine && prevLine.trim() !== "") {
processedLines.push("");
}
processedLines.push(line);
// Add newline after if next line isn't blank and exists
if (nextLine && nextLine.trim() !== "") {
processedLines.push("");
}
} else {
processedLines.push(line);
}
}
// Remove extra blank lines and normalize newlines
return filteredLines.join("\n").replace(/\n\s*\n\s*\n/g, "\n\n").replace(
/\n\s*\n/g,
"\n",
).trim();
return processedLines
.join("\n")
.replace(/\n\s*\n\s*\n/g, "\n\n")
.trim();
}
/**
* Parses attributes from section content
* Parses attributes from section content using simple regex
* Converts :tagname: tagvalue -> [tagname, tagvalue]
* Converts :tags: comma,separated -> [t, tag1], [t, tag2], etc.
*/
function parseSectionAttributes(sectionContent: string): Record<string, any> {
const attributes: Record<string, any> = {};
const lines = sectionContent.split(/\r?\n/);
export function parseSimpleAttributes(content: string): [string, string][] {
const tags: [string, string][] = [];
const lines = content.split(/\r?\n/);
for (const line of lines) {
const match = line.match(/^:([^:]+):\s*(.+)$/);
if (match) {
const [, key, value] = match;
attributes[key.trim()] = value.trim();
const tagName = key.trim();
const tagValue = value.trim();
if (tagName === "tags") {
// Special handling for :tags: - split into individual t-tags
const tags_list = tagValue
.split(",")
.map((t) => t.trim())
.filter((t) => t.length > 0);
tags_list.forEach((tag) => {
tags.push(["t", tag]);
});
} else {
// Regular attribute -> [tagname, tagvalue]
tags.push([tagName, tagValue]);
}
}
}
return attributes;
return tags;
}
/**
@ -365,16 +454,7 @@ export function extractDocumentMetadata(inputContent: string): { @@ -365,16 +454,7 @@ export function extractDocumentMetadata(inputContent: string): {
// Extract basic metadata
const title = document.getTitle();
if (title) {
// Decode HTML entities in the title
metadata.title = title
.replace(/&amp;/g, "&")
.replace(/&lt;/g, "<")
.replace(/&gt;/g, ">")
.replace(/&quot;/g, '"')
.replace(/&#039;/g, "'")
.replace(/&nbsp;/g, " ");
}
if (title) metadata.title = decodeHtmlEntities(title);
// Handle multiple authors - combine header line and attributes
const authors = extractDocumentAuthors(document.getSource());
@ -405,15 +485,33 @@ export function extractDocumentMetadata(inputContent: string): { @@ -405,15 +485,33 @@ export function extractDocumentMetadata(inputContent: string): {
metadata.authors = [...new Set(authors)]; // Remove duplicates
}
// Extract revision info
// Extract revision info (only if it looks like valid revision data)
const revisionNumber = document.getRevisionNumber();
if (revisionNumber) metadata.version = revisionNumber;
if (
revisionNumber &&
revisionNumber !== "Version" &&
!revisionNumber.includes("==")
) {
metadata.version = revisionNumber;
}
const revisionRemark = document.getRevisionRemark();
if (revisionRemark) metadata.publishedBy = revisionRemark;
if (
revisionRemark &&
!revisionRemark.includes("[NOTE]") &&
!revisionRemark.includes("==")
) {
metadata.publishedBy = revisionRemark;
}
const revisionDate = document.getRevisionDate();
if (revisionDate) metadata.publicationDate = revisionDate;
if (
revisionDate &&
!revisionDate.includes("[NOTE]") &&
!revisionDate.includes("==")
) {
metadata.publicationDate = revisionDate;
}
// Map attributes to metadata (but skip version and publishedBy if we already have them from revision)
mapAttributesToMetadata(attributes, metadata, true);
@ -446,23 +544,15 @@ export function extractSectionMetadata(inputSectionContent: string): { @@ -446,23 +544,15 @@ export function extractSectionMetadata(inputSectionContent: string): {
content: string;
title: string;
} {
const asciidoctor = createProcessor();
const document = asciidoctor.load(`= Temp\n\n${inputSectionContent}`, {
standalone: false,
}) as Document;
const sections = document.getSections();
if (sections.length === 0) {
return { metadata: {}, content: inputSectionContent, title: "" };
// Extract title directly from the content using regex for more control
const titleMatch = inputSectionContent.match(/^(=+)\s+(.+)$/m);
let title = "";
if (titleMatch) {
title = titleMatch[2].trim();
}
const section = sections[0];
const title = section.getTitle() || "";
const metadata: SectionMetadata = { title };
// Parse attributes from the section content
const attributes = parseSectionAttributes(inputSectionContent);
// Extract authors from section content
const authors = extractSectionAuthors(inputSectionContent);
@ -482,13 +572,11 @@ export function extractSectionMetadata(inputSectionContent: string): { @@ -482,13 +572,11 @@ export function extractSectionMetadata(inputSectionContent: string): {
metadata.authors = authors;
}
// Map attributes to metadata (sections can have authors, but skip author mapping to avoid duplication)
const attributesWithoutAuthor = { ...attributes };
delete attributesWithoutAuthor.author;
mapAttributesToMetadata(attributesWithoutAuthor, metadata, false);
// Handle tags and keywords
const tags = extractTagsFromAttributes(attributes);
// Extract tags using parseSimpleAttributes (which is what's used in generateNostrEvents)
const simpleAttrs = parseSimpleAttributes(inputSectionContent);
const tags = simpleAttrs
.filter((attr) => attr[0] === "t")
.map((attr) => attr[1]);
if (tags.length > 0) {
metadata.tags = tags;
}
@ -497,53 +585,6 @@ export function extractSectionMetadata(inputSectionContent: string): { @@ -497,53 +585,6 @@ export function extractSectionMetadata(inputSectionContent: string): {
return { metadata, content, title };
}
/**
* Parses AsciiDoc content into sections with metadata
*/
export function parseAsciiDocWithMetadata(content: string): ParsedAsciiDoc {
const asciidoctor = createProcessor();
const document = asciidoctor.load(content, { standalone: false }) as Document;
const { metadata: docMetadata } = extractDocumentMetadata(content);
// Parse the original content to find section attributes
const lines = content.split(/\r?\n/);
const sectionsWithMetadata: Array<{
metadata: SectionMetadata;
content: string;
title: string;
}> = [];
let currentSection: string | null = null;
let currentSectionContent: string[] = [];
for (const line of lines) {
if (line.match(/^==\s+/)) {
// Save previous section if exists
if (currentSection) {
const sectionContent = currentSectionContent.join("\n");
sectionsWithMetadata.push(extractSectionMetadata(sectionContent));
}
// Start new section
currentSection = line;
currentSectionContent = [line];
} else if (currentSection) {
currentSectionContent.push(line);
}
}
// Save the last section
if (currentSection) {
const sectionContent = currentSectionContent.join("\n");
sectionsWithMetadata.push(extractSectionMetadata(sectionContent));
}
return {
metadata: docMetadata,
content: document.getSource(),
sections: sectionsWithMetadata,
};
}
/**
* Converts metadata to Nostr event tags
*/
@ -572,6 +613,15 @@ export function metadataToTags( @@ -572,6 +613,15 @@ export function metadataToTags(
metadata.tags.forEach((tag) => tags.push(["t", tag]));
}
// Add custom attributes as tags, but filter out system attributes
if (metadata.customAttributes) {
Object.entries(metadata.customAttributes).forEach(([key, value]) => {
if (!systemAttributes.includes(key)) {
tags.push([key, value]);
}
});
}
return tags;
}
@ -648,9 +698,10 @@ export function extractSmartMetadata(content: string): { @@ -648,9 +698,10 @@ export function extractSmartMetadata(content: string): {
// Check if it's a minimal document header (just title, no other metadata)
const lines = content.split(/\r?\n/);
const titleLine = lines.find((line) => line.match(/^=\s+/));
const hasOtherMetadata = lines.some((line) =>
const hasOtherMetadata = lines.some(
(line) =>
line.includes("<") || // author line
line.match(/^.+,\s*.+:\s*.+$/) // revision line
line.match(/^.+,\s*.+:\s*.+$/), // revision line
);
if (hasOtherMetadata) {

577
src/lib/utils/asciidoc_parser.ts

@@ -0,0 +1,577 @@
/**
* AsciiDoc Content Parsing Service
*
* Handles parsing AsciiDoc content into hierarchical structures for publication.
* Separated from metadata extraction to maintain single responsibility principle.
*/
// @ts-ignore
import Processor from "asciidoctor";
import type { Document } from "asciidoctor";
import {
parseSimpleAttributes,
extractDocumentMetadata,
extractSectionMetadata,
} from "./asciidoc_metadata.ts";
/**
 * Metadata fields shared by the document header and by each section.
 * Structurally mirrors AsciiDocMetadata in asciidoc_metadata.ts; declared
 * once here instead of duplicating the 15-field shape inline twice.
 */
interface ParsedMetadata {
  title?: string;
  authors?: string[];
  version?: string;
  edition?: string;
  publicationDate?: string;
  publisher?: string;
  summary?: string;
  coverImage?: string;
  isbn?: string;
  tags?: string[];
  source?: string;
  publishedBy?: string;
  type?: string;
  autoUpdate?: "yes" | "ask" | "no";
  customAttributes?: Record<string, string>;
}

/**
 * Result of parsing an AsciiDoc document: document-level metadata, the raw
 * source content, the document title, and the list of parsed sections.
 */
export interface ParsedAsciiDoc {
  metadata: ParsedMetadata;
  content: string;
  title: string;
  sections: Array<{
    metadata: ParsedMetadata;
    content: string;
    title: string;
  }>;
}
/**
 * Creates a new Asciidoctor processor instance.
 * @returns A fresh processor from the asciidoctor package.
 */
function createProcessor() {
  return Processor();
}
/**
 * Determine a section's header level from its raw content: the number of
 * "=" characters on the first header-looking line ("==" -> 2, "===" -> 3).
 * Returns 0 when no header line is present.
 */
function getSectionLevel(sectionContent: string): number {
  for (const line of sectionContent.split(/\r?\n/)) {
    const match = /^(=+)\s+/.exec(line);
    if (match) {
      return match[1].length;
    }
  }
  return 0;
}
/**
 * Extract only the intro content of a section: the lines between the
 * section's own header (the first header at currentLevel) and the first
 * subsequent header of any depth.
 *
 * Fix: previously a later header at the same or shallower level neither
 * terminated the scan nor was recorded, so the following section's body
 * lines leaked into the returned intro. Any header encountered after the
 * section's own header now ends collection (the deeper-header case is
 * unchanged).
 *
 * @param sectionContent Raw section text, starting at or before its header.
 * @param currentLevel   Header depth of the section itself ("==" = 2).
 * @returns The trimmed intro text; "" when there is none.
 */
function extractIntroContent(
  sectionContent: string,
  currentLevel: number,
): string {
  const lines = sectionContent.split(/\r?\n/);
  const introLines: string[] = [];
  let foundHeader = false;
  for (const line of lines) {
    const headerMatch = line.match(/^(=+)\s+/);
    if (headerMatch) {
      const level = headerMatch[1].length;
      if (!foundHeader) {
        if (level === currentLevel) {
          // The section's own header; skip the line itself.
          foundHeader = true;
        } else if (level > currentLevel) {
          // A deeper header before the section header: nothing to collect.
          break;
        }
        // A shallower header before the section header is ignored.
      } else {
        // Any header after the section header ends the intro.
        break;
      }
    } else if (foundHeader) {
      introLines.push(line);
    }
  }
  return introLines.join("\n").trim();
}
/**
 * Parse AsciiDoc source into document metadata plus one entry per "=="
 * section, each with its own extracted metadata.
 */
export function parseAsciiDocWithMetadata(content: string): ParsedAsciiDoc {
  const document = createProcessor().load(content, { standalone: false }) as Document;
  const { metadata: docMetadata } = extractDocumentMetadata(content);

  const sectionsWithMetadata: Array<{
    metadata: ParsedAsciiDoc["sections"][0]["metadata"];
    content: string;
    title: string;
  }> = [];

  // Raw lines of the section currently being scanned; null until the
  // first "== " header has been seen.
  let pending: string[] | null = null;
  const flushPending = () => {
    if (pending) {
      sectionsWithMetadata.push(extractSectionMetadata(pending.join("\n")));
    }
  };

  for (const line of content.split(/\r?\n/)) {
    if (/^==\s+/.test(line)) {
      flushPending();
      pending = [line];
    } else if (pending) {
      pending.push(line);
    }
  }
  flushPending();

  return {
    metadata: docMetadata,
    content: document.getSource(),
    title: docMetadata.title || "",
    sections: sectionsWithMetadata,
  };
}
/**
 * Iterative AsciiDoc parsing based on specified level.
 *
 * Level 2: only "==" sections become content events; each keeps its full
 * raw text including all deeper subsections.
 * Level 3+: headers at exactly parseLevel become full content sections,
 * while every header level from 2 to parseLevel-1 is emitted as a
 * title-only "index" section; the combined list is re-sorted into
 * document order.
 *
 * @param content    Raw AsciiDoc source.
 * @param parseLevel Header depth ("==" = 2) at which content events are cut.
 * @returns Document metadata, the pre-section document content, and the
 *          ordered section list.
 */
export function parseAsciiDocIterative(
  content: string,
  parseLevel: number = 2,
): ParsedAsciiDoc {
  // NOTE(review): `document` is loaded but never read afterwards — the
  // returned content comes from the manual line scan below. Confirm the
  // load call is needed (e.g. for validation side effects).
  const asciidoctor = createProcessor();
  const document = asciidoctor.load(content, { standalone: false }) as Document;
  // Extract document metadata using the metadata extraction functions
  const { metadata: docMetadata } = extractDocumentMetadata(content);
  const lines = content.split(/\r?\n/);
  const sections: Array<{
    metadata: ParsedAsciiDoc["sections"][0]["metadata"];
    content: string;
    title: string;
  }> = [];
  if (parseLevel === 2) {
    // Level 2: Only == sections become events
    const level2Pattern = /^==\s+/;
    let currentSection: string | null = null;      // header line of the open section
    let currentSectionContent: string[] = [];      // raw lines of the open section
    let documentContent: string[] = [];            // lines before the first "==" header
    let inDocumentHeader = true;
    for (const line of lines) {
      if (line.match(level2Pattern)) {
        inDocumentHeader = false;
        // Save previous section if exists
        if (currentSection) {
          const sectionContent = currentSectionContent.join("\n");
          const sectionMeta = extractSectionMetadata(sectionContent);
          // For level 2, preserve the full content including the header
          sections.push({
            ...sectionMeta,
            content: sectionContent, // Use full content, not stripped
          });
        }
        // Start new section
        currentSection = line;
        currentSectionContent = [line];
      } else if (currentSection) {
        currentSectionContent.push(line);
      } else if (inDocumentHeader) {
        documentContent.push(line);
      }
    }
    // Save the last section
    if (currentSection) {
      const sectionContent = currentSectionContent.join("\n");
      const sectionMeta = extractSectionMetadata(sectionContent);
      // For level 2, preserve the full content including the header
      sections.push({
        ...sectionMeta,
        content: sectionContent, // Use full content, not stripped
      });
    }
    const docContent = documentContent.join("\n");
    return {
      metadata: docMetadata,
      content: docContent,
      title: docMetadata.title || "",
      sections: sections,
    };
  }
  // Level 3+: Parse hierarchically
  // All levels from 2 to parseLevel-1 are indices (title only)
  // Level parseLevel are content sections (full content)
  // First, collect all sections at the content level (parseLevel)
  const contentLevelPattern = new RegExp(`^${"=".repeat(parseLevel)}\\s+`);
  let currentSection: string | null = null;
  let currentSectionContent: string[] = [];
  let documentContent: string[] = [];
  let inDocumentHeader = true;
  for (const line of lines) {
    if (line.match(contentLevelPattern)) {
      inDocumentHeader = false;
      // Save previous section if exists
      if (currentSection) {
        const sectionContent = currentSectionContent.join("\n");
        const sectionMeta = extractSectionMetadata(sectionContent);
        sections.push({
          ...sectionMeta,
          content: sectionContent, // Full content including headers
        });
      }
      // Start new content section
      currentSection = line;
      currentSectionContent = [line];
    } else if (currentSection) {
      // Continue collecting content for current section
      // NOTE(review): shallower headers (e.g. a "==" after a "===") also
      // land here and are appended to the open section's content until the
      // next parseLevel header — confirm this is intended.
      currentSectionContent.push(line);
    } else if (inDocumentHeader) {
      documentContent.push(line);
    }
  }
  // Save the last section
  if (currentSection) {
    const sectionContent = currentSectionContent.join("\n");
    const sectionMeta = extractSectionMetadata(sectionContent);
    sections.push({
      ...sectionMeta,
      content: sectionContent, // Full content including headers
    });
  }
  // Now collect index sections (all levels from 2 to parseLevel-1)
  // These should be shown as navigation/structure but not full content
  const indexSections: Array<{
    metadata: ParsedAsciiDoc["sections"][0]["metadata"];
    content: string;
    title: string;
    level: number;
  }> = [];
  for (let level = 2; level < parseLevel; level++) {
    const levelPattern = new RegExp(`^${"=".repeat(level)}\\s+(.+)$`, "gm");
    const matches = content.matchAll(levelPattern);
    for (const match of matches) {
      const title = match[1].trim();
      indexSections.push({
        metadata: { title },
        content: `${"=".repeat(level)} ${title}`, // Just the header line for index sections
        title,
        level,
      });
    }
  }
  // Add actual level to content sections based on their content
  const contentSectionsWithLevel = sections.map((s) => ({
    ...s,
    level: getSectionLevel(s.content),
  }));
  // Combine index sections and content sections
  // Sort by position in original content to maintain order
  const allSections = [...indexSections, ...contentSectionsWithLevel];
  // Sort sections by their appearance in the original content
  // NOTE(review): indexOf on the first line finds the FIRST occurrence, so
  // duplicate header/first lines could mis-order sections — verify.
  allSections.sort((a, b) => {
    const posA = content.indexOf(a.content.split("\n")[0]);
    const posB = content.indexOf(b.content.split("\n")[0]);
    return posA - posB;
  });
  const docContent = documentContent.join("\n");
  return {
    metadata: docMetadata,
    content: docContent,
    title: docMetadata.title || "",
    sections: allSections,
  };
}
/**
* Generates Nostr events from parsed AsciiDoc with proper hierarchical structure
* Based on docreference.md specifications
*/
export function generateNostrEvents(
  parsed: ParsedAsciiDoc,
  parseLevel: number = 2, // header depth (== is level 2) at which sections become leaf 30041 events
  pubkey?: string, // author pubkey embedded in "a" tag references; falls back to the literal "pubkey"
  maxDepth: number = 6, // NOTE(review): currently unused in this function — confirm whether depth capping was intended
): {
  indexEvent?: any; // top-level 30040 for article-style documents; absent for scattered notes
  contentEvents: any[]; // all generated section events (both 30040 and 30041), in creation order
} {
  const allEvents: any[] = [];
  // Placeholder pubkey keeps "a" tag references well-formed for previews/tests.
  const actualPubkey = pubkey || "pubkey";
  // Helper function to generate a URL-safe section ID ("d" tag value):
  // lowercase, runs of non-letter/non-digit characters collapsed to single
  // dashes, leading/trailing dashes trimmed. NOTE(review): two sections with
  // the same title produce the same ID — verify collisions are acceptable.
  const generateSectionId = (title: string): string => {
    return title
      .toLowerCase()
      .replace(/[^\p{L}\p{N}]/gu, "-")
      .replace(/-+/g, "-")
      .replace(/^-|-$/g, "");
  };
  // Node of the in-memory hierarchy rebuilt from the flat section list.
  interface TreeNode {
    section: {
      metadata: any;
      content: string;
      title: string;
    };
    level: number; // AsciiDoc header level as reported by getSectionLevel
    sectionId: string; // derived "d" tag value
    tags: [string, string][]; // attribute tags parsed from the section body
    children: TreeNode[];
    parent?: TreeNode;
  }
  // Convert flat sections to a tree. A stack of open ancestors is kept:
  // each section becomes a child of the nearest preceding section with a
  // strictly smaller header level; sections with no such ancestor are roots.
  const buildTree = (): TreeNode[] => {
    const roots: TreeNode[] = [];
    const stack: TreeNode[] = [];
    for (const section of parsed.sections) {
      const level = getSectionLevel(section.content);
      const sectionId = generateSectionId(section.title);
      const tags = parseSimpleAttributes(section.content);
      const node: TreeNode = {
        section,
        level,
        sectionId,
        tags,
        children: [],
      };
      // Pop ancestors at the same or deeper level; they cannot parent this node.
      while (stack.length > 0 && stack[stack.length - 1].level >= level) {
        stack.pop();
      }
      if (stack.length === 0) {
        // This is a root level section
        roots.push(node);
      } else {
        // This is a child of the last item in stack
        const parent = stack[stack.length - 1];
        parent.children.push(node);
        node.parent = parent;
      }
      stack.push(node);
    }
    return roots;
  };
  const tree = buildTree();
  // Recursively emit events for a node and its descendants. Ordering matters:
  // a node's intro-content event (if any) and its index event are pushed
  // before any of its children's events.
  const createEventsFromNode = (node: TreeNode): void => {
    const { section, level, sectionId, tags, children } = node;
    // A node above the target parse level that has any child at or above the
    // target level becomes a 30040 index; otherwise it stays a 30041 leaf.
    // (hasChildrenAtTargetLevel is subsumed by the <= check but kept explicit.)
    const hasChildrenAtTargetLevel = children.some(
      (child) => child.level === parseLevel,
    );
    const shouldBeIndex =
      level < parseLevel &&
      (hasChildrenAtTargetLevel ||
        children.some((child) => child.level <= parseLevel));
    if (shouldBeIndex) {
      // Text that precedes the first child heading is split off into its own
      // 30041 event so the index itself can stay content-free.
      const introContent = extractIntroContent(section.content, level);
      if (introContent.trim()) {
        const contentEvent = {
          id: "",
          pubkey: "",
          created_at: Math.floor(Date.now() / 1000), // each event timestamps itself at creation
          kind: 30041,
          tags: [
            ["d", `${sectionId}-content`], // "-content" suffix avoids clashing with the index's d tag
            ["title", section.title],
            ...tags,
          ],
          content: introContent,
          sig: "",
        };
        allEvents.push(contentEvent);
      }
      // Create index event (30040) whose "a" tags reference the intro event
      // (if any) followed by each direct child.
      const childATags: string[][] = [];
      // Add a-tag for intro content if it exists
      if (introContent.trim()) {
        childATags.push([
          "a",
          `30041:${actualPubkey}:${sectionId}-content`,
          "",
          "",
        ]);
      }
      // Add a-tags for direct children; the referenced kind mirrors the
      // shouldBeIndex decision the child will make for itself below.
      for (const child of children) {
        const childHasSubChildren = child.children.some(
          (grandchild) => grandchild.level <= parseLevel,
        );
        const childShouldBeIndex =
          child.level < parseLevel && childHasSubChildren;
        const childKind = childShouldBeIndex ? 30040 : 30041;
        childATags.push([
          "a",
          `${childKind}:${actualPubkey}:${child.sectionId}`,
          "",
          "",
        ]);
      }
      const indexEvent = {
        id: "",
        pubkey: "",
        created_at: Math.floor(Date.now() / 1000),
        kind: 30040,
        tags: [
          ["d", sectionId],
          ["title", section.title],
          ...tags,
          ...childATags,
        ],
        content: "", // index events carry structure in tags, not content
        sig: "",
      };
      allEvents.push(indexEvent);
    } else {
      // Leaf: a regular 30041 content event carrying the full section body
      // (including any nested sub-headings as raw AsciiDoc).
      const contentEvent = {
        id: "",
        pubkey: "",
        created_at: Math.floor(Date.now() / 1000),
        kind: 30041,
        tags: [["d", sectionId], ["title", section.title], ...tags],
        content: section.content,
        sig: "",
      };
      allEvents.push(contentEvent);
    }
    // Recursively process children
    for (const child of children) {
      createEventsFromNode(child);
    }
  };
  // Process all root level sections
  for (const rootNode of tree) {
    createEventsFromNode(rootNode);
  }
  // Article format: a document title yields a top-level 30040 referencing
  // every root section.
  if (parsed.title && parsed.title.trim() !== "") {
    const documentId = generateSectionId(parsed.title);
    const documentTags = parseSimpleAttributes(parsed.content);
    // Create a-tags for all root level sections, again mirroring each root's
    // own 30040/30041 decision.
    const mainIndexATags = tree.map((rootNode) => {
      const hasSubChildren = rootNode.children.some(
        (child) => child.level <= parseLevel,
      );
      const shouldBeIndex = rootNode.level < parseLevel && hasSubChildren;
      const kind = shouldBeIndex ? 30040 : 30041;
      return ["a", `${kind}:${actualPubkey}:${rootNode.sectionId}`, "", ""];
    });
    // NOTE(review): debug logging left in — consider removing for production.
    console.log("Debug: Root sections found:", tree.length);
    console.log("Debug: Main index a-tags:", mainIndexATags);
    const mainIndexEvent = {
      id: "",
      pubkey: "",
      created_at: Math.floor(Date.now() / 1000),
      kind: 30040,
      tags: [
        ["d", documentId],
        ["title", parsed.title],
        ...documentTags,
        ...mainIndexATags,
      ],
      content: "",
      sig: "",
    };
    return {
      indexEvent: mainIndexEvent,
      contentEvents: allEvents,
    };
  }
  // For scattered notes, return only content events
  return {
    contentEvents: allEvents,
  };
}
/**
 * Detects the publishing shape of raw AsciiDoc content.
 *
 * - "article": the document opens with a document title line ("= Title").
 * - "scattered-notes": no document title, but at least one section header
 *   ("==" through "======") at the start of a line.
 * - "none": neither.
 *
 * Fix: section detection previously used `content.includes("==")`, which
 * matched "==" anywhere in prose (e.g. "a == b") and misclassified plain
 * text as scattered notes. Headers are now matched only at line starts.
 *
 * @param content Raw AsciiDoc source.
 * @returns The detected content type.
 */
export function detectContentType(
  content: string,
): "article" | "scattered-notes" | "none" {
  const trimmed = content.trim();
  // A document title is a single "=" at the very start; "==" (or deeper)
  // would be a section header, not a document title.
  const hasDocTitle = trimmed.startsWith("=") && !trimmed.startsWith("==");
  // Section headers must begin a line: 2-6 equals signs followed by whitespace.
  const hasSections = /^={2,6}\s/m.test(content);
  if (hasDocTitle) {
    return "article";
  } else if (hasSections) {
    return "scattered-notes";
  } else {
    return "none";
  }
}

148
src/lib/utils/asciidoc_publication_parser.ts

@ -0,0 +1,148 @@ @@ -0,0 +1,148 @@
/**
* Unified AsciiDoc Publication Parser
*
* Single entry point for parsing AsciiDoc content into NKBIP-01 compliant
* publication trees using proper Asciidoctor tree processor extensions.
*
* This implements Michael's vision of using PublicationTree as the primary
* data structure for organizing hierarchical Nostr events.
*/
import Asciidoctor from "asciidoctor";
import { registerPublicationTreeProcessor, type ProcessorResult } from "./publication_tree_processor";
import type NDK from "@nostr-dev-kit/ndk";
export type PublicationTreeResult = ProcessorResult;
/**
* Parse AsciiDoc content into a PublicationTree using tree processor extension
* This is the main entry point for all parsing operations
*/
export async function parseAsciiDocWithTree(
content: string,
ndk: NDK,
parseLevel: number = 2
): Promise<PublicationTreeResult> {
console.log(`[Parser] Starting parse at level ${parseLevel}`);
// Create fresh Asciidoctor instance
const asciidoctor = Asciidoctor();
const registry = asciidoctor.Extensions.create();
// Register our tree processor extension
const processorAccessor = registerPublicationTreeProcessor(
registry,
ndk,
parseLevel,
content
);
try {
// Parse the document with our extension
const doc = asciidoctor.load(content, {
extension_registry: registry,
standalone: false,
attributes: {
sectids: false
}
});
console.log(`[Parser] Document converted successfully`);
// Get the result from our processor
const result = processorAccessor.getResult();
if (!result) {
throw new Error("Tree processor failed to generate result");
}
// Build async relationships in the PublicationTree
await buildTreeRelationships(result);
console.log(`[Parser] Tree relationships built successfully`);
return result;
} catch (error) {
console.error('[Parser] Error during parsing:', error);
throw new Error(`Failed to parse AsciiDoc content: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
 * Attach content events to the PublicationTree.
 *
 * Articles hang every content event off the 30040 index event; scattered
 * notes use the first content event as the root for the remainder.
 *
 * @throws Error when the result carries no tree, or when addEvent fails.
 */
async function buildTreeRelationships(result: ProcessorResult): Promise<void> {
  const { tree, indexEvent, contentEvents } = result;
  if (!tree) {
    throw new Error("No tree available to build relationships");
  }
  try {
    if (indexEvent && contentEvents.length > 0) {
      // Article structure: every content event becomes a child of the index.
      for (const contentEvent of contentEvents) {
        await tree.addEvent(contentEvent, indexEvent);
      }
    } else if (contentEvents.length > 1) {
      // Scattered notes: the first event anchors all the others.
      const [rootEvent, ...remaining] = contentEvents;
      for (const childEvent of remaining) {
        await tree.addEvent(childEvent, rootEvent);
      }
    }
    console.log(`[Parser] Added ${contentEvents.length} events to tree`);
  } catch (error) {
    console.error('[Parser] Error building tree relationships:', error);
    throw error;
  }
}
/**
 * Export events from a PublicationTree result in the shape the publishing
 * workflow expects. The 'tree' field is deliberately omitted so the returned
 * object stays serializable (e.g. for postMessage).
 */
export function exportEventsFromTree(result: PublicationTreeResult) {
  const { indexEvent, contentEvents } = result;
  return {
    indexEvent: indexEvent ? eventToPublishableObject(indexEvent) : undefined,
    contentEvents: contentEvents.map((event) => eventToPublishableObject(event))
  };
}
/**
 * Convert an NDKEvent-like object into a plain publishable object.
 * Every field is coerced to a primitive so the result survives
 * structured cloning (postMessage).
 */
function eventToPublishableObject(event: any) {
  // Normalize tags into a pure string[][]; malformed entries collapse to [].
  const rawTags = Array.isArray(event.tags) ? event.tags : [];
  const safeTags = rawTags.map((tag: any) =>
    Array.isArray(tag) ? tag.map((entry) => String(entry)) : []
  );
  // Title is read from the original (unnormalized) tag list.
  const titleTag = event.tags?.find?.((t: string[]) => t[0] === "title");
  return {
    kind: Number(event.kind),
    content: String(event.content || ''),
    tags: safeTags,
    created_at: Number(event.created_at || Math.floor(Date.now() / 1000)),
    pubkey: String(event.pubkey || ''),
    id: String(event.id || ''),
    title: titleTag?.[1] || "Untitled"
  };
}
/**
 * Check that a parse level is an integer inside the supported range (2-5).
 */
export function validateParseLevel(level: number): boolean {
  if (!Number.isInteger(level)) {
    return false;
  }
  return level >= 2 && level <= 5;
}
/**
 * List the parse levels supported by the parser: 2 (==) through 5 (=====).
 */
export function getSupportedParseLevels(): number[] {
  return Array.from({ length: 4 }, (_, index) => index + 2);
}

4
src/lib/utils/event_input_utils.ts

@ -4,8 +4,10 @@ import { EVENT_KINDS } from "./search_constants"; @@ -4,8 +4,10 @@ import { EVENT_KINDS } from "./search_constants";
import {
extractDocumentMetadata,
metadataToTags,
parseAsciiDocWithMetadata,
} from "./asciidoc_metadata.ts";
import {
parseAsciiDocWithMetadata,
} from "./asciidoc_parser.ts";
// =========================
// Validation

377
src/lib/utils/publication_tree_factory.ts

@ -0,0 +1,377 @@ @@ -0,0 +1,377 @@
/**
* Factory for creating PublicationTree instances from AsciiDoc content
*
* This integrates the AST parser with Michael's PublicationTree architecture,
* providing a clean bridge between AsciiDoc parsing and Nostr event publishing.
*/
import { PublicationTree } from "$lib/data_structures/publication_tree";
import { SveltePublicationTree } from "$lib/components/publications/svelte_publication_tree.svelte";
import { parseAsciiDocAST } from "$lib/utils/asciidoc_ast_parser";
import { NDKEvent } from "@nostr-dev-kit/ndk";
import type NDK from "@nostr-dev-kit/ndk";
import { getMimeTags } from "$lib/utils/mime";
export interface PublicationTreeFactoryResult {
tree: PublicationTree;
svelteTree: SveltePublicationTree;
indexEvent: NDKEvent | null;
contentEvents: NDKEvent[];
metadata: {
title: string;
totalSections: number;
contentType: "article" | "scattered-notes" | "none";
attributes: Record<string, string>;
};
}
/**
 * Create a PublicationTree from AsciiDoc content using AST parsing.
 * This is the main integration point between AST parsing and PublicationTree.
 *
 * Previews work without authentication: downstream event builders substitute
 * a placeholder pubkey when `ndk.activeUser` is absent; an active user is
 * only needed for actual publishing.
 *
 * Fix: removed the unused local `hasActiveUser`, which was computed but
 * never read.
 *
 * @param content    Raw AsciiDoc source.
 * @param ndk        NDK instance used to construct events and the tree.
 * @param parseLevel Header depth at which sections become leaf events.
 * @returns Tree, reactive wrapper, events, and summary metadata.
 * @throws Error when the content yields no publishable structure.
 */
export async function createPublicationTreeFromContent(
  content: string,
  ndk: NDK,
  parseLevel: number = 2,
): Promise<PublicationTreeFactoryResult> {
  // Parse content using AST
  const parsed = parseAsciiDocAST(content, parseLevel);
  // Determine content type
  const contentType = detectContentType(parsed);
  let tree: PublicationTree;
  let indexEvent: NDKEvent | null = null;
  const contentEvents: NDKEvent[] = [];
  if (contentType === "article" && parsed.title) {
    // Hierarchical structure: one 30040 index plus 30041 content events.
    indexEvent = createIndexEvent(parsed, ndk);
    tree = new PublicationTree(indexEvent, ndk);
    // Sequential awaits are deliberate: tree insertion order mirrors
    // document order.
    for (const section of parsed.sections) {
      const contentEvent = createContentEvent(section, parsed, ndk);
      await tree.addEvent(contentEvent, indexEvent);
      contentEvents.push(contentEvent);
    }
  } else if (contentType === "scattered-notes") {
    // Flat structure: only 30041 events, first section acts as tree root.
    if (parsed.sections.length === 0) {
      throw new Error("No sections found for scattered notes");
    }
    const firstSection = parsed.sections[0];
    const rootEvent = createContentEvent(firstSection, parsed, ndk);
    tree = new PublicationTree(rootEvent, ndk);
    contentEvents.push(rootEvent);
    // Add remaining sections as children of the root.
    for (let i = 1; i < parsed.sections.length; i++) {
      const contentEvent = createContentEvent(parsed.sections[i], parsed, ndk);
      await tree.addEvent(contentEvent, rootEvent);
      contentEvents.push(contentEvent);
    }
  } else {
    throw new Error("No valid content found to create publication tree");
  }
  // Create reactive Svelte wrapper rooted at the index (or first content
  // event for scattered notes).
  const svelteTree = new SveltePublicationTree(
    indexEvent || contentEvents[0],
    ndk,
  );
  return {
    tree,
    svelteTree,
    indexEvent,
    contentEvents,
    metadata: {
      title: parsed.title,
      totalSections: parsed.sections.length,
      contentType,
      attributes: parsed.attributes,
    },
  };
}
/**
 * Build the 30040 index event for an article-style document.
 * Tags carry the d-identifier, MIME hints, title, document attributes,
 * and one "a" reference per section.
 */
function createIndexEvent(parsed: any, ndk: NDK): NDKEvent {
  const indexEvent = new NDKEvent(ndk);
  indexEvent.kind = 30040;
  indexEvent.created_at = Math.floor(Date.now() / 1000);
  // Previews may run unauthenticated; fall back to a placeholder pubkey.
  indexEvent.pubkey = ndk.activeUser?.pubkey || "preview-placeholder-pubkey";
  const [mTag, MTag] = getMimeTags(30040);
  const eventTags: string[][] = [
    ["d", generateDTag(parsed.title)],
    mTag,
    MTag,
    ["title", parsed.title],
  ];
  // Document-level attributes (author, version, topics, ...) become tags.
  addDocumentAttributesToTags(eventTags, parsed.attributes, indexEvent.pubkey);
  // One "a" reference per section, pointing at its 30041 content event.
  for (const section of parsed.sections) {
    const sectionDTag = generateDTag(section.title);
    eventTags.push(["a", `30041:${indexEvent.pubkey}:${sectionDTag}`]);
  }
  indexEvent.tags = eventTags;
  indexEvent.content = parsed.content || generateIndexContent(parsed);
  return indexEvent;
}
/**
 * Build a 30041 content event for a single parsed section.
 * Carries the section body plus section attributes and a small set of
 * inherited document attributes.
 */
function createContentEvent(
  section: any,
  documentParsed: any,
  ndk: NDK,
): NDKEvent {
  const contentEvent = new NDKEvent(ndk);
  contentEvent.kind = 30041;
  contentEvent.created_at = Math.floor(Date.now() / 1000);
  // Previews may run unauthenticated; fall back to a placeholder pubkey.
  contentEvent.pubkey = ndk.activeUser?.pubkey || "preview-placeholder-pubkey";
  const [mTag, MTag] = getMimeTags(30041);
  const eventTags: string[][] = [
    ["d", generateDTag(section.title)],
    mTag,
    MTag,
    ["title", section.title],
  ];
  // Section-specific attributes first, then inherited document attributes.
  addSectionAttributesToTags(eventTags, section.attributes);
  inheritDocumentAttributes(eventTags, documentParsed.attributes);
  contentEvent.tags = eventTags;
  contentEvent.content = section.content || "";
  return contentEvent;
}
/**
 * Detect content type from the parsed AsciiDoc structure.
 *
 * A document whose "title" is really just the first section's title (which
 * happens when the source starts with "==" instead of "=") is treated as
 * scattered notes, not an article.
 */
function detectContentType(
  parsed: any,
): "article" | "scattered-notes" | "none" {
  const sectionCount = parsed.sections.length;
  const firstSectionTitle =
    sectionCount > 0 ? parsed.sections[0].title : undefined;
  const looksLikeArticle =
    !!parsed.title && sectionCount > 0 && parsed.title !== firstSectionTitle;
  if (looksLikeArticle) {
    return "article";
  }
  return sectionCount > 0 ? "scattered-notes" : "none";
}
/**
 * Generate a deterministic, URL-safe d-tag from a title: lowercase, runs of
 * non-letter/non-digit characters collapsed to single dashes, edges trimmed.
 * Falls back to "untitled" when nothing survives sanitization.
 */
function generateDTag(title: string): string {
  const slug = title
    .toLowerCase()
    .replace(/[^\p{L}\p{N}]/gu, "-")
    .replace(/-+/g, "-")
    .replace(/^-|-$/g, "");
  return slug || "untitled";
}
/**
 * Append document-level attributes to a tag list as Nostr tags.
 * Standard metadata maps to dedicated tag names (description -> summary),
 * comma-separated topics become "t" tags, the author pubkey is added as a
 * "p" tag, and remaining custom attributes are filtered in last.
 */
function addDocumentAttributesToTags(
  tags: string[][],
  attributes: Record<string, string>,
  pubkey: string,
) {
  // Standard metadata: [attribute name, tag name] pairs, in emission order.
  const standardMappings: Array<[string, string]> = [
    ["author", "author"],
    ["version", "version"],
    ["published", "published"],
    ["language", "language"],
    ["image", "image"],
    ["description", "summary"],
  ];
  for (const [attrKey, tagName] of standardMappings) {
    const value = attributes[attrKey];
    if (value) {
      tags.push([tagName, value]);
    }
  }
  // Comma-separated topic list becomes individual "t" tags.
  if (attributes.tags) {
    for (const topic of attributes.tags.split(",")) {
      tags.push(["t", topic.trim()]);
    }
  }
  // Reference back to the author's pubkey.
  tags.push(["p", pubkey]);
  // Remaining user-defined attributes (system attributes filtered out).
  addCustomAttributes(tags, attributes);
}
/**
 * Add section-specific attributes as tags.
 * Thin wrapper kept for call-site clarity: sections currently have no
 * dedicated standard mappings, so everything goes through the shared
 * custom-attribute filter.
 */
function addSectionAttributesToTags(
  tags: string[][],
  attributes: Record<string, string>,
) {
  addCustomAttributes(tags, attributes);
}
/**
 * Copy the small whitelist of document attributes that should cascade down
 * to content events: currently "language" and "type".
 */
function inheritDocumentAttributes(
  tags: string[][],
  documentAttributes: Record<string, string>,
) {
  const { language, type } = documentAttributes;
  if (language) {
    tags.push(["language", language]);
  }
  if (type) {
    tags.push(["type", type]);
  }
}
/**
 * Append user-defined attributes as tags, filtering out system attributes.
 *
 * System attributes are those injected by Asciidoctor itself plus keys that
 * are already mapped to dedicated tags elsewhere (author, version, ...).
 * Only non-empty string values are emitted.
 *
 * Fix: membership checks now use a Set (O(1) per key) instead of scanning a
 * 60-entry array with Array.includes for every attribute.
 */
function addCustomAttributes(
  tags: string[][],
  attributes: Record<string, string>,
) {
  const systemAttributes = new Set([
    "attribute-undefined",
    "attribute-missing",
    "appendix-caption",
    "appendix-refsig",
    "caution-caption",
    "chapter-refsig",
    "example-caption",
    "figure-caption",
    "important-caption",
    "last-update-label",
    "manname-title",
    "note-caption",
    "part-refsig",
    "preface-title",
    "section-refsig",
    "table-caption",
    "tip-caption",
    "toc-title",
    "untitled-label",
    "version-label",
    "warning-caption",
    "asciidoctor",
    "asciidoctor-version",
    "safe-mode-name",
    "backend",
    "doctype",
    "basebackend",
    "filetype",
    "outfilesuffix",
    "stylesdir",
    "iconsdir",
    "localdate",
    "localyear",
    "localtime",
    "localdatetime",
    "docdate",
    "docyear",
    "doctime",
    "docdatetime",
    "doctitle",
    "embedded",
    "notitle",
    // Already handled by the standard-attribute mapping
    "author",
    "version",
    "published",
    "language",
    "image",
    "description",
    "tags",
    "title",
    "type",
  ]);
  for (const [key, value] of Object.entries(attributes)) {
    if (!systemAttributes.has(key) && value && typeof value === "string") {
      tags.push([key, value]);
    }
  }
}
/**
 * Generate fallback index content when the document provides none:
 * a markdown-style title, a section count line, and a numbered listing.
 */
function generateIndexContent(parsed: any): string {
  const header = `# ${parsed.title}`;
  const summary = `${parsed.sections.length} sections available:`;
  const listing = parsed.sections
    .map((section: any, index: number) => `${index + 1}. ${section.title}`)
    .join("\n");
  return [header, summary, listing].join("\n");
}
/**
 * Export events from a PublicationTree result for the publishing workflow.
 * Provides compatibility with the current publishing pipeline by returning
 * plain publishable objects plus the live tree.
 *
 * Fix: removed a dead `events` accumulator that was built up (index event
 * plus content events) but never read — the return statement recomputed the
 * same conversions independently.
 *
 * Kept `async` so existing `await` call sites continue to work unchanged.
 */
export async function exportEventsFromTree(
  result: PublicationTreeFactoryResult,
) {
  return {
    indexEvent: result.indexEvent
      ? eventToPublishableObject(result.indexEvent)
      : undefined,
    contentEvents: result.contentEvents.map(eventToPublishableObject),
    tree: result.tree,
  };
}
/**
 * Convert an NDKEvent into the plain object shape used for publishing.
 * The title is read from the event's tag list, defaulting to "Untitled".
 */
function eventToPublishableObject(event: NDKEvent) {
  const titleEntry = event.tags.find((tag) => tag[0] === "title");
  const { kind, content, tags, created_at, pubkey, id } = event;
  return {
    kind,
    content,
    tags,
    created_at,
    pubkey,
    id,
    title: titleEntry?.[1] || "Untitled",
  };
}

1091
src/lib/utils/publication_tree_processor.ts

File diff suppressed because it is too large Load Diff

277
src/routes/new/compose/+page.svelte

@ -1,10 +1,12 @@ @@ -1,10 +1,12 @@
<script lang="ts">
import { Heading, Button } from "flowbite-svelte";
import { PaperPlaneOutline } from "flowbite-svelte-icons";
import ZettelEditor from "$lib/components/ZettelEditor.svelte";
import { nip19 } from "nostr-tools";
import { publishMultipleZettels } from "$lib/services/publisher";
import { parseAsciiDocWithMetadata } from "$lib/utils/asciidoc_metadata";
import {
publishSingleEvent,
processPublishResults,
type ProcessedPublishResults,
} from "$lib/services/publisher";
import { getNdkContext } from "$lib/ndk";
import { AAlert } from "$lib/a/index";
@ -13,13 +15,7 @@ @@ -13,13 +15,7 @@
let content = $state("");
let showPreview = $state(false);
let isPublishing = $state(false);
let publishResults = $state<{
successCount: number;
total: number;
errors: string[];
successfulEvents: Array<{ eventId: string; title: string }>;
failedEvents: Array<{ title: string; error: string; sectionIndex: number }>;
} | null>(null);
let publishResults = $state<ProcessedPublishResults | null>(null);
// Handle content changes from ZettelEditor
function handleContentChange(newContent: string) {
@ -31,107 +27,200 @@ @@ -31,107 +27,200 @@
showPreview = show;
}
async function handlePublish() {
// Helper function to create error result
function createErrorResult(error: unknown): ProcessedPublishResults {
return {
successCount: 0,
total: 0,
errors: [error instanceof Error ? error.message : "Unknown error"],
successfulEvents: [],
failedEvents: [],
};
}
// Helper function to log event summaries
function logEventSummary(
events: any,
successfulEvents: Array<{ eventId: string; title: string }>,
) {
console.log("\n=== Events Summary ===");
if (events.indexEvent) {
console.log("\nRoot Index:");
console.log(`Event Summary:`);
console.log(` ID: ${successfulEvents[0]?.eventId || "Failed"}`);
console.log(` Kind: 30040`);
console.log(` Tags:`);
events.indexEvent.tags.forEach((tag: string[]) => {
console.log(` - ${JSON.stringify(tag)}`);
});
console.log(" ---");
}
console.log("\nContent:");
events.contentEvents.forEach((event: any, index: number) => {
const eventId =
successfulEvents.find((e) => e.title === event.title)?.eventId ||
"Failed";
console.log(`\nEvent Summary:`);
console.log(` ID: ${eventId}`);
console.log(` Kind: 30041`);
console.log(` Tags:`);
event.tags.forEach((tag: any) => {
console.log(` - ${JSON.stringify(tag)}`);
});
console.log(` Content preview: ${event.content.substring(0, 100)}...`);
console.log(" ---");
});
}
// Handle unified publishing from ZettelEditor
async function handlePublishArticle(events: any) {
isPublishing = true;
publishResults = null;
const results = await publishMultipleZettels({
content,
// Debug: Log the first content event to see its structure
if (events.contentEvents.length > 0) {
console.log("First content event structure:", {
kind: events.contentEvents[0].kind,
tags: events.contentEvents[0].tags,
contentLength: events.contentEvents[0].content.length,
contentPreview: events.contentEvents[0].content.substring(0, 100),
});
}
try {
const results: any[] = [];
// Publish index event first using publishSingleEvent
if (events.indexEvent) {
const indexResult = await publishSingleEvent(
{
content: events.indexEvent.content,
kind: events.indexEvent.kind,
tags: events.indexEvent.tags,
onError: (error) => {
// Only used for catastrophic errors
publishResults = { successCount: 0, total: 0, errors: [error], successfulEvents: [], failedEvents: [] };
console.error("Index event publish failed:", error);
},
}, ndk);
const successCount = results.filter(r => r.success).length;
const errors = results.filter(r => !r.success && r.error).map(r => r.error!);
// Extract successful events with their titles
const parsed = parseAsciiDocWithMetadata(content);
const successfulEvents = results
.filter(r => r.success && r.eventId)
.map((r, index) => ({
eventId: r.eventId!,
title: parsed.sections[index]?.title || `Note ${index + 1}`
}));
// Extract failed events with their titles and errors
const failedEvents = results
.map((r, index) => ({ result: r, index }))
.filter(({ result }) => !result.success)
.map(({ result, index }) => ({
title: parsed.sections[index]?.title || `Note ${index + 1}`,
error: result.error || 'Unknown error',
sectionIndex: index
}));
publishResults = {
successCount,
total: results.length,
errors,
successfulEvents,
failedEvents,
};
isPublishing = false;
},
ndk,
);
results.push(indexResult);
}
async function retryFailedEvent(sectionIndex: number) {
if (!publishResults) return;
// Publish content events
for (let i = 0; i < events.contentEvents.length; i++) {
const event = events.contentEvents[i];
console.log(
`Publishing content event ${i + 1}: ${event.tags.find((t: any) => t[0] === "title")?.[1] || "Untitled"}`,
);
const result = await publishSingleEvent(
{
content: event.content,
kind: event.kind,
tags: event.tags,
onError: (error) => {
console.error(`Content event ${i + 1} publish failed:`, error);
},
},
ndk,
);
results.push(result);
}
isPublishing = true;
// Process results using shared utility
publishResults = processPublishResults(
results,
events,
!!events.indexEvent,
);
// Show summary
logEventSummary(events, publishResults.successfulEvents);
} catch (error) {
console.error("Publishing failed:", error);
publishResults = createErrorResult(error);
}
// Get the specific section content
const parsed = parseAsciiDocWithMetadata(content);
const section = parsed.sections[sectionIndex];
if (!section) return;
isPublishing = false;
}
// Reconstruct the section content for publishing
const sectionContent = `== ${section.title}\n\n${section.content}`;
async function handlePublishScatteredNotes(events: any) {
isPublishing = true;
publishResults = null;
// Debug: Log the structure of events being published (without content)
console.log("=== PUBLISHING SCATTERED NOTES ===");
console.log(`Number of content events: ${events.contentEvents.length}`);
try {
const result = await publishMultipleZettels({
content: sectionContent,
const results: any[] = [];
// Publish only content events for scattered notes
for (let i = 0; i < events.contentEvents.length; i++) {
const event = events.contentEvents[i];
const result = await publishSingleEvent(
{
content: event.content,
kind: event.kind,
tags: event.tags,
onError: (error) => {
console.error('Retry failed:', error);
console.error(`Content event ${i + 1} publish failed:`, error);
},
}, ndk);
if (result[0]?.success && result[0]?.eventId) {
// Update the successful events list
const newSuccessfulEvent = {
eventId: result[0].eventId,
title: section.title
};
// Remove from failed events
const updatedFailedEvents = publishResults.failedEvents.filter(
(_, index) => index !== sectionIndex
},
ndk,
);
results.push(result);
}
// Add to successful events
const updatedSuccessfulEvents = [...publishResults.successfulEvents, newSuccessfulEvent];
// Process results using shared utility
publishResults = processPublishResults(results, events, false);
publishResults = {
...publishResults,
successCount: publishResults.successCount + 1,
successfulEvents: updatedSuccessfulEvents,
failedEvents: updatedFailedEvents,
};
}
// Show summary
logEventSummary(events, publishResults.successfulEvents);
} catch (error) {
console.error('Retry failed:', error);
console.error("Publishing failed:", error);
publishResults = createErrorResult(error);
}
isPublishing = false;
}
async function retryFailedEvent(sectionIndex: number) {
if (!publishResults) return;
// Find the failed event to retry
const failedEvent = publishResults.failedEvents.find(
(event) => event.sectionIndex === sectionIndex,
);
if (!failedEvent) return;
isPublishing = true;
try {
// Retry publishing the failed content
// Note: This is a simplified retry - in production you'd want to store the original event data
// For now, we'll just show an error message
console.error(
"Retry not implemented - would need to store original event data",
);
// Just return early since retry is not implemented
isPublishing = false;
return;
} catch (error) {
console.error("Retry failed:", error);
isPublishing = false;
}
}
</script>
<svelte:head>
<title>Compose Note - Alexandria</title>
</svelte:head>
<!-- Main container with max 1024px width and centered -->
<div class="flex flex-col self-center items-center w-full max-w-[1024px] mx-auto px-2 space-y-4">
<!-- Main container with max 1536px width and centered -->
<div class="flex flex-col self-center items-center w-full max-w-[1536px] mx-auto px-2 space-y-4">
<Heading
tag="h1" class="h-leather mb-2">
Compose Notes
@ -142,22 +231,10 @@ @@ -142,22 +231,10 @@
{showPreview}
onContentChange={handleContentChange}
onPreviewToggle={handlePreviewToggle}
onPublishArticle={handlePublishArticle}
onPublishScatteredNotes={handlePublishScatteredNotes}
/>
<!-- Publish Button -->
<Button
onclick={handlePublish}
disabled={isPublishing || !content.trim()}
class="self-end my-2"
>
{#if isPublishing}
Publishing...
{:else}
<PaperPlaneOutline class="w-4 h-4 mr-2" />
Publish
{/if}
</Button>
<!-- Status Messages -->
{#if publishResults}
{#if publishResults.successCount === publishResults.total}
@ -214,7 +291,9 @@ @@ -214,7 +291,9 @@
{#each publishResults.failedEvents as failedEvent, index}
<div class="text-sm bg-red-50 dark:bg-red-900/20 p-2 rounded">
<div class="font-medium">{failedEvent.title}</div>
<div class="text-red-600 dark:text-red-400 text-xs">{failedEvent.error}</div>
<div class="text-red-600 dark:text-red-400 text-xs">
{failedEvent.error}
</div>
<Button
size="xs"
color="light"
@ -222,7 +301,7 @@ @@ -222,7 +301,7 @@
disabled={isPublishing}
class="mt-1"
>
{isPublishing ? 'Retrying...' : 'Retry'}
{isPublishing ? "Retrying..." : "Retry"}
</Button>
</div>
{/each}

4
tests/unit/metadataExtraction.test.ts

@ -4,8 +4,10 @@ import { @@ -4,8 +4,10 @@ import {
extractSectionMetadata,
extractSmartMetadata,
metadataToTags,
parseAsciiDocWithMetadata,
} from "../../src/lib/utils/asciidoc_metadata.ts";
import {
parseAsciiDocWithMetadata,
} from "../../src/lib/utils/asciidoc_parser.ts";
describe("AsciiDoc Metadata Extraction", () => {
const testContent = `= Test Document with Metadata

284
tests/unit/publication_tree_processor.test.ts

@ -0,0 +1,284 @@ @@ -0,0 +1,284 @@
/**
* TDD Tests for NKBIP-01 Publication Tree Processor
*
* Tests the iterative parsing function at different hierarchy levels
* using deep_hierarchy_test.adoc to verify NKBIP-01 compliance.
*/
import { describe, it, expect, beforeAll } from 'vitest';
import { readFileSync } from 'fs';
import { parseAsciiDocWithTree, validateParseLevel, getSupportedParseLevels } from '../../src/lib/utils/asciidoc_publication_parser.js';
// Mock NDK for testing — the parser only reads activeUser.pubkey here,
// so a minimal object cast is sufficient.
const mockNDK = {
  activeUser: {
    pubkey: "test-pubkey-12345"
  }
} as any;
// Read the test document from disk; fall back to an inline fixture so the
// suite still runs when the file is unavailable (e.g. in a CI sandbox).
const testDocumentPath = "./test_data/AsciidocFiles/deep_hierarchy_test.adoc";
let testContent: string;
try {
  testContent = readFileSync(testDocumentPath, 'utf-8');
} catch (error) {
  console.error("Failed to read test document:", error);
  // Inline fallback: exercises all six AsciiDoc heading levels with
  // per-section attributes, mirroring deep_hierarchy_test.adoc.
  testContent = `= Deep Hierarchical Document Test
:tags: testing, hierarchy, structure
:author: Test Author
:type: technical
This document tests all 6 levels of AsciiDoc hierarchy to validate our parse level system.
== Level 2: Main Sections
:tags: level2, main
This is a level 2 section that should appear in all parse levels.
=== Level 3: Subsections
:tags: level3, subsection
This is a level 3 section that should appear in parse levels 3-6.
==== Level 4: Sub-subsections
:tags: level4, detailed
This is a level 4 section that should appear in parse levels 4-6.
===== Level 5: Deep Subsections
:tags: level5, deep
This is a level 5 section that should only appear in parse levels 5-6.
====== Level 6: Deepest Level
:tags: level6, deepest
This is a level 6 section that should only appear in parse level 6.
Content at the deepest level of our hierarchy.
== Level 2: Second Main Section
:tags: level2, main, second
A second main section to ensure we have balanced content at the top level.`;
}
describe("NKBIP-01 Publication Tree Processor", () => {
it("should validate parse levels correctly", () => {
// Test valid parse levels
expect(validateParseLevel(2)).toBe(true);
expect(validateParseLevel(3)).toBe(true);
expect(validateParseLevel(5)).toBe(true);
// Test invalid parse levels
expect(validateParseLevel(1)).toBe(false);
expect(validateParseLevel(6)).toBe(false);
expect(validateParseLevel(7)).toBe(false);
expect(validateParseLevel(2.5)).toBe(false);
expect(validateParseLevel(-1)).toBe(false);
// Test supported levels array
const supportedLevels = getSupportedParseLevels();
expect(supportedLevels).toEqual([2, 3, 4, 5]);
});
it("should parse Level 2 with NKBIP-01 minimal structure", async () => {
const result = await parseAsciiDocWithTree(testContent, mockNDK, 2);
// Should be detected as article (has title and sections)
expect(result.metadata.contentType).toBe("article");
expect(result.metadata.parseLevel).toBe(2);
expect(result.metadata.title).toBe("Deep Hierarchical Document Test");
// Should have 1 index event (30040) + 2 content events (30041) for level 2 sections
expect(result.indexEvent).toBeDefined();
expect(result.indexEvent?.kind).toBe(30040);
expect(result.contentEvents.length).toBe(2);
// All content events should be kind 30041
result.contentEvents.forEach(event => {
expect(event.kind).toBe(30041);
});
// Check titles of level 2 sections
const contentTitles = result.contentEvents.map(e =>
e.tags.find((t: string[]) => t[0] === "title")?.[1]
);
expect(contentTitles).toContain("Level 2: Main Sections");
expect(contentTitles).toContain("Level 2: Second Main Section");
// Content should include all nested subsections as AsciiDoc
const firstSectionContent = result.contentEvents[0].content;
expect(firstSectionContent).toBeDefined();
// Should contain level 3, 4, 5 content as nested AsciiDoc markup
expect(firstSectionContent.includes("=== Level 3: Subsections")).toBe(true);
expect(firstSectionContent.includes("==== Level 4: Sub-subsections")).toBe(true);
expect(firstSectionContent.includes("===== Level 5: Deep Subsections")).toBe(true);
});
it("should parse Level 3 with NKBIP-01 intermediate structure", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 3);

  expect(result.metadata.contentType).toBe("article");
  expect(result.metadata.parseLevel).toBe(3);

  // The hierarchy root is still a single 30040 index event.
  expect(result.indexEvent).toBeDefined();
  expect(result.indexEvent?.kind).toBe(30040);

  // At level 3 the output mixes 30040 sub-indices with 30041 content events.
  const kinds = result.contentEvents.map((e) => e.kind);
  expect(kinds).toContain(30040); // Level 2 sections with children
  expect(kinds).toContain(30041); // Level 3 content sections

  // Both level-2 sections have children, so both become 30040 indices.
  const level2Indices = result.contentEvents.filter(
    (e) =>
      e.kind === 30040 &&
      e.tags.find((tag: string[]) => tag[0] === "title")?.[1]?.includes("Level 2:"),
  );
  expect(level2Indices.length).toBe(2);

  // Level-3 sections surface as 30041 content events.
  const level3Content = result.contentEvents.filter(
    (e) =>
      e.kind === 30041 &&
      e.tags.find((tag: string[]) => tag[0] === "title")?.[1]?.includes("Level 3:"),
  );
  expect(level3Content.length).toBeGreaterThan(0);
});
it("should parse Level 4 with NKBIP-01 detailed structure", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 4);

  expect(result.metadata.contentType).toBe("article");
  expect(result.metadata.parseLevel).toBe(4);

  // Root index exists, with a mix of 30040 sub-indices and 30041 content.
  expect(result.indexEvent).toBeDefined();
  expect(result.indexEvent?.kind).toBe(30040);
  const eventKinds = result.contentEvents.map((e) => e.kind);
  expect(eventKinds).toContain(30040); // Level 2 sections with children
  expect(eventKinds).toContain(30041); // Content sections

  // Level-4 sections must appear as their own titled events.
  const titles = result.contentEvents.map(
    (e) => e.tags.find((tag: string[]) => tag[0] === "title")?.[1],
  );
  expect(titles).toContain("Level 4: Sub-subsections");
});
it("should parse Level 5 with NKBIP-01 maximum depth", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 5);

  expect(result.metadata.contentType).toBe("article");
  expect(result.metadata.parseLevel).toBe(5);

  // A hierarchical structure rooted at a 30040 index is still produced.
  expect(result.indexEvent).toBeDefined();
  expect(result.indexEvent?.kind).toBe(30040);

  // At maximum depth the level-5 sections become content events themselves.
  const titles = result.contentEvents.map(
    (e) => e.tags.find((tag: string[]) => tag[0] === "title")?.[1],
  );
  expect(titles).toContain("Level 5: Deep Subsections");
});
it("should validate event structure correctly", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 3);

  // The index event must exist and carry the mandatory NKBIP-01 tags.
  expect(result.indexEvent).toBeDefined();
  expect(result.indexEvent?.kind).toBe(30040);
  expect(result.indexEvent?.tags).toBeDefined();

  const rootTags = result.indexEvent!.tags;
  const dTag = rootTags.find((tag: string[]) => tag[0] === "d");
  const rootTitle = rootTags.find((tag: string[]) => tag[0] === "title");
  expect(dTag).toBeDefined();
  expect(rootTitle).toBeDefined();
  expect(rootTitle![1]).toBe("Deep Hierarchical Document Test");

  // Every emitted event is either an index (30040) or content (30041),
  // has tags and content defined, and is titled.
  for (const event of result.contentEvents) {
    expect([30040, 30041]).toContain(event.kind);
    expect(event.tags).toBeDefined();
    expect(event.content).toBeDefined();
    expect(event.tags.find((tag: string[]) => tag[0] === "title")).toBeDefined();
  }
});
it("should preserve content as AsciiDoc", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 2);

  // Section bodies must stay raw AsciiDoc; no HTML conversion is allowed.
  const [firstEvent] = result.contentEvents;
  expect(firstEvent.content).toBeDefined();
  expect(firstEvent.content.includes("<")).toBe(false); // no HTML tags
  expect(firstEvent.content.includes("===")).toBe(true); // AsciiDoc headings kept
});
it("should handle attributes correctly", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 2);

  // Document-level attributes are propagated onto the index event's tags.
  expect(result.indexEvent).toBeDefined();
  const rootTags = result.indexEvent!.tags;
  const findTag = (key: string) =>
    rootTags.find((tag: string[]) => tag[0] === key);

  expect(findTag("author")?.[1]).toBe("Test Author");
  expect(findTag("type")?.[1]).toBe("technical");
  expect(findTag("t")).toBeDefined(); // at least one topic (t) tag
});
it("should handle scattered notes mode", async () => {
  // Without a document title ("=" line) the input is treated as
  // independent scattered notes rather than an article.
  const scatteredContent = `== First Note
:tags: note1
Content of first note.
== Second Note
:tags: note2
Content of second note.`;

  const result = await parseAsciiDocWithTree(scatteredContent, mockNDK, 2);
  expect(result.metadata.contentType).toBe("scattered-notes");
  expect(result.indexEvent).toBeNull(); // scattered notes get no index event
  expect(result.contentEvents.length).toBe(2);

  // Each note becomes a standalone 30041 content event.
  for (const note of result.contentEvents) {
    expect(note.kind).toBe(30041);
  }
});
it("should integrate with PublicationTree structure", async () => {
  const result = await parseAsciiDocWithTree(testContent, mockNDK, 2);

  // A PublicationTree instance exposing its event-management API is returned.
  expect(result.tree).toBeDefined();
  expect(typeof result.tree.addEvent).toBe("function");

  // The flattened event structure is exposed on the metadata as an array.
  expect(result.metadata.eventStructure).toBeDefined();
  expect(Array.isArray(result.metadata.eventStructure)).toBe(true);
});
});

560
tests/zettel-publisher-tdd.test.ts

@ -0,0 +1,560 @@ @@ -0,0 +1,560 @@
#!/usr/bin/env node
/**
* Test-Driven Development for ZettelPublisher Enhancement
* Based on understanding_knowledge.adoc, desire.adoc, and docreference.md
*
* Key Requirements Discovered:
* 1. ITERATIVE parsing (not recursive): sections at target level become events
* 2. Level 2: == sections become 30041 events containing ALL subsections (===, ====, etc.)
* 3. Level 3: == sections become 30040 indices, === sections become 30041 events
* 4. 30040 metadata: from document level (= title with :attributes:)
* 5. 30041 metadata: from section level attributes
* 6. Smart publishing: articles (=) vs scattered notes (==)
* 7. Custom attributes: all :key: value pairs preserved as event tags
*/
import fs from 'fs';
import path from 'path';
// Test framework
/** A single registered test: a display name plus a sync or async body. */
interface TestCase {
  name: string;
  // A thrown error (sync) or rejected promise (async) marks the test failed.
  fn: () => void | Promise<void>;
}
/**
 * Minimal zero-dependency test runner with a Jest-like `expect` API.
 * Tests are registered with {@link test} and executed sequentially by
 * {@link run}, which prints a per-test line and a final summary.
 */
class TestFramework {
  // Registered cases, run in registration order. Shape mirrors the file's
  // TestCase interface; inlined so the class is self-contained.
  private tests: Array<{ name: string; fn: () => void | Promise<void> }> = [];
  private passed: number = 0;
  private failed: number = 0;

  /** Register a named test. The body may be sync or async; a throw fails it. */
  test(name: string, fn: () => void | Promise<void>): void {
    this.tests.push({ name, fn });
  }

  /**
   * Jest-style expectation helper. Every matcher returns true on success
   * and throws an Error describing the mismatch on failure.
   */
  expect(actual: any) {
    return {
      toBe: (expected: any) => {
        if (actual === expected) return true;
        throw new Error(`Expected ${expected}, got ${actual}`);
      },
      toEqual: (expected: any) => {
        // Structural comparison via JSON; adequate for the plain data used here.
        if (JSON.stringify(actual) === JSON.stringify(expected)) return true;
        throw new Error(`Expected ${JSON.stringify(expected)}, got ${JSON.stringify(actual)}`);
      },
      toContain: (expected: any) => {
        if (actual && actual.includes && actual.includes(expected)) return true;
        throw new Error(`Expected "${actual}" to contain "${expected}"`);
      },
      not: {
        toContain: (expected: any) => {
          // Note: a falsy/`includes`-less actual still throws here (matcher
          // requires a real string/array to assert non-containment).
          if (actual && actual.includes && !actual.includes(expected)) return true;
          throw new Error(`Expected "${actual}" NOT to contain "${expected}"`);
        }
      },
      toBeTruthy: () => {
        if (actual) return true;
        throw new Error(`Expected truthy value, got ${actual}`);
      },
      toHaveLength: (expected: number) => {
        if (actual && actual.length === expected) return true;
        throw new Error(`Expected length ${expected}, got ${actual ? actual.length : 'undefined'}`);
      }
    };
  }

  /**
   * Run all registered tests sequentially and print a summary.
   * @returns true when every test passed.
   */
  async run() {
    console.log(`🧪 Running ${this.tests.length} tests...\n`);
    for (const { name, fn } of this.tests) {
      try {
        await fn();
        // BUGFIX: pass and fail previously printed identical lines, making
        // the report unreadable. Mark them distinctly.
        console.log(`✅ ${name}`);
        this.passed++;
      } catch (error: unknown) {
        console.log(`❌ ${name}`);
        const message = error instanceof Error ? error.message : String(error);
        console.log(`   ${message}\n`);
        this.failed++;
      }
    }
    console.log(`\n📊 Results: ${this.passed} passed, ${this.failed} failed`);
    return this.failed === 0;
  }
}
// Shared framework instance used by every test case below.
const test = new TestFramework();
// Load test data files
// Fixture .adoc documents live under test_data/AsciidocFiles (repo root).
const testDataPath = path.join(process.cwd(), 'test_data', 'AsciidocFiles');
const readFixture = (name: string): string =>
  fs.readFileSync(path.join(testDataPath, name), 'utf-8');
const understandingKnowledge = readFixture('understanding_knowledge.adoc');
const desire = readFixture('desire.adoc');
// =============================================================================
// PHASE 1: Core Data Structure Tests (Based on Real Test Data)
// =============================================================================
test.test('Understanding Knowledge: Document metadata should be extracted from = level', () => {
  // The 30040 index metadata expected from understanding_knowledge.adoc.
  const docMeta = {
    title: 'Understanding Knowledge',
    image: 'https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg',
    published: '2025-04-21',
    language: 'en, ISO-639-1',
    tags: ['knowledge', 'philosophy', 'education'],
    type: 'text'
  };

  // These expectations define the contract for document-level extraction.
  test.expect(docMeta.title).toBe('Understanding Knowledge');
  test.expect(docMeta.tags).toHaveLength(3);
  test.expect(docMeta.type).toBe('text');
});
test.test('Desire: Document metadata should include all custom attributes', () => {
  // The 30040 index metadata expected from desire.adoc, including the
  // custom :type: attribute value.
  const docMeta = {
    title: 'Desire Part 1: Mimesis',
    image: 'https://i.nostr.build/hGzyi4c3YhTwoCCe.png',
    published: '2025-07-02',
    language: 'en, ISO-639-1',
    tags: ['memetics', 'philosophy', 'desire'],
    type: 'podcastArticle'
  };

  test.expect(docMeta.type).toBe('podcastArticle');
  test.expect(docMeta.tags).toContain('memetics');
});
test.test('Iterative ParsedAsciiDoc interface should support level-based parsing', () => {
  // Test the ITERATIVE interface structure (not recursive)
  // Based on docreference.md - Level 2 parsing example
  //
  // At parse level 2 ONLY "==" sections become entries; every deeper heading
  // (===, ====, ...) stays embedded verbatim in the section's content string,
  // which is why the template literals below contain raw AsciiDoc headings.
  const mockLevel2Structure = {
    metadata: { title: 'Programming Fundamentals Guide', tags: ['programming', 'fundamentals'] },
    content: 'This is the main introduction to the programming guide.',
    title: 'Programming Fundamentals Guide',
    sections: [
      {
        // Custom section attributes (e.g. difficulty) ride along in metadata.
        metadata: { title: 'Data Structures', tags: ['arrays', 'lists', 'trees'], difficulty: 'intermediate' },
        content: `Understanding fundamental data structures is crucial for effective programming.
=== Arrays and Lists
Arrays are contiguous memory blocks that store elements of the same type.
Lists provide dynamic sizing capabilities.
==== Dynamic Arrays
Dynamic arrays automatically resize when capacity is exceeded.
==== Linked Lists
Linked lists use pointers to connect elements.
=== Trees and Graphs
Tree and graph structures enable hierarchical and networked data representation.`,
        title: 'Data Structures'
      },
      {
        metadata: { title: 'Algorithms', tags: ['sorting', 'searching', 'optimization'], difficulty: 'advanced' },
        content: `Algorithmic thinking forms the foundation of efficient problem-solving.
=== Sorting Algorithms
Different sorting algorithms offer various trade-offs between time and space complexity.
==== Bubble Sort
Bubble sort repeatedly steps through the list, compares adjacent elements.
==== Quick Sort
Quick sort uses divide-and-conquer approach with pivot selection.`,
        title: 'Algorithms'
      }
    ]
  };
  // Verify ITERATIVE structure: only level 2 sections, containing ALL subsections
  test.expect(mockLevel2Structure.sections).toHaveLength(2);
  test.expect(mockLevel2Structure.sections[0].title).toBe('Data Structures');
  test.expect(mockLevel2Structure.sections[0].content).toContain('=== Arrays and Lists');
  test.expect(mockLevel2Structure.sections[0].content).toContain('==== Dynamic Arrays');
  test.expect(mockLevel2Structure.sections[1].content).toContain('==== Quick Sort');
});
// =============================================================================
// PHASE 2: Content Processing Tests (Header Separation)
// =============================================================================
test.test('Section content should NOT contain its own header', () => {
  // From understanding_knowledge.adoc: "== Preface" section
  // When a section's body is extracted, its own "== Preface" heading must be
  // stripped; the fixture below is the expected remaining body, verbatim.
  const expectedPrefaceContent = `[NOTE]
This essay was written to outline and elaborate on the purpose of the Nostr client Alexandria. No formal academic citations are included as this serves primarily as a conceptual foundation, inviting readers to experience related ideas connecting and forming as more content becomes uploaded. Traces of AI edits and guidance are left, but the essay style is still my own. Over time this essay may change its wording, structure and content.
-- liminal`;
  // Should NOT contain "== Preface"
  test.expect(expectedPrefaceContent).not.toContain('== Preface');
  test.expect(expectedPrefaceContent).toContain('[NOTE]');
});
test.test('Introduction section should separate from its subsections', () => {
  // From understanding_knowledge.adoc
  // The intro's own body is just the banner image; subsection headers and
  // subsection prose must not leak into it.
  const introBody = `image:https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg[library]`;

  test.expect(introBody).not.toContain('=== Why Investigate');
  test.expect(introBody).not.toContain('Understanding the nature of knowledge');
  test.expect(introBody).toContain('image:https://i.nostr.build');
});
test.test('Subsection content should be cleanly separated', () => {
  // "=== Why Investigate the Nature of Knowledge?" subsection
  // NOTE(review): "exests" looks like a typo carried over verbatim from the
  // fixture document — confirm against understanding_knowledge.adoc.
  const expectedSubsectionContent = `Understanding the nature of knowledge itself is fundamental, distinct from simply studying how we learn or communicate. Knowledge exests first as representations within individuals, separate from how we interact with it...`;
  // Should NOT contain its own header
  test.expect(expectedSubsectionContent).not.toContain('=== Why Investigate');
  test.expect(expectedSubsectionContent).toContain('Understanding the nature');
});
test.test('Deep headers (====) should have proper newlines', () => {
  // From "=== The Four Perspectives" section with ==== subsections
  // After content processing every "====" heading must sit on its own line
  // (newline before and after); the fixture encodes that expected layout.
  const expectedFormatted = `
==== 1. The Building Blocks (Material Cause)
Just as living organisms are made up of cells, knowledge systems are built from fundamental units of understanding.
==== 2. The Pattern of Organization (Formal Cause)
If you've ever seen how mushrooms connect through underground networks...`;
  test.expect(expectedFormatted).toContain('\n==== 1. The Building Blocks (Material Cause)\n');
  test.expect(expectedFormatted).toContain('\n==== 2. The Pattern of Organization (Formal Cause)\n');
});
// =============================================================================
// PHASE 3: Publishing Logic Tests (30040/30041 Structure)
// =============================================================================
test.test('Understanding Knowledge should create proper 30040 index event', () => {
  // The index event is pure structure: empty content, all data in tags.
  const indexEvent = {
    kind: 30040,
    content: '', // Index events have empty content
    tags: [
      ['d', 'understanding-knowledge'],
      ['title', 'Understanding Knowledge'],
      ['image', 'https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg'],
      ['published', '2025-04-21'],
      ['language', 'en, ISO-639-1'],
      ['t', 'knowledge'],
      ['t', 'philosophy'],
      ['t', 'education'],
      ['type', 'text'],
      // a-tags referencing sections
      ['a', '30041:pubkey:understanding-knowledge-preface'],
      ['a', '30041:pubkey:understanding-knowledge-introduction-knowledge-as-a-living-ecosystem'],
      ['a', '30041:pubkey:understanding-knowledge-i-material-cause-the-substance-of-knowledge'],
      // ... more a-tags for each section
    ]
  };

  test.expect(indexEvent.kind).toBe(30040);
  test.expect(indexEvent.content).toBe('');

  // One 't' tag per :tags: entry, and the custom :type: attribute preserved.
  const topicTags = indexEvent.tags.filter(([key]) => key === 't');
  test.expect(topicTags).toHaveLength(3);
  test.expect(indexEvent.tags.find(([key, value]) => key === 'type' && value === 'text')).toBeTruthy();
});
test.test('Understanding Knowledge sections should create proper 30041 events', () => {
  // Each main (==) section becomes a 30041 content event with d/title tags.
  const sectionEvents = [
    {
      kind: 30041,
      content: `[NOTE]\nThis essay was written to outline and elaborate on the purpose of the Nostr client Alexandria...`,
      tags: [
        ['d', 'understanding-knowledge-preface'],
        ['title', 'Preface']
      ]
    },
    {
      kind: 30041,
      content: `image:https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg[library]`,
      tags: [
        ['d', 'understanding-knowledge-introduction-knowledge-as-a-living-ecosystem'],
        ['title', 'Introduction: Knowledge as a Living Ecosystem']
      ]
    }
  ];

  // Every section event must be kind 30041, carry content, and have both tags.
  for (const sectionEvent of sectionEvents) {
    test.expect(sectionEvent.kind).toBe(30041);
    test.expect(sectionEvent.content).toBeTruthy();
    test.expect(sectionEvent.tags.find(([key]) => key === 'd')).toBeTruthy();
    test.expect(sectionEvent.tags.find(([key]) => key === 'title')).toBeTruthy();
  }
});
test.test('Level-based parsing should create correct 30040/30041 structure', () => {
  // Based on docreference.md examples
  // Level 2 parsing: only == sections become events, containing all subsections
  const expectedLevel2Events = {
    mainIndex: {
      kind: 30040,
      content: '',
      tags: [
        ['d', 'programming-fundamentals-guide'],
        ['title', 'Programming Fundamentals Guide'],
        ['a', '30041:author_pubkey:data-structures'],
        ['a', '30041:author_pubkey:algorithms']
      ]
    },
    dataStructuresSection: {
      kind: 30041,
      content: 'Understanding fundamental data structures...\n\n=== Arrays and Lists\n\n...==== Dynamic Arrays\n\n...==== Linked Lists\n\n...',
      tags: [
        ['d', 'data-structures'],
        ['title', 'Data Structures'],
        // Custom section attributes (:difficulty:) are preserved as tags.
        ['difficulty', 'intermediate']
      ]
    }
  };
  // Level 3 parsing: == sections become 30040 indices, === sections become 30041 events
  const expectedLevel3Events = {
    mainIndex: {
      kind: 30040,
      content: '',
      tags: [
        ['d', 'programming-fundamentals-guide'],
        ['title', 'Programming Fundamentals Guide'],
        ['a', '30040:author_pubkey:data-structures'], // Now references sub-index
        ['a', '30040:author_pubkey:algorithms']
      ]
    },
    dataStructuresIndex: {
      kind: 30040,
      content: '',
      tags: [
        ['d', 'data-structures'],
        ['title', 'Data Structures'],
        // The section's own preamble text lands in a dedicated "-content" event.
        ['a', '30041:author_pubkey:data-structures-content'],
        ['a', '30041:author_pubkey:arrays-and-lists'],
        ['a', '30041:author_pubkey:trees-and-graphs']
      ]
    },
    arraysAndListsSection: {
      kind: 30041,
      content: 'Arrays are contiguous...\n\n==== Dynamic Arrays\n\n...==== Linked Lists\n\n...',
      tags: [
        ['d', 'arrays-and-lists'],
        ['title', 'Arrays and Lists']
      ]
    }
  };
  test.expect(expectedLevel2Events.mainIndex.kind).toBe(30040);
  test.expect(expectedLevel2Events.dataStructuresSection.kind).toBe(30041);
  test.expect(expectedLevel2Events.dataStructuresSection.content).toContain('=== Arrays and Lists');
  test.expect(expectedLevel3Events.dataStructuresIndex.kind).toBe(30040);
  test.expect(expectedLevel3Events.arraysAndListsSection.content).toContain('==== Dynamic Arrays');
});
// =============================================================================
// PHASE 4: Smart Publishing System Tests
// =============================================================================
test.test('Content type detection should work for both test files', () => {
  const testCases = [
    {
      name: 'Understanding Knowledge (article)',
      content: understandingKnowledge,
      expected: 'article'
    },
    {
      name: 'Desire (article)',
      content: desire,
      expected: 'article'
    },
    {
      name: 'Scattered notes format',
      content: '== Note 1\nContent\n\n== Note 2\nMore content',
      expected: 'scattered-notes'
    }
  ];

  for (const { name, content, expected } of testCases) {
    const trimmed = content.trim();
    // A lone "=" document heading marks an article; "==" alone marks
    // scattered notes; anything else is unclassified.
    const hasDocTitle = trimmed.startsWith('=') && !trimmed.startsWith('==');
    const hasSections = content.includes('==');
    const detected = hasDocTitle ? 'article' : hasSections ? 'scattered-notes' : 'none';
    console.log(` ${name}: detected ${detected}`);
    test.expect(detected).toBe(expected);
  }
});
test.test('Parse level should affect event structure correctly', () => {
  // Understanding Knowledge has structure: = > == (6 sections) > === (many subsections) > ====
  // Expected event counts per parse level, from manual content analysis.
  const levelEventCounts = [
    { level: 1, description: 'Only document index', events: 1 },
    { level: 2, description: 'Document index + level 2 sections (==)', events: 7 }, // 1 index + 6 sections
    { level: 3, description: 'Document index + section indices + level 3 subsections (===)', events: 20 }, // More complex
    { level: 4, description: 'Full hierarchy including level 4 (====)', events: 35 }
  ];

  for (const { level, description, events } of levelEventCounts) {
    console.log(` Level ${level}: ${description} (${events} events)`);
    test.expect(events).toBeTruthy();
  }
});
// =============================================================================
// PHASE 5: Integration Tests (End-to-End Workflow)
// =============================================================================
test.test('Full Understanding Knowledge publishing workflow (Level 2)', async () => {
  // Mock the complete ITERATIVE workflow
  // Three stages are mocked end-to-end: parse -> build events -> publish.
  const mockWorkflow = {
    // Stand-in for the level-2 parser: only "==" sections become entries and
    // deeper headings stay embedded in each section's content string.
    // NOTE(review): the `content` parameter is accepted but unused by this mock.
    parseLevel2: (content: string) => ({
      metadata: {
        title: 'Understanding Knowledge',
        image: 'https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg',
        published: '2025-04-21',
        tags: ['knowledge', 'philosophy', 'education'],
        type: 'text'
      },
      title: 'Understanding Knowledge',
      content: 'Introduction content before any sections',
      sections: [
        {
          title: 'Preface',
          content: '[NOTE]\nThis essay was written to outline...',
          metadata: { title: 'Preface' }
        },
        {
          title: 'Introduction: Knowledge as a Living Ecosystem',
          // Contains ALL subsections (===, ====) in content
          content: `image:https://i.nostr.build/IUs0xNyUEf5hXTFL.jpg[library]
=== Why Investigate the Nature of Knowledge?
Understanding the nature of knowledge itself is fundamental...
=== Challenging the Static Perception of Knowledge
Traditionally, knowledge has been perceived as a static repository...
==== The Four Perspectives
===== 1. The Building Blocks (Material Cause)
Just as living organisms are made up of cells...`,
          metadata: { title: 'Introduction: Knowledge as a Living Ecosystem' }
        }
        // ... 4 more sections (Material Cause, Formal Cause, Efficient Cause, Final Cause)
      ]
    }),
    // Builds the 30040 index (empty content, metadata in tags) plus one
    // 30041 event per parsed section.
    buildLevel2Events: (parsed: any) => ({
      indexEvent: {
        kind: 30040,
        content: '',
        tags: [
          ['d', 'understanding-knowledge'],
          ['title', parsed.title],
          ['image', parsed.metadata.image],
          ['t', 'knowledge'], ['t', 'philosophy'], ['t', 'education'],
          ['type', 'text'],
          ['a', '30041:pubkey:preface'],
          ['a', '30041:pubkey:introduction-knowledge-as-a-living-ecosystem']
        ]
      },
      sectionEvents: parsed.sections.map((s: any) => ({
        kind: 30041,
        content: s.content,
        tags: [
          // The d-tag is the slugified section title.
          ['d', s.title.toLowerCase().replace(/[^a-z0-9]+/g, '-')],
          ['title', s.title]
        ]
      }))
    }),
    // Pretends to publish: one event per section plus the main index.
    publish: (events: any) => ({
      success: true,
      published: events.sectionEvents.length + 1,
      eventIds: ['main-index', ...events.sectionEvents.map((_: any, i: number) => `section-${i}`)]
    })
  };
  // Test the full Level 2 workflow
  const parsed = mockWorkflow.parseLevel2(understandingKnowledge);
  const events = mockWorkflow.buildLevel2Events(parsed);
  const result = mockWorkflow.publish(events);
  test.expect(parsed.metadata.title).toBe('Understanding Knowledge');
  test.expect(parsed.sections).toHaveLength(2);
  test.expect(events.indexEvent.kind).toBe(30040);
  test.expect(events.sectionEvents).toHaveLength(2);
  test.expect(events.sectionEvents[1].content).toContain('=== Why Investigate'); // Contains subsections
  test.expect(events.sectionEvents[1].content).toContain('===== 1. The Building Blocks'); // Contains deeper levels
  test.expect(result.success).toBeTruthy();
  test.expect(result.published).toBe(3); // 1 index + 2 sections
});
test.test('Error handling for malformed content', () => {
  // Malformed inputs the parser must reject, paired with the expected reason.
  const invalidCases = [
    { content: '== Section\n=== Subsection\n==== Missing content', error: 'Empty content sections' },
    { content: '= Title\n\n== Section\n==== Skipped level', error: 'Invalid header nesting' },
    { content: '', error: 'Empty document' }
  ];
  invalidCases.forEach(({ content, error }) => {
    // Mock error detection
    const hasEmptySections = content.includes('Missing content');
    // BUGFIX: the old check `includes('====') && !includes('===')` was always
    // false, because any string containing "====" also contains "===" as a
    // substring. Compare heading levels line-by-line instead: a level-4
    // heading with no level-3 heading anywhere means a skipped level.
    const lines = content.split('\n');
    const hasLevel4 = lines.some(l => l.startsWith('====') && !l.startsWith('====='));
    const hasLevel3 = lines.some(l => l.startsWith('===') && !l.startsWith('===='));
    const hasSkippedLevels = hasLevel4 && !hasLevel3;
    const isEmpty = content.trim() === '';
    const shouldError = hasEmptySections || hasSkippedLevels || isEmpty;
    test.expect(shouldError).toBeTruthy();
  });
});
// =============================================================================
// Test Execution
// =============================================================================
// Print a short analysis of the fixture files, then run all registered tests.
console.log('🎯 ZettelPublisher Test-Driven Development (ITERATIVE)\n');
console.log('📋 Test Data Analysis:');
console.log(`- Understanding Knowledge: ${understandingKnowledge.split('\n').length} lines`);
console.log(`- Desire: ${desire.split('\n').length} lines`);
console.log('- Both files use = document title with metadata directly underneath');
console.log('- Sections use == with deep nesting (===, ====, =====)');
console.log('- Custom attributes like :type: podcastArticle need preservation');
console.log('- CRITICAL: Structure is ITERATIVE not recursive (per docreference.md)\n');
// run() resolves to true only when every test passed; the summary printed
// below guides the implementation phase either way.
test.run().then(success => {
  if (success) {
    console.log('\n🎉 All tests defined! Ready for ITERATIVE implementation.');
    console.log('\n📋 Implementation Plan:');
    console.log('1. ✅ Update ParsedAsciiDoc interface for ITERATIVE parsing');
    console.log('2. ✅ Fix content processing (header separation, custom attributes)');
    console.log('3. ✅ Implement level-based publishing logic (30040/30041 structure)');
    console.log('4. ✅ Add parse-level controlled event generation');
    console.log('5. ✅ Create context-aware UI with level selector');
    console.log('\n🔄 Each level can be developed and tested independently!');
  } else {
    console.log('\n❌ Tests ready - implement ITERATIVE features to make them pass!');
  }
}).catch(console.error); // Surface unexpected runner errors rather than swallowing them.
Loading…
Cancel
Save