
Refactor export functionality in App.svelte to support both GET and POST methods for event exports, giving more flexibility around user permissions. Update server-side handling to support pubkey filtering and improve response handling for file downloads. Adjust UI components to reflect these changes.
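For context, a minimal sketch of how a client can call the unified endpoint after this change (based on the handlers and the `createNIP98AuthHeader` helper that appear in this diff; the variable names are illustrative):

```javascript
// POST with a JSON body: an empty pubkeys array exports all events
// (admin/owner only), a non-empty array filters by those pubkeys.
const authHeader = await createNIP98AuthHeader("/api/export", "POST");
const response = await fetch("/api/export", {
  method: "POST",
  headers: {
    Authorization: authHeader,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ pubkeys: [userPubkey] }),
});

// GET with query parameters is still supported for the same filtering.
const getResponse = await fetch(
  "/api/export?pubkey=" + encodeURIComponent(userPubkey),
  {
    method: "GET",
    headers: {
      Authorization: await createNIP98AuthHeader("/api/export", "GET"),
    },
  },
);
```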

main
mleku 3 months ago
commit 9f39ca8a62
31 changed files (lines changed in parentheses):

1. .aiassistant/rules/rules.md (2)
2. .github/workflows/go.yml (6)
3. app/server.go (107)
4. app/web/public/global.css (12)
5. app/web/public/index.html (17)
6. app/web/rollup.config.js (52)
7. app/web/scripts/setupTypeScript.js (113)
8. app/web/src/App.svelte (69)
9. app/web/src/constants.js (16)
10. app/web/src/main.js (8)
11. app/web/src/nostr.js (149)
12. cmd/benchmark/README.md (8)
13. cmd/benchmark/docker-compose.yml (42)
14. contrib/stella/APACHE-PROXY-GUIDE.md (30)
15. contrib/stella/DOCKER.md (9)
16. contrib/stella/SERVICE-WORKER-FIX.md (29)
17. contrib/stella/WEBSOCKET-DEBUG.md (7)
18. contrib/stella/docker-compose.yml (4)
19. docs/websocket-req-comparison.md (28)
20. pkg/crypto/ec/README.md (5)
21. pkg/crypto/ec/chainhash/README.md (6)
22. pkg/crypto/ec/ecdsa/README.md (3)
23. pkg/crypto/ec/musig2/data/key_agg_vectors.json (65)
24. pkg/crypto/ec/musig2/data/nonce_agg_vectors.json (25)
25. pkg/crypto/ec/musig2/data/sig_agg_vectors.json (107)
26. pkg/crypto/ec/musig2/data/sign_verify_vectors.json (141)
27. pkg/crypto/ec/musig2/data/tweak_vectors.json (134)
28. pkg/crypto/sha256/README.md (8)
29. pkg/utils/atomic/.codecov.yml (3)
30. pkg/utils/atomic/CHANGELOG.md (35)
31. pkg/utils/interrupt/README.md (1)

2
.aiassistant/rules/rules.md

@@ -38,7 +38,7 @@ describing how the item is used.
For documentation on package, summarise in up to 3 sentences the functions and
purpose of the package
Do not use markdown ** or __ or any similar things in initial words of a bullet
Do not use markdown \*\* or \_\_ or any similar things in initial words of a bullet
point, instead use standard godoc style # prefix for header sections
ALWAYS separate each bullet point with an empty line, and ALWAYS indent them

6
.github/workflows/go.yml

@@ -16,10 +16,9 @@ name: Go
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- "v[0-9]+.[0-9]+.[0-9]+"
jobs:
build:
runs-on: ubuntu-latest
steps:
@@ -28,7 +27,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.25'
go-version: "1.25"
- name: Install libsecp256k1
run: ./scripts/ubuntu_install_libsecp256k1.sh
@@ -47,7 +46,6 @@ jobs:
- name: Test
run: go test -v ./...
# release:
# needs: build
# runs-on: ubuntu-latest

107
app/server.go

@@ -186,10 +186,8 @@ func (s *Server) UserInterface() {
s.mux.HandleFunc("/api/auth/status", s.handleAuthStatus)
s.mux.HandleFunc("/api/auth/logout", s.handleAuthLogout)
s.mux.HandleFunc("/api/permissions/", s.handlePermissions)
// Export endpoints
// Export endpoint
s.mux.HandleFunc("/api/export", s.handleExport)
s.mux.HandleFunc("/api/export/mine", s.handleExportMine)
s.mux.HandleFunc("/export", s.handleExportAll)
// Events endpoints
s.mux.HandleFunc("/api/events/mine", s.handleEventsMine)
// Import endpoint (admin only)
@@ -442,9 +440,10 @@ func (s *Server) handlePermissions(w http.ResponseWriter, r *http.Request) {
w.Write(jsonData)
}
// handleExport streams all events as JSONL (NDJSON) using NIP-98 authentication. Admins only.
// handleExport streams events as JSONL (NDJSON) using NIP-98 authentication.
// Supports both GET (query params) and POST (JSON body) for pubkey filtering.
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
if r.Method != http.MethodGet && r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
@@ -467,10 +466,18 @@ func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
return
}
// Optional filtering by pubkey(s)
// Parse pubkeys from request
var pks [][]byte
q := r.URL.Query()
for _, pkHex := range q["pubkey"] {
if r.Method == http.MethodPost {
// Parse JSON body for pubkeys
var requestBody struct {
Pubkeys []string `json:"pubkeys"`
}
if err := json.NewDecoder(r.Body).Decode(&requestBody); err == nil {
// If JSON parsing succeeds, use pubkeys from body
for _, pkHex := range requestBody.Pubkeys {
if pkHex == "" {
continue
}
@@ -478,82 +485,36 @@ func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
pks = append(pks, pk)
}
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export
s.D.Export(s.Ctx, w, pks...)
}
// handleExportMine streams only the authenticated user's events as JSONL (NDJSON) using NIP-98 authentication.
func (s *Server) handleExportMine(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export for this user's pubkey only
s.D.Export(s.Ctx, w, pubkey)
// If JSON parsing fails, fall back to empty pubkeys (export all)
} else {
// GET method - parse query parameters
q := r.URL.Query()
for _, pkHex := range q["pubkey"] {
if pkHex == "" {
continue
}
// handleExportAll streams all events as JSONL (NDJSON) using NIP-98 authentication. Owner only.
func (s *Server) handleExportAll(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
if pk, err := hex.Dec(pkHex); !chk.E(err) {
pks = append(pks, pk)
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check if user has owner permission
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
// Determine filename based on whether filtering by pubkeys
var filename string
if len(pks) == 0 {
filename = "all-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
} else if len(pks) == 1 {
filename = "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
} else {
filename = "filtered-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
}
// Set response headers for file download
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "all-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
// Disable write timeouts for this operation
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
// Stream export of all events
s.D.Export(s.Ctx, w)
// Stream export
s.D.Export(s.Ctx, w, pks...)
}
// handleEventsMine returns the authenticated user's events in JSON format with pagination using NIP-98 authentication.
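Because the export is streamed as NDJSON rather than returned as a single JSON document, a client does not have to buffer the whole file before processing it. A sketch using the standard Fetch streaming API (the helper name is hypothetical, not part of this diff):

```javascript
// Parse a streamed JSONL export one event per line as chunks arrive.
async function readExportStream(response) {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  const events = [];
  let buffered = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split("\n");
    buffered = lines.pop(); // keep any partial trailing line for the next chunk
    for (const line of lines) {
      if (line.trim()) events.push(JSON.parse(line));
    }
  }
  if (buffered.trim()) events.push(JSON.parse(buffered)); // final line
  return events;
}
```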

12
app/web/public/global.css

@@ -1,4 +1,5 @@
html, body {
html,
body {
position: relative;
width: 100%;
height: 100%;
@@ -9,7 +10,9 @@ body {
margin: 0;
padding: 8px;
box-sizing: border-box;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu,
Cantarell, "Helvetica Neue", sans-serif;
}
a {
@@ -29,7 +32,10 @@ label {
display: block;
}
input, button, select, textarea {
input,
button,
select,
textarea {
font-family: inherit;
font-size: inherit;
-webkit-padding: 0.4em 0;

17
app/web/public/index.html

@@ -1,18 +1,17 @@
<!DOCTYPE html>
<!doctype html>
<html lang="en">
<head>
<meta charset='utf-8'>
<meta name='viewport' content='width=device-width,initial-scale=1'>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>ORLY?</title>
<link rel='icon' type='image/png' href='/orly.png'>
<link rel='stylesheet' href='/global.css'>
<link rel='stylesheet' href='/build/bundle.css'>
<link rel="icon" type="image/png" href="/orly.png" />
<link rel="stylesheet" href="/global.css" />
<link rel="stylesheet" href="/build/bundle.css" />
<script defer src='/build/bundle.js'></script>
<script defer src="/build/bundle.js"></script>
</head>
<body>
</body>
<body></body>
</html>

52
app/web/rollup.config.js

@@ -1,10 +1,10 @@
import { spawn } from 'child_process';
import svelte from 'rollup-plugin-svelte';
import commonjs from '@rollup/plugin-commonjs';
import terser from '@rollup/plugin-terser';
import resolve from '@rollup/plugin-node-resolve';
import livereload from 'rollup-plugin-livereload';
import css from 'rollup-plugin-css-only';
import { spawn } from "child_process";
import svelte from "rollup-plugin-svelte";
import commonjs from "@rollup/plugin-commonjs";
import terser from "@rollup/plugin-terser";
import resolve from "@rollup/plugin-node-resolve";
import livereload from "rollup-plugin-livereload";
import css from "rollup-plugin-css-only";
const production = !process.env.ROLLUP_WATCH;
@@ -18,35 +18,35 @@ function serve() {
return {
writeBundle() {
if (server) return;
server = spawn('npm', ['run', 'start', '--', '--dev'], {
stdio: ['ignore', 'inherit', 'inherit'],
shell: true
server = spawn("npm", ["run", "start", "--", "--dev"], {
stdio: ["ignore", "inherit", "inherit"],
shell: true,
});
process.on('SIGTERM', toExit);
process.on('exit', toExit);
}
process.on("SIGTERM", toExit);
process.on("exit", toExit);
},
};
}
export default {
input: 'src/main.js',
input: "src/main.js",
output: {
sourcemap: true,
format: 'iife',
name: 'app',
file: 'dist/bundle.js'
format: "iife",
name: "app",
file: "dist/bundle.js",
},
plugins: [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production
}
dev: !production,
},
}),
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: 'bundle.css' }),
css({ output: "bundle.css" }),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
@@ -55,8 +55,8 @@ export default {
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ['svelte'],
exportConditions: ['svelte']
dedupe: ["svelte"],
exportConditions: ["svelte"],
}),
commonjs(),
@@ -66,13 +66,13 @@ export default {
// Watch the `public` directory and refresh the
// browser on changes when not in production
!production && livereload('public'),
!production && livereload("public"),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser()
production && terser(),
],
watch: {
clearScreen: false
}
clearScreen: false,
},
};

113
app/web/scripts/setupTypeScript.js

@@ -13,70 +13,78 @@
rm -rf test-template template && git clone sveltejs/template test-template && node scripts/setupTypeScript.js test-template
*/
import fs from "fs"
import path from "path"
import { argv } from "process"
import url from 'url';
import fs from "fs";
import path from "path";
import { argv } from "process";
import url from "url";
const __filename = url.fileURLToPath(import.meta.url);
const __dirname = url.fileURLToPath(new URL('.', import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..")
const __dirname = url.fileURLToPath(new URL(".", import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..");
// Add deps to pkg.json
const packageJSON = JSON.parse(fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"))
const packageJSON = JSON.parse(
fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"),
);
packageJSON.devDependencies = Object.assign(packageJSON.devDependencies, {
"svelte-check": "^3.0.0",
"svelte-preprocess": "^5.0.0",
"@rollup/plugin-typescript": "^11.0.0",
"typescript": "^4.9.0",
"tslib": "^2.5.0",
"@tsconfig/svelte": "^3.0.0"
})
typescript: "^4.9.0",
tslib: "^2.5.0",
"@tsconfig/svelte": "^3.0.0",
});
// Add script for checking
packageJSON.scripts = Object.assign(packageJSON.scripts, {
"check": "svelte-check"
})
check: "svelte-check",
});
// Write the package JSON
fs.writeFileSync(path.join(projectRoot, "package.json"), JSON.stringify(packageJSON, null, " "))
fs.writeFileSync(
path.join(projectRoot, "package.json"),
JSON.stringify(packageJSON, null, " "),
);
// mv src/main.js to main.ts - note, we need to edit rollup.config.js for this too
const beforeMainJSPath = path.join(projectRoot, "src", "main.js")
const afterMainTSPath = path.join(projectRoot, "src", "main.ts")
fs.renameSync(beforeMainJSPath, afterMainTSPath)
const beforeMainJSPath = path.join(projectRoot, "src", "main.js");
const afterMainTSPath = path.join(projectRoot, "src", "main.ts");
fs.renameSync(beforeMainJSPath, afterMainTSPath);
// Switch the app.svelte file to use TS
const appSveltePath = path.join(projectRoot, "src", "App.svelte")
let appFile = fs.readFileSync(appSveltePath, "utf8")
appFile = appFile.replace("<script>", '<script lang="ts">')
appFile = appFile.replace("export let name;", 'export let name: string;')
fs.writeFileSync(appSveltePath, appFile)
const appSveltePath = path.join(projectRoot, "src", "App.svelte");
let appFile = fs.readFileSync(appSveltePath, "utf8");
appFile = appFile.replace("<script>", '<script lang="ts">');
appFile = appFile.replace("export let name;", "export let name: string;");
fs.writeFileSync(appSveltePath, appFile);
// Edit rollup config
const rollupConfigPath = path.join(projectRoot, "rollup.config.js")
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8")
const rollupConfigPath = path.join(projectRoot, "rollup.config.js");
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8");
// Edit imports
rollupConfig = rollupConfig.replace(`'rollup-plugin-css-only';`, `'rollup-plugin-css-only';
rollupConfig = rollupConfig.replace(
`'rollup-plugin-css-only';`,
`'rollup-plugin-css-only';
import sveltePreprocess from 'svelte-preprocess';
import typescript from '@rollup/plugin-typescript';`)
import typescript from '@rollup/plugin-typescript';`,
);
// Replace name of entry point
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`)
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`);
// Add preprocessor
rollupConfig = rollupConfig.replace(
'compilerOptions:',
'preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:'
"compilerOptions:",
"preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:",
);
// Add TypeScript
rollupConfig = rollupConfig.replace(
'commonjs(),',
'commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),'
"commonjs(),",
"commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),",
);
fs.writeFileSync(rollupConfigPath, rollupConfig)
fs.writeFileSync(rollupConfigPath, rollupConfig);
// Add svelte.config.js
const tsconfig = `{
@@ -84,9 +92,9 @@ const tsconfig = `{
"include": ["src/**/*"],
"exclude": ["node_modules/*", "__sapper__/*", "public/*"]
}`
const tsconfigPath = path.join(projectRoot, "tsconfig.json")
fs.writeFileSync(tsconfigPath, tsconfig)
}`;
const tsconfigPath = path.join(projectRoot, "tsconfig.json");
fs.writeFileSync(tsconfigPath, tsconfig);
// Add TSConfig
const svelteConfig = `import sveltePreprocess from 'svelte-preprocess';
@@ -94,41 +102,46 @@ const svelteConfig = `import sveltePreprocess from 'svelte-preprocess';
export default {
preprocess: sveltePreprocess()
};
`
const svelteConfigPath = path.join(projectRoot, "svelte.config.js")
fs.writeFileSync(svelteConfigPath, svelteConfig)
`;
const svelteConfigPath = path.join(projectRoot, "svelte.config.js");
fs.writeFileSync(svelteConfigPath, svelteConfig);
// Add global.d.ts
const dtsPath = path.join(projectRoot, "src", "global.d.ts")
fs.writeFileSync(dtsPath, `/// <reference types="svelte" />`)
const dtsPath = path.join(projectRoot, "src", "global.d.ts");
fs.writeFileSync(dtsPath, `/// <reference types="svelte" />`);
// Delete this script, but not during testing
if (!argv[2]) {
// Remove the script
fs.unlinkSync(path.join(__filename))
fs.unlinkSync(path.join(__filename));
// Check for Mac's DS_store file, and if it's the only one left remove it
const remainingFiles = fs.readdirSync(path.join(__dirname))
if (remainingFiles.length === 1 && remainingFiles[0] === '.DS_store') {
fs.unlinkSync(path.join(__dirname, '.DS_store'))
const remainingFiles = fs.readdirSync(path.join(__dirname));
if (remainingFiles.length === 1 && remainingFiles[0] === ".DS_store") {
fs.unlinkSync(path.join(__dirname, ".DS_store"));
}
// Check if the scripts folder is empty
if (fs.readdirSync(path.join(__dirname)).length === 0) {
// Remove the scripts folder
fs.rmdirSync(path.join(__dirname))
fs.rmdirSync(path.join(__dirname));
}
}
// Adds the extension recommendation
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true })
fs.writeFileSync(path.join(projectRoot, ".vscode", "extensions.json"), `{
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true });
fs.writeFileSync(
path.join(projectRoot, ".vscode", "extensions.json"),
`{
"recommendations": ["svelte.svelte-vscode"]
}
`)
`,
);
console.log("Converted to TypeScript.")
console.log("Converted to TypeScript.");
if (fs.existsSync(path.join(projectRoot, "node_modules"))) {
console.log("\nYou will need to re-run your dependency manager to get started.")
console.log(
"\nYou will need to re-run your dependency manager to get started.",
);
}

69
app/web/src/App.svelte

@@ -230,19 +230,27 @@
}
// Export functionality
async function exportAllEvents() {
if (!isLoggedIn || (userRole !== 'admin' && userRole !== 'owner')) {
alert('Admin or owner permission required');
async function exportEvents(pubkeys = []) {
if (!isLoggedIn) {
alert('Please log in first');
return;
}
// Check permissions for exporting all events
if (pubkeys.length === 0 && userRole !== 'admin' && userRole !== 'owner') {
alert('Admin or owner permission required to export all events');
return;
}
try {
const authHeader = await createNIP98AuthHeader('/api/export', 'GET');
const authHeader = await createNIP98AuthHeader('/api/export', 'POST');
const response = await fetch('/api/export', {
method: 'GET',
method: 'POST',
headers: {
'Authorization': authHeader
}
'Authorization': authHeader,
'Content-Type': 'application/json'
},
body: JSON.stringify({ pubkeys })
});
if (!response.ok) {
@@ -253,41 +261,18 @@
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `all-events-${new Date().toISOString().slice(0, 19).replace(/:/g, '-')}.jsonl`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
window.URL.revokeObjectURL(url);
} catch (error) {
console.error('Export failed:', error);
alert('Export failed: ' + error.message);
}
}
async function exportMyEvents() {
if (!isLoggedIn) {
alert('Please log in first');
return;
// Get filename from response headers or use default
const contentDisposition = response.headers.get('Content-Disposition');
let filename = 'events.jsonl';
if (contentDisposition) {
const filenameMatch = contentDisposition.match(/filename="([^"]+)"/);
if (filenameMatch) {
filename = filenameMatch[1];
}
try {
const authHeader = await createNIP98AuthHeader('/api/export/mine', 'GET');
const response = await fetch('/api/export/mine', {
method: 'GET',
headers: {
'Authorization': authHeader
}
});
if (!response.ok) {
throw new Error(`Export failed: ${response.status} ${response.statusText}`);
}
const blob = await response.blob();
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `my-events-${new Date().toISOString().slice(0, 19).replace(/:/g, '-')}.jsonl`;
a.download = filename;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
@@ -298,6 +283,14 @@
}
}
async function exportAllEvents() {
await exportEvents([]); // Empty array means export all events
}
async function exportMyEvents() {
await exportEvents([userPubkey]); // Export only current user's events
}
// Import functionality
function handleFileSelect(event) {
selectedFile = event.target.files[0];

16
app/web/src/constants.js

@@ -1,11 +1,11 @@
// Default Nostr relays for searching
export const DEFAULT_RELAYS = [
'wss://relay.damus.io',
'wss://relay.nostr.band',
'wss://nos.lol',
'wss://relay.nostr.net',
'wss://relay.minibits.cash',
'wss://relay.coinos.io/',
'wss://nwc.primal.net',
'wss://relay.orly.dev',
"wss://relay.damus.io",
"wss://relay.nostr.band",
"wss://nos.lol",
"wss://relay.nostr.net",
"wss://relay.minibits.cash",
"wss://relay.coinos.io/",
"wss://nwc.primal.net",
"wss://relay.orly.dev",
];

8
app/web/src/main.js

@@ -1,11 +1,11 @@
import App from './App.svelte';
import '../public/global.css';
import App from "./App.svelte";
import "../public/global.css";
const app = new App({
target: document.body,
props: {
name: 'world'
}
name: "world",
},
});
export default app;

149
app/web/src/nostr.js

@@ -1,4 +1,4 @@
import { DEFAULT_RELAYS } from './constants.js';
import { DEFAULT_RELAYS } from "./constants.js";
// Simple WebSocket relay manager
class NostrClient {
@@ -8,9 +8,9 @@ class NostrClient {
}
async connect() {
console.log('Starting connection to', DEFAULT_RELAYS.length, 'relays...');
console.log("Starting connection to", DEFAULT_RELAYS.length, "relays...");
const connectionPromises = DEFAULT_RELAYS.map(relayUrl => {
const connectionPromises = DEFAULT_RELAYS.map((relayUrl) => {
return new Promise((resolve) => {
try {
console.log(`Attempting to connect to ${relayUrl}`);
@@ -27,7 +27,11 @@ class NostrClient {
};
ws.onclose = (event) => {
console.warn(`Connection closed to ${relayUrl}:`, event.code, event.reason);
console.warn(
`Connection closed to ${relayUrl}:`,
event.code,
event.reason,
);
};
ws.onmessage = (event) => {
@@ -35,7 +39,11 @@ class NostrClient {
try {
this.handleMessage(relayUrl, JSON.parse(event.data));
} catch (error) {
console.error(`Failed to parse message from ${relayUrl}:`, error, event.data);
console.error(
`Failed to parse message from ${relayUrl}:`,
error,
event.data,
);
}
};
@@ -48,7 +56,6 @@ class NostrClient {
resolve(false);
}
}, 5000);
} catch (error) {
console.error(`Failed to create WebSocket for ${relayUrl}:`, error);
resolve(false);
@@ -58,10 +65,12 @@ class NostrClient {
const results = await Promise.all(connectionPromises);
const successfulConnections = results.filter(Boolean).length;
console.log(`Connected to ${successfulConnections}/${DEFAULT_RELAYS.length} relays`);
console.log(
`Connected to ${successfulConnections}/${DEFAULT_RELAYS.length} relays`,
);
// Wait a bit more for connections to stabilize
await new Promise(resolve => setTimeout(resolve, 1000));
await new Promise((resolve) => setTimeout(resolve, 1000));
}
handleMessage(relayUrl, message) {
@@ -70,18 +79,22 @@ class NostrClient {
console.log(`Message type: ${type}, subscriptionId: ${subscriptionId}`);
if (type === 'EVENT') {
if (type === "EVENT") {
console.log(`Received EVENT for subscription ${subscriptionId}:`, event);
if (this.subscriptions.has(subscriptionId)) {
console.log(`Found callback for subscription ${subscriptionId}, executing...`);
console.log(
`Found callback for subscription ${subscriptionId}, executing...`,
);
const callback = this.subscriptions.get(subscriptionId);
callback(event);
} else {
console.warn(`No callback found for subscription ${subscriptionId}`);
}
} else if (type === 'EOSE') {
console.log(`End of stored events for subscription ${subscriptionId} from ${relayUrl}`);
} else if (type === 'NOTICE') {
} else if (type === "EOSE") {
console.log(
`End of stored events for subscription ${subscriptionId} from ${relayUrl}`,
);
} else if (type === "NOTICE") {
console.warn(`Notice from ${relayUrl}:`, subscriptionId);
} else {
console.log(`Unknown message type ${type} from ${relayUrl}:`, message);
@@ -90,16 +103,21 @@ class NostrClient {
subscribe(filters, callback) {
const subscriptionId = Math.random().toString(36).substring(7);
console.log(`Creating subscription ${subscriptionId} with filters:`, filters);
console.log(
`Creating subscription ${subscriptionId} with filters:`,
filters,
);
this.subscriptions.set(subscriptionId, callback);
const subscription = ['REQ', subscriptionId, filters];
const subscription = ["REQ", subscriptionId, filters];
console.log(`Subscription message:`, JSON.stringify(subscription));
let sentCount = 0;
for (const [relayUrl, ws] of this.relays) {
console.log(`Checking relay ${relayUrl}, readyState: ${ws.readyState} (${ws.readyState === WebSocket.OPEN ? 'OPEN' : 'NOT OPEN'})`);
console.log(
`Checking relay ${relayUrl}, readyState: ${ws.readyState} (${ws.readyState === WebSocket.OPEN ? "OPEN" : "NOT OPEN"})`,
);
if (ws.readyState === WebSocket.OPEN) {
try {
ws.send(JSON.stringify(subscription));
@@ -113,14 +131,16 @@ class NostrClient {
}
}
console.log(`Subscription ${subscriptionId} sent to ${sentCount}/${this.relays.size} relays`);
console.log(
`Subscription ${subscriptionId} sent to ${sentCount}/${this.relays.size} relays`,
);
return subscriptionId;
}
unsubscribe(subscriptionId) {
this.subscriptions.delete(subscriptionId);
const closeMessage = ['CLOSE', subscriptionId];
const closeMessage = ["CLOSE", subscriptionId];
for (const [relayUrl, ws] of this.relays) {
if (ws.readyState === WebSocket.OPEN) {
@@ -142,9 +162,9 @@ class NostrClient {
export const nostrClient = new NostrClient();
// IndexedDB helpers for caching events (kind 0 profiles)
const DB_NAME = 'nostrCache';
const DB_NAME = "nostrCache";
const DB_VERSION = 1;
const STORE_EVENTS = 'events';
const STORE_EVENTS = "events";
function openDB() {
return new Promise((resolve, reject) => {
@@ -153,9 +173,15 @@ function openDB() {
req.onupgradeneeded = () => {
const db = req.result;
if (!db.objectStoreNames.contains(STORE_EVENTS)) {
const store = db.createObjectStore(STORE_EVENTS, { keyPath: 'id' });
store.createIndex('byKindAuthor', ['kind', 'pubkey'], { unique: false });
store.createIndex('byKindAuthorCreated', ['kind', 'pubkey', 'created_at'], { unique: false });
const store = db.createObjectStore(STORE_EVENTS, { keyPath: "id" });
store.createIndex("byKindAuthor", ["kind", "pubkey"], {
unique: false,
});
store.createIndex(
"byKindAuthorCreated",
["kind", "pubkey", "created_at"],
{ unique: false },
);
}
};
req.onsuccess = () => resolve(req.result);
@@ -170,10 +196,13 @@ async function getLatestProfileEvent(pubkey) {
try {
const db = await openDB();
return await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, 'readonly');
const idx = tx.objectStore(STORE_EVENTS).index('byKindAuthorCreated');
const range = IDBKeyRange.bound([0, pubkey, -Infinity], [0, pubkey, Infinity]);
const req = idx.openCursor(range, 'prev'); // newest first
const tx = db.transaction(STORE_EVENTS, "readonly");
const idx = tx.objectStore(STORE_EVENTS).index("byKindAuthorCreated");
const range = IDBKeyRange.bound(
[0, pubkey, -Infinity],
[0, pubkey, Infinity],
);
const req = idx.openCursor(range, "prev"); // newest first
req.onsuccess = () => {
const cursor = req.result;
resolve(cursor ? cursor.value : null);
@@ -181,7 +210,7 @@ async function getLatestProfileEvent(pubkey) {
req.onerror = () => reject(req.error);
});
} catch (e) {
console.warn('IDB getLatestProfileEvent failed', e);
console.warn("IDB getLatestProfileEvent failed", e);
return null;
}
}
@@ -190,29 +219,36 @@ async function putEvent(event) {
try {
const db = await openDB();
await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, 'readwrite');
const tx = db.transaction(STORE_EVENTS, "readwrite");
tx.oncomplete = () => resolve();
tx.onerror = () => reject(tx.error);
tx.objectStore(STORE_EVENTS).put(event);
});
} catch (e) {
console.warn('IDB putEvent failed', e);
console.warn("IDB putEvent failed", e);
}
}
function parseProfileFromEvent(event) {
try {
const profile = JSON.parse(event.content || '{}');
const profile = JSON.parse(event.content || "{}");
return {
name: profile.name || profile.display_name || '',
picture: profile.picture || '',
banner: profile.banner || '',
about: profile.about || '',
nip05: profile.nip05 || '',
lud16: profile.lud16 || profile.lud06 || ''
name: profile.name || profile.display_name || "",
picture: profile.picture || "",
banner: profile.banner || "",
about: profile.about || "",
nip05: profile.nip05 || "",
lud16: profile.lud16 || profile.lud06 || "",
};
} catch (e) {
return { name: '', picture: '', banner: '', about: '', nip05: '', lud16: '' };
return {
name: "",
picture: "",
banner: "",
about: "",
nip05: "",
lud16: "",
};
}
}
@@ -229,7 +265,9 @@ export async function fetchUserProfile(pubkey) {
function cleanup() {
if (subscriptionId) {
try { nostrClient.unsubscribe(subscriptionId); } catch {}
try {
nostrClient.unsubscribe(subscriptionId);
} catch {}
}
if (debounceTimer) clearTimeout(debounceTimer);
if (overallTimer) clearTimeout(overallTimer);
@@ -239,20 +277,20 @@ export async function fetchUserProfile(pubkey) {
try {
const cachedEvent = await getLatestProfileEvent(pubkey);
if (cachedEvent) {
console.log('Using cached profile event');
console.log("Using cached profile event");
const profile = parseProfileFromEvent(cachedEvent);
resolved = true; // resolve immediately with cache
resolve(profile);
}
} catch (e) {
console.warn('Failed to load cached profile', e);
console.warn("Failed to load cached profile", e);
}
// 2) Set overall timeout
overallTimer = setTimeout(() => {
if (!newestEvent) {
console.log('Profile fetch timeout reached');
if (!resolved) reject(new Error('Profile fetch timeout'));
console.log("Profile fetch timeout reached");
if (!resolved) reject(new Error("Profile fetch timeout"));
} else if (!resolved) {
resolve(parseProfileFromEvent(newestEvent));
}
@@ -261,18 +299,21 @@ export async function fetchUserProfile(pubkey) {
// 3) Wait a bit to ensure connections are ready and then subscribe without limit
setTimeout(() => {
console.log('Starting subscription after connection delay...');
console.log("Starting subscription after connection delay...");
subscriptionId = nostrClient.subscribe(
{
kinds: [0],
authors: [pubkey]
authors: [pubkey],
},
(event) => {
// Collect all kind 0 events and pick the newest by created_at
if (!event || event.kind !== 0) return;
console.log('Profile event received:', event);
console.log("Profile event received:", event);
if (!newestEvent || (event.created_at || 0) > (newestEvent.created_at || 0)) {
if (
!newestEvent ||
(event.created_at || 0) > (newestEvent.created_at || 0)
) {
newestEvent = event;
}
@@ -286,13 +327,15 @@ export async function fetchUserProfile(pubkey) {
// Notify listeners that an updated profile is available
try {
if (typeof window !== 'undefined' && window.dispatchEvent) {
window.dispatchEvent(new CustomEvent('profile-updated', {
detail: { pubkey, profile, event: newestEvent }
}));
if (typeof window !== "undefined" && window.dispatchEvent) {
window.dispatchEvent(
new CustomEvent("profile-updated", {
detail: { pubkey, profile, event: newestEvent },
}),
);
}
} catch (e) {
console.warn('Failed to dispatch profile-updated event', e);
console.warn("Failed to dispatch profile-updated event", e);
}
if (!resolved) {
@@ -304,7 +347,7 @@ export async function fetchUserProfile(pubkey) {
cleanup();
}
}, 800);
}
},
);
}, 2000);
});
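Taken together, the reformatted nostr.js module exposes a small client API. A usage sketch, assuming a browser module context (the pubkey value is a placeholder):

```javascript
import { nostrClient, fetchUserProfile } from "./nostr.js";

const pubkey = "<64-char-hex-pubkey>"; // placeholder

// Connect to the default relays, then fetch a profile. The cached copy
// resolves first; fetchUserProfile rejects if nothing arrives in time.
await nostrClient.connect();
const profile = await fetchUserProfile(pubkey);
console.log("profile:", profile.name, profile.picture);

// Newer profile versions arrive later via the profile-updated event.
window.addEventListener("profile-updated", (e) => {
  if (e.detail.pubkey === pubkey) {
    console.log("newer profile:", e.detail.profile);
  }
});

// The raw subscription API for other queries:
const subId = nostrClient.subscribe(
  { kinds: [0], authors: [pubkey] },
  (event) => console.log("event:", event.id),
);
// ...and clean up when done:
nostrClient.unsubscribe(subId);
```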

8
cmd/benchmark/README.md

@@ -54,6 +54,7 @@ cd cmd/benchmark
```
This will:
- Clone all external relay repositories
- Create Docker configurations for each relay
- Set up configuration files
@@ -68,6 +69,7 @@ docker compose up --build
```
The system will:
- Build and start all relay containers
- Wait for all relays to become healthy
- Run benchmarks against each relay sequentially
@@ -90,7 +92,7 @@ ls reports/run_YYYYMMDD_HHMMSS/
### Docker Compose Services
| Service | Port | Description |
|---------|------|-------------|
| ---------------- | ---- | ----------------------------------------- |
| next-orly | 8001 | This repository's BadgerDB relay |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
@@ -174,16 +176,19 @@ go build -o benchmark main.go
## Benchmark Results Interpretation
### Peak Throughput Test
- **High events/sec**: Good write performance
- **Low latency**: Efficient event processing
- **High success rate**: Stable under load
### Burst Pattern Test
- **Consistent performance**: Good handling of variable loads
- **Low P95/P99 latency**: Predictable response times
- **No errors during bursts**: Robust queuing/buffering
### Mixed Read/Write Test
- **Balanced throughput**: Good concurrent operation handling
- **Low read latency**: Efficient query processing
- **Stable write performance**: Queries don't significantly impact writes
@@ -200,6 +205,7 @@ go build -o benchmark main.go
### Modifying Relay Configurations
Each relay's Dockerfile and configuration can be customized:
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
- **Database settings**: Modify configuration files in `configs/`
- **Network settings**: Update port mappings and health checks

42
cmd/benchmark/docker-compose.yml

@@ -1,4 +1,4 @@
version: '3.8'
version: "3.8"
services:
# Next.orly.dev relay (this repository)
@@ -19,7 +19,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
test:
[
"CMD-SHELL",
"code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -41,7 +45,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -63,7 +71,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -87,7 +99,11 @@ services:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -108,7 +124,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -130,7 +150,15 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080",
]
interval: 30s
timeout: 10s
retries: 3

30
contrib/stella/APACHE-PROXY-GUIDE.md

@@ -4,6 +4,7 @@
**Updated with real-world troubleshooting solutions and latest Orly relay improvements**
## 🎯 **What This Solves**
- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
@@ -16,6 +17,7 @@
## 🐳 **Step 1: Deploy Your Docker Application**
### **For Stella's Orly Relay (Latest Version with Proxy Improvements):**
```bash
# Pull and run the relay with enhanced proxy support
docker run -d \
@@ -39,6 +41,7 @@ curl -I http://127.0.0.1:7777
```
### **For Web Apps (like Jumble):**
```bash
# Run with fixed port for easier proxy setup
docker run -d \
@@ -103,6 +106,7 @@ curl -I http://127.0.0.1:3000
```
**Then enable it:**
```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
@@ -121,6 +125,7 @@ sudo systemctl reload apache2
5. **In HTTPS section, add:**
**For Nostr Relay (port 7777):**
```apache
ProxyRequests Off
ProxyPreserveHost On
@@ -190,6 +195,7 @@ apache2ctl -M | grep -E "(proxy|rewrite)"
```
#### **For Web Apps (port 3000 or 32768):**
```apache
ProxyPreserveHost On
ProxyRequests Off
@@ -269,6 +275,7 @@ sudo systemctl restart apache2
## 🆕 **Step 4: Latest Orly Relay Improvements**
### **Enhanced Proxy Support**
The latest Orly relay includes several proxy improvements:
1. **Flexible WebSocket Scheme Handling**: Accepts both `ws://` and `wss://` schemes for authentication
@@ -277,6 +284,7 @@ The latest Orly relay includes several proxy improvements:
4. **Proxy-Aware Logging**: Better debugging information for proxy setups
### **Key Environment Variables**
```bash
# Essential for proxy setups
ORLY_RELAY_URL=wss://your-domain.com # Must match your public URL
@@ -286,6 +294,7 @@ ORLY_SUBSCRIPTION_ENABLED=false # Disable payment requirements
```
### **Testing the Enhanced Relay**
```bash
# Test local connectivity
curl -I http://127.0.0.1:7777
@@ -338,32 +347,38 @@ After making changes:
## 🚨 **Real-World Troubleshooting Guide**
*Based on actual deployment experience with Plesk and WebSocket issues*
_Based on actual deployment experience with Plesk and WebSocket issues_
### **Critical Issues & Solutions:**
#### **🔴 HTTP 503 Service Unavailable**
- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`
#### **🔴 HTTP 426 Instead of WebSocket Upgrade**
- **Cause**: Apache using `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)
#### **🔴 Plesk Configuration Not Applied**
- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use Direct Apache Override method (bypass Plesk interface)
#### **🔴 Virtual Host Conflicts**
- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`
#### **🔴 Nginx Intercepting (Plesk)**
- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings
### **Debug Commands:**
```bash
# Essential debugging
docker ps | grep relay # Container running?
@@ -383,9 +398,11 @@ docker logs relay-name | grep -i "websocket connection"
## 🚨 **Latest Troubleshooting Solutions**
### **WebSocket Scheme Validation Errors**
**Problem**: `"HTTP Scheme incorrect: expected 'ws' got 'wss'"`
**Solution**: Use the latest Orly relay image with enhanced proxy support:
```bash
# Pull the latest image with proxy improvements
docker pull silberengel/next-orly:latest
@@ -396,17 +413,21 @@ docker stop orly-relay && docker rm orly-relay
```
### **Malformed Client Data Errors**
**Problem**: `"invalid hex array size, got 2 expect 64"`
**Solution**: These are client-side issues, not server problems. The latest relay handles them gracefully:
- The relay now sends helpful error messages to clients
- Malformed requests are logged but don't crash the relay
- Normal operations continue despite client errors
### **Follows ACL Not Working**
**Problem**: Only owners can write, admins can't write
**Solution**: Ensure proper configuration:
```bash
# Check ACL configuration
docker exec orly-relay env | grep ACL
@@ -416,9 +437,11 @@ docker exec orly-relay env | grep ACL
```
### **Spider Not Syncing Content**
**Problem**: Spider enabled but not pulling events
**Solution**: Check for relay lists and follow events:
```bash
# Check spider status
docker logs orly-relay | grep -i spider
@@ -431,6 +454,7 @@ docker logs orly-relay | grep -i "kind.*3"
```
### **Working Solution (Proven):**
```apache
<VirtualHost SERVER_IP:443>
ServerName domain.com
@@ -452,6 +476,7 @@ docker logs orly-relay | grep -i "kind.*3"
---
**Key Lessons**:
1. Plesk interface often fails to apply Apache directives
2. Use `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than Plesk interface
@@ -464,17 +489,20 @@ docker logs orly-relay | grep -i "kind.*3"
## 🎉 **Summary of Latest Improvements**
### **Enhanced Proxy Support**
- ✅ Flexible WebSocket scheme validation (accepts both `ws://` and `wss://`)
- ✅ Enhanced CORS headers for better web app compatibility
- ✅ Improved error handling for malformed client data
- ✅ Proxy-aware logging for better debugging
### **Spider and ACL Features**
- ✅ Follows-based access control (`ORLY_ACL_MODE=follows`)
- ✅ Content syncing from other relays (`ORLY_SPIDER_MODE=follows`)
- ✅ No payment requirements (`ORLY_SUBSCRIPTION_ENABLED=false`)
### **Production Ready**
- ✅ Robust error handling
- ✅ Enhanced logging and debugging
- ✅ Better client compatibility

9
contrib/stella/DOCKER.md

@@ -37,6 +37,7 @@ cp env.example .env
```
Key settings:
- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
@@ -50,6 +51,7 @@ The relay data is stored in `./data` directory which is mounted as a volume.
### Performance Tuning
Based on the v0.4.8 optimizations:
- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings
@@ -105,11 +107,13 @@ go run ./cmd/stresstest -relay ws://localhost:7777
### Common Issues (Real-World Experience)
#### **Container Issues:**
1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`
#### **WebSocket Issues:**
4. **HTTP 426 instead of WebSocket upgrade**:
- Use `ws://127.0.0.1:7777` in proxy config, not `http://`
- Ensure `proxy_wstunnel` module is enabled
@@ -119,6 +123,7 @@ go run ./cmd/stresstest -relay ws://localhost:7777
- Add CORS headers to Apache/nginx config
#### **Plesk-Specific Issues:**
6. **Plesk not applying Apache directives**:
- Check if config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- Use direct Apache override if Plesk interface fails
@@ -127,6 +132,7 @@ go run ./cmd/stresstest -relay ws://localhost:7777
- Remove conflicting Plesk configs if needed
#### **SSL Certificate Issues:**
8. **Self-signed certificate after Let's Encrypt**:
- Plesk might not be using the correct certificate
- Import Let's Encrypt certs into Plesk or use direct Apache config
@@ -166,6 +172,7 @@ sudo tail -f /var/log/apache2/domain-error.log
### Working Reverse Proxy Config
**For Apache (direct config file):**
```apache
<VirtualHost SERVER_IP:443>
ServerName domain.com
@@ -185,4 +192,4 @@ sudo tail -f /var/log/apache2/domain-error.log
---
*Crafted for Stella's digital forest* 🌲
_Crafted for Stella's digital forest_ 🌲

29
contrib/stella/SERVICE-WORKER-FIX.md

@@ -1,16 +1,18 @@
# Service Worker Certificate Caching Fix
## 🚨 **Problem**
When accessing Jumble from the ImWald landing page, the service worker serves a cached self-signed certificate instead of the new Let's Encrypt certificate.
## ⚡ **Solutions**
### **Option 1: Force Service Worker Update**
Add this to your Jumble app's service worker or main JavaScript:
```javascript
// Force service worker update and certificate refresh
if ('serviceWorker' in navigator) {
if ("serviceWorker" in navigator) {
navigator.serviceWorker.getRegistrations().then(function (registrations) {
for (let registration of registrations) {
registration.update(); // Force update
@@ -19,7 +21,7 @@ if ('serviceWorker' in navigator) {
}
// Clear all caches on certificate update
if ('caches' in window) {
if ("caches" in window) {
caches.keys().then(function (names) {
for (let name of names) {
caches.delete(name);
@@ -29,17 +31,18 @@ if ('caches' in window) {
```
### **Option 2: Update Service Worker Cache Strategy**
In your service worker file, add cache busting for SSL-sensitive requests:
```javascript
// In your service worker
self.addEventListener('fetch', function(event) {
self.addEventListener("fetch", function (event) {
// Don't cache HTTPS requests that might have certificate issues
if (event.request.url.startsWith('https://') &&
event.request.url.includes('imwald.eu')) {
event.respondWith(
fetch(event.request, { cache: 'no-store' })
);
if (
event.request.url.startsWith("https://") &&
event.request.url.includes("imwald.eu")
) {
event.respondWith(fetch(event.request, { cache: "no-store" }));
return;
}
@@ -48,15 +51,16 @@ self.addEventListener('fetch', function(event) {
```
### **Option 3: Version Your Service Worker**
Update your service worker with a new version number:
```javascript
// At the top of your service worker
const CACHE_VERSION = 'v2.0.1'; // Increment this when certificates change
const CACHE_VERSION = "v2.0.1"; // Increment this when certificates change
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;
// Clear old caches
self.addEventListener('activate', function(event) {
self.addEventListener("activate", function (event) {
event.waitUntil(
caches.keys().then(function (cacheNames) {
return Promise.all(
@@ -64,14 +68,15 @@ self.addEventListener('activate', function(event) {
if (cacheName !== CACHE_NAME) {
return caches.delete(cacheName);
}
})
}),
);
})
}),
);
});
```
### **Option 4: Add Cache Headers**
In your Plesk Apache config for Jumble, add:
```apache

7
contrib/stella/WEBSOCKET-DEBUG.md

@@ -1,11 +1,13 @@
# WebSocket Connection Debug Guide
## 🚨 **Current Issue**
`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`
## 🔍 **Debug Steps**
### **Step 1: Verify Relay is Running**
```bash
# On your server
curl -I http://127.0.0.1:7777
@@ -16,6 +18,7 @@ docker ps | grep stella
```
### **Step 2: Test Apache Modules**
```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"
@@ -30,6 +33,7 @@ sudo systemctl restart apache2
```
### **Step 3: Check Apache Configuration**
```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
@@ -39,6 +43,7 @@ grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.
```
### **Step 4: Test Direct WebSocket Connection**
```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/
@@ -48,6 +53,7 @@ echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/
```
### **Step 5: Check Apache Error Logs**
```bash
# Watch Apache errors in real-time
sudo tail -f /var/log/apache2/error.log
@@ -83,6 +89,7 @@ ProxyAddHeaders On
```
### **Alternative Simpler Version:**
If the above doesn't work, try just:
```apache

4
contrib/stella/docker-compose.yml

@@ -52,10 +52,10 @@ services:
resources:
limits:
memory: 1G
cpus: '1.0'
cpus: "1.0"
reservations:
memory: 256M
cpus: '0.25'
cpus: "0.25"
# Logging configuration
logging:

28
docs/websocket-req-comparison.md

@@ -10,12 +10,14 @@ This document compares how two Nostr relay implementations handle WebSocket conn
## Architecture Comparison
### Khatru Architecture
- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types
- **Inline processing**: REQ handling is embedded within the main websocket handler
- **Hook-based extensibility**: Uses function slices for customizable behavior
- **Simple structure**: WebSocket struct with basic fields and mutex for thread safety
### Next.orly.dev Architecture
- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)
- **Layered processing**: Message identification → envelope parsing → type-specific handling
- **Publisher-subscriber system**: Dedicated infrastructure for subscription management
@@ -24,6 +26,7 @@ This document compares how two Nostr relay implementations handle WebSocket conn
## Connection Establishment
### Khatru
```go
// Simple websocket upgrade
conn, err := rl.upgrader.Upgrade(w, r, nil)
@@ -36,6 +39,7 @@ ws := &WebSocket{
```
### Next.orly.dev
```go
// More sophisticated setup with IP whitelisting
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
@@ -50,6 +54,7 @@ listener := &Listener{
```
**Key Differences:**
- Next.orly.dev includes IP whitelisting and immediate authentication challenges
- Khatru uses fasthttp/websocket library vs next.orly.dev using coder/websocket
- Next.orly.dev has more detailed connection state tracking
@@ -57,11 +62,13 @@ listener := &Listener{
## Message Processing
### Khatru
- Uses `nostr.MessageParser` for sequential parsing
- Switch statement on envelope type within goroutine
- Direct processing without intermediate validation layers
### Next.orly.dev
- Custom envelope identification system (`envelopes.Identify`)
- Separate validation and processing phases
- Extensive logging and error handling at each step
@@ -69,6 +76,7 @@ listener := &Listener{
## REQ Message Handling
### Khatru REQ Processing
```go
case *nostr.ReqEnvelope:
eose := sync.WaitGroup{}
@@ -93,6 +101,7 @@ case *nostr.ReqEnvelope:
```
### Next.orly.dev REQ Processing
```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
@@ -117,12 +126,14 @@ for _, f := range *env.Filters {
### 1. **Filter Processing Strategy**
**Khatru:**
- Processes each filter independently and concurrently
- Uses WaitGroup to coordinate EOSE across all filters
- Immediately sets up listeners for ongoing subscriptions
- Fails entire subscription if any filter is rejected
**Next.orly.dev:**
- Processes all filters sequentially in a single context
- Collects all events before applying access control
- Only sets up subscriptions for filters that need ongoing updates
@@ -131,11 +142,13 @@ for _, f := range *env.Filters {
### 2. **Access Control Integration**
**Khatru:**
- Basic NIP-42 authentication support
- Hook-based authorization via `RejectFilter` functions
- Limited built-in access control features
**Next.orly.dev:**
- Comprehensive ACL system with multiple access levels
- Built-in support for private events with npub authorization
- Privileged event filtering based on pubkey and p-tags
@@ -144,6 +157,7 @@ for _, f := range *env.Filters {
### 3. **Subscription Management**
**Khatru:**
```go
// Simple listener registration
type listenerSpec struct {
@@ -155,6 +169,7 @@ rl.addListener(ws, subscriptionID, relay, filter, cancel)
```
**Next.orly.dev:**
```go
// Publisher-subscriber system with rich metadata
type W struct {
@@ -171,11 +186,13 @@ l.publishers.Receive(&W{...})
### 4. **Performance Optimizations**
**Khatru:**
- Concurrent filter processing
- Immediate streaming of events as they're found
- Memory-efficient with direct event streaming
**Next.orly.dev:**
- Batch processing with deduplication
- Memory management with explicit `ev.Free()` calls
- Smart subscription cancellation for ID-only queries
@@ -184,11 +201,13 @@ l.publishers.Receive(&W{...})
### 5. **Error Handling & Observability**
**Khatru:**
- Basic error logging
- Simple connection state management
- Limited metrics and observability
**Next.orly.dev:**
- Comprehensive error handling with context preservation
- Detailed logging at each processing stage
- Built-in metrics (message count, REQ count, event count)
@@ -197,11 +216,13 @@ l.publishers.Receive(&W{...})
## Memory Management
### Khatru
- Relies on Go's garbage collector
- Simple WebSocket struct with minimal state
- Uses sync.Map for thread-safe operations
### Next.orly.dev
- Explicit memory management with `ev.Free()` calls
- Resource pooling and reuse patterns
- Detailed tracking of connection resources
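A rough sketch of how an explicit-release call like `ev.Free()` is commonly backed by a `sync.Pool`; the relay's actual pooling may differ:
```go
package pool

import "sync"

// E is a simplified stand-in for the relay's event type.
type E struct {
	ID, Pubkey, Sig, Content []byte
}

var events = sync.Pool{New: func() any { return new(E) }}

// Get returns a recycled or freshly allocated event.
func Get() *E { return events.Get().(*E) }

// Free zeroes the event and returns it to the pool, so the next Get
// reuses the allocation instead of pressuring the garbage collector.
func (ev *E) Free() {
	*ev = E{}
	events.Put(ev)
}
```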
@@ -209,11 +230,13 @@ l.publishers.Receive(&W{...})
## Concurrency Models
### Khatru
- Per-connection goroutine for message reading
- Additional goroutines for each message processing
- WaitGroup coordination for multi-filter EOSE
### Next.orly.dev
- Per-connection goroutine with single-threaded message processing
- Publisher-subscriber system handles concurrent event distribution
- Context-based cancellation throughout
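A sketch of the context-based cancellation pattern, using the coder/websocket read API; `process` is a placeholder for the message handling shown earlier:
```go
// Every subscription derives from the connection context, so closing
// the socket tears down all outstanding REQ streams.
func serve(ctx context.Context, conn *websocket.Conn) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancels everything derived from ctx
	for {
		_, msg, err := conn.Read(ctx)
		if err != nil {
			return // read error or cancellation ends the connection
		}
		process(ctx, msg) // subscriptions inherit ctx and stop with it
	}
}
```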
@@ -221,18 +244,21 @@ l.publishers.Receive(&W{...})
## Trade-offs Analysis
### Khatru Advantages
- **Simplicity**: Easier to understand and modify
- **Performance**: Lower latency due to concurrent processing
- **Flexibility**: Hook-based architecture allows extensive customization
- **Streaming**: Events sent as soon as they're found
### Khatru Disadvantages
- **Monolithic**: Large methods harder to maintain
- **Limited ACL**: Basic authentication and authorization
- **Error handling**: Less graceful failure recovery
- **Resource usage**: No explicit memory management
### Next.orly.dev Advantages
- **Security**: Comprehensive ACL and privacy features
- **Observability**: Extensive logging and metrics
- **Resource management**: Explicit memory and connection lifecycle management
@@ -240,6 +266,7 @@ l.publishers.Receive(&W{...})
- **Robustness**: Graceful handling of edge cases and failures
### Next.orly.dev Disadvantages
- **Complexity**: Higher cognitive overhead and learning curve
- **Latency**: Sequential processing may be slower for some use cases
- **Resource overhead**: More memory usage due to batching and state tracking
@@ -253,6 +280,7 @@ Both implementations represent different philosophies:
- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features
The choice between them depends on specific requirements:
- Choose **Khatru** for high-performance relays with custom business logic
- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring

5
pkg/crypto/ec/README.md

@@ -1,5 +1,4 @@
realy.lol/pkg/ec
=====
# realy.lol/pkg/ec
This is a full drop-in replacement for
[github.com/btcsuite/btcd/btcec](https://github.com/btcsuite/btcd/tree/master/btcec)
@@ -20,7 +19,7 @@ message signing with the extra test vectors present and passing.
The remainder of this document is from the original README.md.
------------------------------------------------------------------------------
---
Package `ec` implements elliptic curve cryptography needed for working with
Bitcoin. It is designed so that it may be used with the standard

6
pkg/crypto/ec/chainhash/README.md

@@ -1,8 +1,6 @@
chainhash
=========
# chainhash
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
=======
# [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
chainhash provides a generic hash type and associated functions that allows the
specific hash algorithm to be abstracted.

3
pkg/crypto/ec/ecdsa/README.md

@@ -1,5 +1,4 @@
ecdsa
=====
# ecdsa
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/mleku.online/git/ec/secp/ecdsa)

65
pkg/crypto/ec/musig2/data/key_agg_vectors.json

@@ -14,45 +14,25 @@
],
"valid_test_cases": [
{
"key_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
},
{
"key_indices": [
2,
1,
0
],
"key_indices": [2, 1, 0],
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
},
{
"key_indices": [
0,
0,
0
],
"key_indices": [0, 0, 0],
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
},
{
"key_indices": [
0,
0,
1,
1
],
"key_indices": [0, 0, 1, 1],
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
}
],
"error_test_cases": [
{
"key_indices": [
0,
3
],
"key_indices": [0, 3],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -63,10 +43,7 @@
"comment": "Invalid public key"
},
{
"key_indices": [
0,
4
],
"key_indices": [0, 4],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -77,10 +54,7 @@
"comment": "Public key exceeds field size"
},
{
"key_indices": [
5,
0
],
"key_indices": [5, 0],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -91,16 +65,9 @@
"comment": "First byte of public key is not 2 or 3"
},
{
"key_indices": [
0,
1
],
"tweak_indices": [
0
],
"is_xonly": [
true
],
"key_indices": [0, 1],
"tweak_indices": [0],
"is_xonly": [true],
"error": {
"type": "value",
"message": "The tweak must be less than n."
@@ -108,15 +75,9 @@
"comment": "Tweak is out of range"
},
{
"key_indices": [
6
],
"tweak_indices": [
1
],
"is_xonly": [
false
],
"key_indices": [6],
"tweak_indices": [1],
"is_xonly": [false],
"error": {
"type": "value",
"message": "The result of tweaking cannot be infinity."

25
pkg/crypto/ec/musig2/data/nonce_agg_vectors.json

@@ -10,27 +10,18 @@
],
"valid_test_cases": [
{
"pnonce_indices": [
0,
1
],
"pnonce_indices": [0, 1],
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
},
{
"pnonce_indices": [
2,
3
],
"pnonce_indices": [2, 3],
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
}
],
"error_test_cases": [
{
"pnonce_indices": [
0,
4
],
"pnonce_indices": [0, 4],
"error": {
"type": "invalid_contribution",
"signer": 1,
@@ -40,10 +31,7 @@
"btcec_err": "invalid public key: unsupported format: 4"
},
{
"pnonce_indices": [
5,
1
],
"pnonce_indices": [5, 1],
"error": {
"type": "invalid_contribution",
"signer": 0,
@@ -53,10 +41,7 @@
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
},
{
"pnonce_indices": [
6,
1
],
"pnonce_indices": [6, 1],
"error": {
"type": "invalid_contribution",
"signer": 0,

107
pkg/crypto/ec/musig2/data/sig_agg_vectors.json

@@ -33,114 +33,49 @@
"valid_test_cases": [
{
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
"nonce_indices": [
0,
1
],
"key_indices": [
0,
1
],
"nonce_indices": [0, 1],
"key_indices": [0, 1],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [
0,
1
],
"psig_indices": [0, 1],
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
},
{
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
"nonce_indices": [
0,
2
],
"key_indices": [
0,
2
],
"nonce_indices": [0, 2],
"key_indices": [0, 2],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [
2,
3
],
"psig_indices": [2, 3],
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
},
{
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
"nonce_indices": [
0,
3
],
"key_indices": [
0,
2
],
"tweak_indices": [
0
],
"is_xonly": [
false
],
"psig_indices": [
4,
5
],
"nonce_indices": [0, 3],
"key_indices": [0, 2],
"tweak_indices": [0],
"is_xonly": [false],
"psig_indices": [4, 5],
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
},
{
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
"nonce_indices": [
0,
4
],
"key_indices": [
0,
3
],
"tweak_indices": [
0,
1,
2
],
"is_xonly": [
true,
false,
true
],
"psig_indices": [
6,
7
],
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [true, false, true],
"psig_indices": [6, 7],
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
}
],
"error_test_cases": [
{
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
"nonce_indices": [
0,
4
],
"key_indices": [
0,
3
],
"tweak_indices": [
0,
1,
2
],
"is_xonly": [
true,
false,
true
],
"psig_indices": [
7,
8
],
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [true, false, true],
"psig_indices": [7, 8],
"error": {
"type": "invalid_contribution",
"signer": 1

141
pkg/crypto/ec/musig2/data/sign_verify_vectors.json

@@ -31,62 +31,32 @@
],
"valid_test_cases": [
{
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 0,
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
},
{
"key_indices": [
1,
0,
2
],
"nonce_indices": [
1,
0,
2
],
"key_indices": [1, 0, 2],
"nonce_indices": [1, 0, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 1,
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 2,
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
},
{
"key_indices": [
0,
1
],
"nonce_indices": [
0,
3
],
"key_indices": [0, 1],
"nonce_indices": [0, 3],
"aggnonce_index": 1,
"msg_index": 0,
"signer_index": 0,
@@ -96,10 +66,7 @@
],
"sign_error_test_cases": [
{
"key_indices": [
1,
2
],
"key_indices": [1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"secnonce_index": 0,
@@ -110,11 +77,7 @@
"comment": "The signers pubkey is not in the list of pubkeys"
},
{
"key_indices": [
1,
0,
3
],
"key_indices": [1, 0, 3],
"aggnonce_index": 0,
"msg_index": 0,
"secnonce_index": 0,
@@ -126,11 +89,7 @@
"comment": "Signer 2 provided an invalid public key"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 2,
"msg_index": 0,
"secnonce_index": 0,
@@ -142,11 +101,7 @@
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 3,
"msg_index": 0,
"secnonce_index": 0,
@@ -158,11 +113,7 @@
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 4,
"msg_index": 0,
"secnonce_index": 0,
@@ -174,11 +125,7 @@
"comment": "Aggregate nonce is invalid because second half exceeds field size"
},
{
"key_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 0,
@@ -193,48 +140,24 @@
"verify_fail_test_cases": [
{
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"comment": "Wrong signature (which is equal to the negation of valid signature)"
},
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 1,
"comment": "Wrong signer"
},
{
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"comment": "Signature exceeds group size"
@@ -243,16 +166,8 @@
"verify_error_test_cases": [
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
4,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [4, 1, 2],
"msg_index": 0,
"signer_index": 0,
"error": {
@@ -264,16 +179,8 @@
},
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
3,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [3, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"error": {

134
pkg/crypto/ec/musig2/data/tweak_vectors.json

@@ -22,120 +22,46 @@
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
"valid_test_cases": [
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0
],
"is_xonly": [
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0],
"is_xonly": [true],
"signer_index": 2,
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
"comment": "A single x-only tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0
],
"is_xonly": [
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0],
"is_xonly": [false],
"signer_index": 2,
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
"comment": "A single plain tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1
],
"is_xonly": [
false,
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1],
"is_xonly": [false, true],
"signer_index": 2,
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
"comment": "A plain tweak followed by an x-only tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1,
2,
3
],
"is_xonly": [
false,
false,
true,
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1, 2, 3],
"is_xonly": [false, false, true, true],
"signer_index": 2,
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
"comment": "Four tweaks: plain, plain, x-only, x-only."
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1,
2,
3
],
"is_xonly": [
true,
false,
true,
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1, 2, 3],
"is_xonly": [true, false, true, false],
"signer_index": 2,
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
@@ -143,22 +69,10 @@
],
"error_test_cases": [
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
4
],
"is_xonly": [
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [4],
"is_xonly": [false],
"signer_index": 2,
"error": {
"type": "value",

8
pkg/crypto/sha256/README.md

@@ -95,9 +95,9 @@ Note that, because of the scheduling overhead, for small messages (< 1 MB) you
will be better off using the regular SHA256 hashing (but those are typically not
performance critical anyway). Some other tips to get the best performance:
* Have many go routines doing SHA256 calculations in parallel.
* Try to Write() messages in multiples of 64 bytes.
* Try to keep the overall length of messages to a roughly similar size ie. 5
- Have many go routines doing SHA256 calculations in parallel.
- Try to Write() messages in multiples of 64 bytes.
- Try to keep the overall length of messages to a roughly similar size ie. 5
MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as
much as possible).
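As a rough illustration of the first tip, the sketch below hashes several large buffers in parallel. It imports the standard `crypto/sha256` so it compiles anywhere; swapping in this package's import path (assumed to be API-compatible, as stated above) enables the SIMD implementation:
```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

func main() {
	bufs := make([][]byte, 8)
	for i := range bufs {
		bufs[i] = make([]byte, 5<<20) // ~5 MB each, roughly similar sizes
	}
	sums := make([][32]byte, len(bufs))
	var wg sync.WaitGroup
	for i, buf := range bufs {
		wg.Add(1)
		go func(i int, buf []byte) {
			defer wg.Done()
			sums[i] = sha256.Sum256(buf) // one goroutine per message
		}(i, buf)
	}
	wg.Wait()
	fmt.Printf("first digest: %x\n", sums[0])
}
```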
@@ -128,7 +128,7 @@ Below is the speed in MB/s for a single core (ranked fast to slow) for blocks
larger than 1 MB.
| Processor | SIMD | Speed (MB/s) |
|-----------------------------------|---------|-------------:|
| --------------------------------- | ------- | -----------: |
| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 |
| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 |
| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 |

3
pkg/utils/atomic/.codecov.yml

@@ -7,7 +7,8 @@ coverage:
project: # measuring the overall project coverage
default: # context, you can create multiple ones with custom titles
enabled: yes # must be yes|true to enable this status
target: 100 # specify the target coverage for each commit status
target:
100 # specify the target coverage for each commit status
# option: "auto" (must increase from parent commit or pull request base)
# option: "X%" a static target percentage to hit
if_not_found: success # if parent is not found report status as success, error, or failure

35
pkg/utils/atomic/CHANGELOG.md

@@ -1,24 +1,31 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
- No changes yet.
## [1.11.0] - 2023-05-02
### Fixed
- Fix `Swap` and `CompareAndSwap` for `Value` wrappers without initialization.
### Added
- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
underlying values of pointers.
[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
## [1.10.0] - 2022-08-11
### Added
- Add `atomic.Float32` type for atomic operations on `float32`.
- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
and `atomic.Value`.
@@ -27,6 +34,7 @@ underlying values of pointers.
replacement for the standard library's `sync/atomic.Pointer` type.
### Changed
- Deprecate `CAS` methods on all types in favor of corresponding
`CompareAndSwap` methods.
@@ -35,46 +43,59 @@ Thanks to @eNV25 and @icpd for their contributions to this release.
[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
## [1.9.0] - 2021-07-15
### Added
- Add `Float64.Swap` to match int atomic operations.
- Add `atomic.Time` type for atomic operations on `time.Time` values.
[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
## [1.8.0] - 2021-06-09
### Added
- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
- Support Text marshalling and unmarshalling for string atomics.
### Changed
- Disallow incorrect comparison of atomic values in a non-atomic way.
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
causing `CAS` to fail even though the old value matches.
[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
@@ -82,43 +103,57 @@ Thanks to @eNV25 and @icpd for their contributions to this release.
[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
### Changed
- Support new `go.uber.org/atomic` import path.
[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0

1
pkg/utils/interrupt/README.md

@@ -1,2 +1,3 @@
# interrupt
Handle shutdowns cleanly and enable hot reload
