Browse Source

Migrate package imports from `next.orly.dev` to new `orly` domain structure; add new `varint` and `binary` encoders with comprehensive tests; enhance existing tag and envelope implementations with additional methods, validations, and test coverage; introduce shared `test.sh` script for streamlined testing across modules.

main
mleku 4 months ago
parent
commit
91d95c6f1a
No known key found for this signature in database
  1. 10
      app/handle-message.go
  2. 2
      app/handle-relayinfo.go
  3. 12
      cmd/eventpool/eventpool.go
  4. 25
      go.mod
  5. 9
      go.sum
  6. 4
      pkg/crypto/ec/base58/base58_test.go
  7. 2
      pkg/crypto/ec/base58/base58bench_test.go
  8. 2
      pkg/crypto/ec/base58/base58check.go
  9. 2
      pkg/crypto/ec/base58/base58check_test.go
  10. 2
      pkg/crypto/ec/base58/example_test.go
  11. 2
      pkg/crypto/ec/bech32/bech32_test.go
  12. 4
      pkg/crypto/ec/bench_test.go
  13. 2
      pkg/crypto/ec/btcec.go
  14. 2
      pkg/crypto/ec/chaincfg/deployment_time_frame.go
  15. 4
      pkg/crypto/ec/chaincfg/genesis.go
  16. 4
      pkg/crypto/ec/chaincfg/params.go
  17. 4
      pkg/crypto/ec/chainhash/hash.go
  18. 2
      pkg/crypto/ec/chainhash/hash_test.go
  19. 2
      pkg/crypto/ec/chainhash/hashfuncs.go
  20. 2
      pkg/crypto/ec/ciphering.go
  21. 2
      pkg/crypto/ec/ciphering_test.go
  22. 2
      pkg/crypto/ec/curve.go
  23. 4
      pkg/crypto/ec/ecdsa/bench_test.go
  24. 2
      pkg/crypto/ec/ecdsa/signature.go
  25. 6
      pkg/crypto/ec/ecdsa/signature_test.go
  26. 2
      pkg/crypto/ec/error.go
  27. 2
      pkg/crypto/ec/field.go
  28. 2
      pkg/crypto/ec/field_test.go
  29. 2
      pkg/crypto/ec/fuzz_test.go
  30. 2
      pkg/crypto/ec/modnscalar.go
  31. 6
      pkg/crypto/ec/musig2/bench_test.go
  32. 4
      pkg/crypto/ec/musig2/context.go
  33. 10
      pkg/crypto/ec/musig2/keys.go
  34. 8
      pkg/crypto/ec/musig2/keys_test.go
  35. 6
      pkg/crypto/ec/musig2/musig2_test.go
  36. 6
      pkg/crypto/ec/musig2/nonces.go
  37. 6
      pkg/crypto/ec/musig2/nonces_test.go
  38. 11
      pkg/crypto/ec/musig2/sign.go
  39. 7
      pkg/crypto/ec/musig2/sign_test.go
  40. 2
      pkg/crypto/ec/pubkey.go
  41. 2
      pkg/crypto/ec/pubkey_test.go
  42. 8
      pkg/crypto/ec/schnorr/bench_test.go
  43. 4
      pkg/crypto/ec/schnorr/pubkey.go
  44. 6
      pkg/crypto/ec/schnorr/signature.go
  45. 6
      pkg/crypto/ec/schnorr/signature_test.go
  46. 2
      pkg/crypto/ec/seckey.go
  47. 2
      pkg/crypto/ec/secp256k1/curve.go
  48. 2
      pkg/crypto/ec/secp256k1/ecdh_test.go
  49. 6
      pkg/crypto/ec/secp256k1/example_test.go
  50. 2
      pkg/crypto/ec/secp256k1/field.go
  51. 4
      pkg/crypto/ec/secp256k1/field_test.go
  52. 2
      pkg/crypto/ec/secp256k1/modnscalar.go
  53. 4
      pkg/crypto/ec/secp256k1/modnscalar_test.go
  54. 2
      pkg/crypto/ec/secp256k1/nonce.go
  55. 6
      pkg/crypto/ec/secp256k1/nonce_test.go
  56. 2
      pkg/crypto/ec/secp256k1/precomps/genprecomps.go
  57. 2
      pkg/crypto/ec/secp256k1/pubkey_test.go
  58. 2
      pkg/crypto/ec/secp256k1/seckey_test.go
  59. 6
      pkg/crypto/ec/taproot/taproot.go
  60. 2
      pkg/crypto/ec/wire/blockheader.go
  61. 2
      pkg/crypto/ec/wire/msgtx.go
  62. 30
      pkg/crypto/go.mod
  63. 27
      pkg/crypto/go.sum
  64. 2
      pkg/crypto/p256k/btcec.go
  65. 19
      pkg/crypto/p256k/btcec/btcec.go
  66. 5
      pkg/crypto/p256k/btcec/btcec_test.go
  67. 4
      pkg/crypto/p256k/btcec/helpers-btcec.go
  68. 4
      pkg/crypto/p256k/helpers.go
  69. 8
      pkg/crypto/p256k/p256k.go
  70. 17
      pkg/crypto/p256k/p256k_test.go
  71. 6
      pkg/crypto/p256k/secp256k1.go
  72. 2
      pkg/crypto/sha256/README.md
  73. 4
      pkg/crypto/sha256/sha256.go
  74. 2
      pkg/crypto/sha256/sha256_test.go
  75. 132
      pkg/database/database.go
  76. 76
      pkg/database/delete-event.go
  77. 62
      pkg/database/delete-expired.go
  78. 106
      pkg/database/export.go
  79. 111
      pkg/database/export_test.go
  80. 38
      pkg/database/fetch-event-by-serial.go
  81. 156
      pkg/database/fetch-event-by-serial_test.go
  82. 56
      pkg/database/get-fullidpubkey-by-serial.go
  83. 74
      pkg/database/get-fullidpubkey-by-serials.go
  84. 156
      pkg/database/get-indexes-for-event.go
  85. 304
      pkg/database/get-indexes-for-event_test.go
  86. 388
      pkg/database/get-indexes-from-filter.go
  87. 587
      pkg/database/get-indexes-from-filter_test.go
  88. 77
      pkg/database/get-serial-by-id.go
  89. 101
      pkg/database/get-serial-by-id_test.go
  90. 51
      pkg/database/get-serials-by-range.go
  91. 232
      pkg/database/get-serials-by-range_test.go
  92. 51
      pkg/database/go.mod
  93. 68
      pkg/database/go.sum
  94. 83
      pkg/database/import.go
  95. 439
      pkg/database/indexes/keys.go
  96. 981
      pkg/database/indexes/keys_test.go
  97. 419
      pkg/database/indexes/types/endianness_test.go
  98. 38
      pkg/database/indexes/types/fullid.go
  99. 115
      pkg/database/indexes/types/fullid_test.go
  100. 31
      pkg/database/indexes/types/identhash.go
  101. Some files were not shown because too many files have changed in this diff Show More

10
app/handle-message.go

@ -3,13 +3,13 @@ package app @@ -3,13 +3,13 @@ package app
import (
"fmt"
"encoders.orly/envelopes"
"encoders.orly/envelopes/authenvelope"
"encoders.orly/envelopes/closeenvelope"
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/reqenvelope"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/encoders/envelopes"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
)
func (s *Server) HandleMessage(msg []byte, remote string) {

2
app/handle-relayinfo.go

@ -7,8 +7,8 @@ import ( @@ -7,8 +7,8 @@ import (
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/protocol/relayinfo"
"next.orly.dev/pkg/version"
"protocol.orly/relayinfo"
)
// HandleRelayInfo generates and returns a relay information document in JSON

12
cmd/eventpool/eventpool.go

@ -3,16 +3,16 @@ package main @@ -3,16 +3,16 @@ package main
import (
"time"
"encoders.orly/event"
"encoders.orly/hex"
"encoders.orly/json"
"encoders.orly/tag"
"github.com/pkg/profile"
lol "lol.mleku.dev"
"lol.mleku.dev/chk"
"lukechampine.com/frand"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/json"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/utils"
"next.orly.dev/pkg/utils/bufpool"
"utils.orly"
"utils.orly/bufpool"
)
func main() {

25
go.mod

@ -3,27 +3,38 @@ module next.orly.dev @@ -3,27 +3,38 @@ module next.orly.dev
go 1.25.0
require (
encoders.orly v0.0.0-00010101000000-000000000000
github.com/adrg/xdg v0.5.3
github.com/coder/websocket v1.8.13
github.com/davecgh/go-spew v1.1.1
github.com/klauspost/cpuid/v2 v2.3.0
github.com/pkg/profile v1.7.0
github.com/stretchr/testify v1.10.0
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
go-simpler.org/env v0.12.0
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
lol.mleku.dev v1.0.2
lukechampine.com/frand v1.5.1
protocol.orly v0.0.0-00010101000000-000000000000
utils.orly v0.0.0-00010101000000-000000000000
)
require (
crypto.orly v0.0.0-00010101000000-000000000000 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/templexxx/cpu v0.0.1 // indirect
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
golang.org/x/sys v0.35.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
interfaces.orly v0.0.0-00010101000000-000000000000 // indirect
)
replace (
crypto.orly => ./pkg/crypto
encoders.orly => ./pkg/encoders
interfaces.orly => ./pkg/interfaces
next.orly.dev => ../../
protocol.orly => ./pkg/protocol
utils.orly => ./pkg/utils
)

9
go.sum

@ -15,10 +15,6 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu @@ -15,10 +15,6 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/karrick/bufpool v1.2.0 h1:AfhYmVv8A62iOzB31RuJrGLTdHlvBbl0+rh8Gvgvybg=
github.com/karrick/bufpool v1.2.0/go.mod h1:ZRBxSXJi05b7mfd7kcL1M86UL1x8dTValcwCQp7I7P8=
github.com/karrick/gopool v1.1.0 h1:b9C9zwnRjgu9RNQPfiGEFmCDm3OdRuLpY7qYIDf8b28=
github.com/karrick/gopool v1.1.0/go.mod h1:Llf0mwk3WWtY0AIQoodGWVOU+5xfvUWqJKvck2qNwBU=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
@ -33,8 +29,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ @@ -33,8 +29,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
@ -47,7 +43,6 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc @@ -47,7 +43,6 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

4
pkg/crypto/ec/base58/base58_test.go

@ -8,8 +8,8 @@ import ( @@ -8,8 +8,8 @@ import (
"encoding/hex"
"testing"
"next.orly.dev/pkg/crypto/ec/base58"
"next.orly.dev/pkg/utils"
"crypto.orly/ec/base58"
"utils.orly"
)
var stringTests = []struct {

2
pkg/crypto/ec/base58/base58bench_test.go

@ -8,7 +8,7 @@ import ( @@ -8,7 +8,7 @@ import (
"bytes"
"testing"
"next.orly.dev/pkg/crypto/ec/base58"
"crypto.orly/ec/base58"
)
var (

2
pkg/crypto/ec/base58/base58check.go

@ -7,7 +7,7 @@ package base58 @@ -7,7 +7,7 @@ package base58
import (
"errors"
"next.orly.dev/pkg/crypto/sha256"
"crypto.orly/sha256"
)
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against

2
pkg/crypto/ec/base58/base58check_test.go

@ -7,7 +7,7 @@ package base58_test @@ -7,7 +7,7 @@ package base58_test
import (
"testing"
"next.orly.dev/pkg/crypto/ec/base58"
"crypto.orly/ec/base58"
)
var checkEncodingStringTests = []struct {

2
pkg/crypto/ec/base58/example_test.go

@ -7,7 +7,7 @@ package base58_test @@ -7,7 +7,7 @@ package base58_test
import (
"fmt"
"next.orly.dev/pkg/crypto/ec/base58"
"crypto.orly/ec/base58"
)
// This example demonstrates how to decode modified base58 encoded data.

2
pkg/crypto/ec/bech32/bech32_test.go

@ -13,7 +13,7 @@ import ( @@ -13,7 +13,7 @@ import (
"strings"
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test

4
pkg/crypto/ec/bench_test.go

@ -8,8 +8,8 @@ import ( @@ -8,8 +8,8 @@ import (
"math/big"
"testing"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
)
// setHex decodes the passed big-endian hex string into the internal field value

2
pkg/crypto/ec/btcec.go

@ -20,7 +20,7 @@ package btcec @@ -20,7 +20,7 @@ package btcec
// reverse the transform than to operate in affine coordinates.
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC

2
pkg/crypto/ec/chaincfg/deployment_time_frame.go

@ -4,7 +4,7 @@ import ( @@ -4,7 +4,7 @@ import (
"fmt"
"time"
"next.orly.dev/pkg/crypto/ec/wire"
"crypto.orly/ec/wire"
)
var (

4
pkg/crypto/ec/chaincfg/genesis.go

@ -3,8 +3,8 @@ package chaincfg @@ -3,8 +3,8 @@ package chaincfg
import (
"time"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/wire"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/wire"
)
var (

4
pkg/crypto/ec/chaincfg/params.go

@ -5,8 +5,8 @@ import ( @@ -5,8 +5,8 @@ import (
"math/big"
"time"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/wire"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/wire"
)
var (

4
pkg/crypto/ec/chainhash/hash.go

@ -9,8 +9,8 @@ import ( @@ -9,8 +9,8 @@ import (
"encoding/json"
"fmt"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/sha256"
"encoders.orly/hex"
)
const (

2
pkg/crypto/ec/chainhash/hash_test.go

@ -7,7 +7,7 @@ package chainhash @@ -7,7 +7,7 @@ package chainhash
import (
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// mainNetGenesisHash is the hash of the first block in the block chain for the

2
pkg/crypto/ec/chainhash/hashfuncs.go

@ -6,7 +6,7 @@ @@ -6,7 +6,7 @@
package chainhash
import (
"next.orly.dev/pkg/crypto/sha256"
"crypto.orly/sha256"
)
// HashB calculates hash(b) and returns the resulting bytes.

2
pkg/crypto/ec/ciphering.go

@ -5,7 +5,7 @@ @@ -5,7 +5,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// GenerateSharedSecret generates a shared secret based on a secret key and a

2
pkg/crypto/ec/ciphering_test.go

@ -7,7 +7,7 @@ package btcec @@ -7,7 +7,7 @@ package btcec
import (
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
func TestGenerateSharedSecret(t *testing.T) {

2
pkg/crypto/ec/curve.go

@ -6,7 +6,7 @@ package btcec @@ -6,7 +6,7 @@ package btcec
import (
"fmt"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// JacobianPoint is an element of the group formed by the secp256k1 curve in

4
pkg/crypto/ec/ecdsa/bench_test.go

@ -8,8 +8,8 @@ package ecdsa @@ -8,8 +8,8 @@ package ecdsa
import (
"testing"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
)
// hexToModNScalar converts the passed hex string into a ModNScalar and will

2
pkg/crypto/ec/ecdsa/signature.go

@ -8,7 +8,7 @@ package ecdsa @@ -8,7 +8,7 @@ package ecdsa
import (
"fmt"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// References:

6
pkg/crypto/ec/ecdsa/signature_test.go

@ -14,10 +14,10 @@ import ( @@ -14,10 +14,10 @@ import (
"testing"
"time"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// hexToBytes converts the passed hex string into bytes and will panic if there

2
pkg/crypto/ec/error.go

@ -4,7 +4,7 @@ @@ -4,7 +4,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// Error identifies an error related to public key cryptography using a

2
pkg/crypto/ec/field.go

@ -1,7 +1,7 @@ @@ -1,7 +1,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// FieldVal implements optimized fixed-precision arithmetic over the secp256k1

2
pkg/crypto/ec/field_test.go

@ -9,8 +9,8 @@ import ( @@ -9,8 +9,8 @@ import (
"math/rand"
"testing"
"encoders.orly/hex"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/hex"
)
// TestIsZero ensures that checking if a field IsZero works as expected.

2
pkg/crypto/ec/fuzz_test.go

@ -11,7 +11,7 @@ package btcec @@ -11,7 +11,7 @@ package btcec
import (
"testing"
"next.orly.dev/pkg/encoders/hex"
"encoders.orly/hex"
)
func FuzzParsePubKey(f *testing.F) {

2
pkg/crypto/ec/modnscalar.go

@ -4,7 +4,7 @@ @@ -4,7 +4,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// ModNScalar implements optimized 256-bit constant-time fixed-precision

6
pkg/crypto/ec/musig2/bench_test.go

@ -8,9 +8,9 @@ import ( @@ -8,9 +8,9 @@ import (
"fmt"
"testing"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec"
"crypto.orly/ec/schnorr"
"encoders.orly/hex"
)
var (

4
pkg/crypto/ec/musig2/context.go

@ -5,9 +5,9 @@ package musig2 @@ -5,9 +5,9 @@ package musig2
import (
"fmt"
"crypto.orly/ec"
"crypto.orly/ec/schnorr"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/schnorr"
)
var (

10
pkg/crypto/ec/musig2/keys.go

@ -7,12 +7,12 @@ import ( @@ -7,12 +7,12 @@ import (
"fmt"
"sort"
"next.orly.dev/pkg/utils"
"utils.orly"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/schnorr"
"crypto.orly/ec/secp256k1"
)
var (

8
pkg/crypto/ec/musig2/keys_test.go

@ -10,10 +10,10 @@ import ( @@ -10,10 +10,10 @@ import (
"strings"
"testing"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec"
"crypto.orly/ec/schnorr"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
"github.com/stretchr/testify/require"
)

6
pkg/crypto/ec/musig2/musig2_test.go

@ -8,9 +8,9 @@ import ( @@ -8,9 +8,9 @@ import (
"sync"
"testing"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec"
"crypto.orly/sha256"
"encoders.orly/hex"
)
const (

6
pkg/crypto/ec/musig2/nonces.go

@ -9,10 +9,10 @@ import ( @@ -9,10 +9,10 @@ import (
"errors"
"io"
"crypto.orly/ec"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/schnorr"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/schnorr"
)
const (

6
pkg/crypto/ec/musig2/nonces_test.go

@ -9,11 +9,9 @@ import ( @@ -9,11 +9,9 @@ import (
"path"
"testing"
"next.orly.dev/pkg/utils"
"next.orly.dev/pkg/encoders/hex"
"encoders.orly/hex"
"github.com/stretchr/testify/require"
"utils.orly"
)
type nonceGenTestCase struct {

11
pkg/crypto/ec/musig2/sign.go

@ -7,13 +7,12 @@ import ( @@ -7,13 +7,12 @@ import (
"fmt"
"io"
"next.orly.dev/pkg/utils"
"crypto.orly/ec"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/schnorr"
"crypto.orly/ec/secp256k1"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"utils.orly"
)
var (

7
pkg/crypto/ec/musig2/sign_test.go

@ -11,10 +11,9 @@ import ( @@ -11,10 +11,9 @@ import (
"strings"
"testing"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
"github.com/stretchr/testify/require"
)

2
pkg/crypto/ec/pubkey.go

@ -5,7 +5,7 @@ @@ -5,7 +5,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// These constants define the lengths of serialized public keys.

2
pkg/crypto/ec/pubkey_test.go

@ -7,7 +7,7 @@ package btcec @@ -7,7 +7,7 @@ package btcec
import (
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
"github.com/davecgh/go-spew/spew"
)

8
pkg/crypto/ec/schnorr/bench_test.go

@ -9,10 +9,10 @@ import ( @@ -9,10 +9,10 @@ import (
"math/big"
"testing"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec"
"crypto.orly/ec/secp256k1"
"crypto.orly/sha256"
"encoders.orly/hex"
)
// hexToBytes converts the passed hex string into bytes and will panic if there

4
pkg/crypto/ec/schnorr/pubkey.go

@ -8,8 +8,8 @@ package schnorr @@ -8,8 +8,8 @@ package schnorr
import (
"fmt"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec"
"crypto.orly/ec/secp256k1"
)
// These constants define the lengths of serialized public keys.

6
pkg/crypto/ec/schnorr/signature.go

@ -5,10 +5,10 @@ package schnorr @@ -5,10 +5,10 @@ package schnorr
import (
"fmt"
"crypto.orly/ec"
"crypto.orly/ec/chainhash"
"crypto.orly/ec/secp256k1"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/chainhash"
"next.orly.dev/pkg/crypto/ec/secp256k1"
)
const (

6
pkg/crypto/ec/schnorr/signature_test.go

@ -11,10 +11,10 @@ import ( @@ -11,10 +11,10 @@ import (
"testing"
"testing/quick"
"crypto.orly/ec"
"crypto.orly/ec/secp256k1"
"encoders.orly/hex"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/encoders/hex"
"github.com/davecgh/go-spew/spew"
)

2
pkg/crypto/ec/seckey.go

@ -5,7 +5,7 @@ @@ -5,7 +5,7 @@
package btcec
import (
"next.orly.dev/pkg/crypto/ec/secp256k1"
"crypto.orly/ec/secp256k1"
)
// SecretKey wraps an ecdsa.SecretKey as a convenience mainly for signing things with the secret key without having to

2
pkg/crypto/ec/secp256k1/curve.go

@ -8,7 +8,7 @@ package secp256k1 @@ -8,7 +8,7 @@ package secp256k1
import (
"math/bits"
"next.orly.dev/pkg/encoders/hex"
"encoders.orly/hex"
)
// References:

2
pkg/crypto/ec/secp256k1/ecdh_test.go

@ -8,7 +8,7 @@ package secp256k1 @@ -8,7 +8,7 @@ package secp256k1
import (
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
func TestGenerateSharedSecret(t *testing.T) {

6
pkg/crypto/ec/secp256k1/example_test.go

@ -11,9 +11,9 @@ import ( @@ -11,9 +11,9 @@ import (
"encoding/binary"
"fmt"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/encoders/hex"
"crypto.orly/ec/secp256k1"
"crypto.orly/sha256"
"encoders.orly/hex"
)
// This example demonstrates use of GenerateSharedSecret to encrypt a message

2
pkg/crypto/ec/secp256k1/field.go

@ -52,7 +52,7 @@ package secp256k1 @@ -52,7 +52,7 @@ package secp256k1
// ordinarily would. See the documentation for FieldVal for more details.
import (
"next.orly.dev/pkg/encoders/hex"
"encoders.orly/hex"
)
// Constants used to make the code more readable.

4
pkg/crypto/ec/secp256k1/field_test.go

@ -14,9 +14,9 @@ import ( @@ -14,9 +14,9 @@ import (
"testing"
"time"
"encoders.orly/hex"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// SetHex decodes the passed big-endian hex string into the internal field value

2
pkg/crypto/ec/secp256k1/modnscalar.go

@ -7,7 +7,7 @@ package secp256k1 @@ -7,7 +7,7 @@ package secp256k1
import (
"math/big"
"next.orly.dev/pkg/encoders/hex"
"encoders.orly/hex"
)
// References:

4
pkg/crypto/ec/secp256k1/modnscalar_test.go

@ -12,9 +12,9 @@ import ( @@ -12,9 +12,9 @@ import (
"testing"
"time"
"encoders.orly/hex"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// SetHex interprets the provided hex string as a 256-bit big-endian unsigned

2
pkg/crypto/ec/secp256k1/nonce.go

@ -9,7 +9,7 @@ import ( @@ -9,7 +9,7 @@ import (
"bytes"
"hash"
"next.orly.dev/pkg/crypto/sha256"
"crypto.orly/sha256"
)
// References:

6
pkg/crypto/ec/secp256k1/nonce_test.go

@ -8,9 +8,9 @@ package secp256k1 @@ -8,9 +8,9 @@ package secp256k1
import (
"testing"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils"
"crypto.orly/sha256"
"encoders.orly/hex"
"utils.orly"
)
// hexToBytes converts the passed hex string into bytes and will panic if there

2
pkg/crypto/ec/secp256k1/precomps/genprecomps.go

@ -13,9 +13,9 @@ import ( @@ -13,9 +13,9 @@ import (
"math/big"
"os"
"crypto.orly/ec/secp256k1"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/ec/secp256k1"
)
// curveParams houses the secp256k1 curve parameters for convenient access.

2
pkg/crypto/ec/secp256k1/pubkey_test.go

@ -9,7 +9,7 @@ import ( @@ -9,7 +9,7 @@ import (
"errors"
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// TestParsePubKey ensures that public keys are properly parsed according

2
pkg/crypto/ec/secp256k1/seckey_test.go

@ -12,7 +12,7 @@ import ( @@ -12,7 +12,7 @@ import (
"math/big"
"testing"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// TestGenerateSecretKey ensures the key generation works as expected.

6
pkg/crypto/ec/taproot/taproot.go

@ -7,10 +7,10 @@ import ( @@ -7,10 +7,10 @@ import (
"errors"
"fmt"
"crypto.orly/ec/bech32"
"crypto.orly/ec/chaincfg"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec/bech32"
"next.orly.dev/pkg/crypto/ec/chaincfg"
"next.orly.dev/pkg/utils"
"utils.orly"
)
// AddressSegWit is the base address type for all SegWit addresses.

2
pkg/crypto/ec/wire/blockheader.go

@ -3,7 +3,7 @@ package wire @@ -3,7 +3,7 @@ package wire
import (
"time"
"next.orly.dev/pkg/crypto/ec/chainhash"
"crypto.orly/ec/chainhash"
)
// BlockHeader defines information about a block and is used in the bitcoin

2
pkg/crypto/ec/wire/msgtx.go

@ -1,7 +1,7 @@ @@ -1,7 +1,7 @@
package wire
import (
"next.orly.dev/pkg/crypto/ec/chainhash"
"crypto.orly/ec/chainhash"
)
// OutPoint defines a bitcoin data type that is used to track previous

30
pkg/crypto/go.mod

@ -0,0 +1,30 @@ @@ -0,0 +1,30 @@
module crypto.orly
go 1.25.0
require (
encoders.orly v0.0.0-00010101000000-000000000000
github.com/davecgh/go-spew v1.1.1
github.com/klauspost/cpuid/v2 v2.3.0
github.com/stretchr/testify v1.11.1
interfaces.orly v0.0.0-00010101000000-000000000000
lol.mleku.dev v1.0.2
utils.orly v0.0.0-00010101000000-000000000000
)
require (
github.com/fatih/color v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/templexxx/cpu v0.0.1 // indirect
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
golang.org/x/sys v0.35.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace (
encoders.orly => ../encoders
interfaces.orly => ../interfaces
utils.orly => ../utils
)

27
pkg/crypto/go.sum

@ -0,0 +1,27 @@ @@ -0,0 +1,27 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=

2
pkg/crypto/p256k/btcec.go

@ -3,8 +3,8 @@ @@ -3,8 +3,8 @@
package p256k
import (
"crypto.orly/p256k/btcec"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k/btcec"
)
func init() {

19
pkg/crypto/p256k/btcec/btcec.go

@ -4,19 +4,18 @@ @@ -4,19 +4,18 @@
package btcec
import (
"crypto.orly/ec/schnorr"
"crypto.orly/ec/secp256k1"
"interfaces.orly/signer"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
btcec3 "next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/interfaces/signer"
)
// Signer is an implementation of signer.I that uses the btcec library.
type Signer struct {
SecretKey *secp256k1.SecretKey
PublicKey *secp256k1.PublicKey
BTCECSec *btcec3.SecretKey
BTCECSec *ec.SecretKey
pkb, skb []byte
}
@ -24,11 +23,11 @@ var _ signer.I = &Signer{} @@ -24,11 +23,11 @@ var _ signer.I = &Signer{}
// Generate creates a new Signer.
func (s *Signer) Generate() (err error) {
if s.SecretKey, err = btcec3.NewSecretKey(); chk.E(err) {
if s.SecretKey, err = ec.NewSecretKey(); chk.E(err) {
return
}
s.skb = s.SecretKey.Serialize()
s.BTCECSec, _ = btcec3.PrivKeyFromBytes(s.skb)
s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
s.PublicKey = s.SecretKey.PubKey()
s.pkb = schnorr.SerializePubKey(s.PublicKey)
return
@ -44,7 +43,7 @@ func (s *Signer) InitSec(sec []byte) (err error) { @@ -44,7 +43,7 @@ func (s *Signer) InitSec(sec []byte) (err error) {
s.SecretKey = secp256k1.SecKeyFromBytes(sec)
s.PublicKey = s.SecretKey.PubKey()
s.pkb = schnorr.SerializePubKey(s.PublicKey)
s.BTCECSec, _ = btcec3.PrivKeyFromBytes(s.skb)
s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
return
}
@ -143,7 +142,7 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) { @@ -143,7 +142,7 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
); chk.E(err) {
return
}
secret = btcec3.GenerateSharedSecret(s.BTCECSec, pub)
secret = ec.GenerateSharedSecret(s.BTCECSec, pub)
return
}
@ -155,7 +154,7 @@ type Keygen struct { @@ -155,7 +154,7 @@ type Keygen struct {
// Generate a new key pair. If the result is suitable, the embedded Signer can have its contents
// extracted.
func (k *Keygen) Generate() (pubBytes []byte, err error) {
if k.Signer.SecretKey, err = btcec3.NewSecretKey(); chk.E(err) {
if k.Signer.SecretKey, err = ec.NewSecretKey(); chk.E(err) {
return
}
k.Signer.PublicKey = k.SecretKey.PubKey()

5
pkg/crypto/p256k/btcec/btcec_test.go

@ -6,11 +6,10 @@ import ( @@ -6,11 +6,10 @@ import (
"testing"
"time"
"next.orly.dev/pkg/utils"
"crypto.orly/p256k/btcec"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k/btcec"
"utils.orly"
)
func TestSigner_Generate(t *testing.T) {

4
pkg/crypto/p256k/btcec/helpers-btcec.go

@ -3,9 +3,9 @@ @@ -3,9 +3,9 @@
package btcec
import (
"encoders.orly/hex"
"interfaces.orly/signer"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/interfaces/signer"
)
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {

4
pkg/crypto/p256k/helpers.go

@ -3,9 +3,9 @@ @@ -3,9 +3,9 @@
package p256k
import (
"encoders.orly/hex"
"interfaces.orly/signer"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/interfaces/signer"
)
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {

8
pkg/crypto/p256k/p256k.go

@ -4,12 +4,12 @@ package p256k @@ -4,12 +4,12 @@ package p256k
import "C"
import (
"crypto.orly/ec"
"crypto.orly/ec/secp256k1"
"interfaces.orly/signer"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/ec"
"next.orly.dev/pkg/crypto/ec/secp256k1"
realy "next.orly.dev/pkg/interfaces/signer"
)
func init() {
@ -33,7 +33,7 @@ type Signer struct { @@ -33,7 +33,7 @@ type Signer struct {
skb, pkb []byte
}
var _ realy.I = &Signer{}
var _ signer.I = &Signer{}
// Generate a new Signer key pair using the CGO bindings to libsecp256k1
func (s *Signer) Generate() (err error) {

17
pkg/crypto/p256k/p256k_test.go

@ -6,24 +6,23 @@ import ( @@ -6,24 +6,23 @@ import (
"testing"
"time"
"next.orly.dev/pkg/utils"
"crypto.orly/p256k"
"interfaces.orly/signer"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k"
realy "next.orly.dev/pkg/interfaces/signer"
"utils.orly"
)
func TestSigner_Generate(t *testing.T) {
for _ = range 10000 {
var err error
signer := &p256k.Signer{}
sign := &p256k.Signer{}
var skb []byte
if err = signer.Generate(); chk.E(err) {
if err = sign.Generate(); chk.E(err) {
t.Fatal(err)
}
skb = signer.Sec()
if err = signer.InitSec(skb); chk.E(err) {
skb = sign.Sec()
if err = sign.InitSec(skb); chk.E(err) {
t.Fatal(err)
}
}
@ -123,7 +122,7 @@ func TestSigner_Generate(t *testing.T) { @@ -123,7 +122,7 @@ func TestSigner_Generate(t *testing.T) {
func TestECDH(t *testing.T) {
n := time.Now()
var err error
var s1, s2 realy.I
var s1, s2 signer.I
var counter int
const total = 100
for _ = range total {

6
pkg/crypto/p256k/secp256k1.go

@ -6,12 +6,12 @@ import ( @@ -6,12 +6,12 @@ import (
"crypto/rand"
"unsafe"
"crypto.orly/ec/schnorr"
"crypto.orly/ec/secp256k1"
"crypto.orly/sha256"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/ec/secp256k1"
"next.orly.dev/pkg/crypto/sha256"
)
/*

2
pkg/crypto/sha256/README.md

@ -112,7 +112,7 @@ This will automatically select the fastest method for the architecture on which @@ -112,7 +112,7 @@ This will automatically select the fastest method for the architecture on which
it will be executed.
```go
import "github.com/minio/sha256-simd"
import "crypto.orly/sha256"
func main() {
...

4
pkg/crypto/sha256/sha256.go

@ -412,10 +412,10 @@ func (d *digest) MarshalBinary() ([]byte, error) { @@ -412,10 +412,10 @@ func (d *digest) MarshalBinary() ([]byte, error) {
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
return errors.New("crypto/sha256: invalid hash state identifier")
return errors.New("crypto.orly/sha256: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/sha256: invalid hash state size")
return errors.New("crypto.orly/sha256: invalid hash state size")
}
b = b[len(magic256):]
b, d.h[0] = consumeUint32(b)

2
pkg/crypto/sha256/sha256_test.go

@ -59,7 +59,7 @@ import ( @@ -59,7 +59,7 @@ import (
"testing"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/utils"
"utils.orly"
)
type sha256Test struct {

132
pkg/database/database.go

@ -0,0 +1,132 @@ @@ -0,0 +1,132 @@
package database
import (
"context"
"os"
"path/filepath"
"time"
"github.com/dgraph-io/badger/v4"
"github.com/dgraph-io/badger/v4/options"
"lol.mleku.dev"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"utils.orly/apputil"
"utils.orly/units"
)
type D struct {
ctx context.Context
cancel context.CancelFunc
dataDir string
Logger *logger
*badger.DB
seq *badger.Sequence
}
// New opens (or creates) a badger-backed event database rooted at dataDir.
// It ensures the directory exists, opens the store, acquires the event
// serial-number sequence, runs pending index migrations, and starts a
// background goroutine that periodically deletes expired events and tears
// the database down when ctx is cancelled.
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
	d *D, err error,
) {
	d = &D{
		ctx:     ctx,
		cancel:  cancel,
		dataDir: dataDir,
		Logger:  NewLogger(lol.GetLogLevel(logLevel), dataDir),
		DB:      nil,
		seq:     nil,
	}
	// Ensure the data directory exists
	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
		return
	}
	// Also ensure the directory exists using apputil.EnsureDir for any
	// potential subdirectories
	dummyFile := filepath.Join(dataDir, "dummy.sst")
	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
		return
	}
	opts := badger.DefaultOptions(d.dataDir)
	opts.BlockCacheSize = int64(units.Gb)
	opts.BlockSize = units.Gb
	opts.CompactL0OnClose = true
	opts.LmaxCompaction = true
	opts.Compression = options.None
	opts.Logger = d.Logger
	if d.DB, err = badger.Open(opts); chk.E(err) {
		return
	}
	log.T.Ln("getting event sequence lease", d.dataDir)
	if d.seq, err = d.DB.GetSequence([]byte("EVENTS"), 1000); chk.E(err) {
		return
	}
	// run code that updates indexes when new indexes have been added and bumps
	// the version so they aren't run again.
	d.RunMigrations()
	// start up the expiration tag processing and shut down and clean up the
	// database after the context is canceled.
	go func() {
		expirationTicker := time.NewTicker(time.Minute * 10)
		defer expirationTicker.Stop()
		for {
			select {
			case <-expirationTicker.C:
				// NOTE(fix): the previous version returned after the first
				// tick, so expiry ran at most once and the database was
				// never closed; loop so expiry runs for the process
				// lifetime and cleanup happens on cancellation.
				d.DeleteExpired()
			case <-d.ctx.Done():
				d.cancel()
				d.seq.Release()
				d.DB.Close()
				return
			}
		}
	}()
	return
}
// Path reports the directory in which the database files live.
func (d *D) Path() string {
	return d.dataDir
}
// Wipe is intended to erase the entire database contents.
//
// Not yet implemented: calling it panics unconditionally.
func (d *D) Wipe() (err error) {
// TODO implement me
panic("implement me")
}
// SetLogLevel adjusts the database logger's verbosity to the named level.
func (d *D) SetLogLevel(level string) {
	lvl := lol.GetLogLevel(level)
	d.Logger.SetLogLevel(lvl)
}
// EventIdsBySerial is intended to return up to count event serials starting
// from the given serial.
//
// Not yet implemented: calling it panics unconditionally.
func (d *D) EventIdsBySerial(start uint64, count int) (
evs []uint64, err error,
) {
// TODO implement me
panic("implement me")
}
// Init records the storage path for the database. The store itself is
// opened by New, so this only updates the configured data directory.
func (d *D) Init(path string) (err error) {
	d.dataDir = path
	return
}
// Sync flushes the database buffers to disk.
//
// A value-log GC pass is attempted first; its error is discarded.
// NOTE(review): badger returns an error from RunValueLogGC even in the
// routine "nothing to rewrite" case, which presumably is why it is ignored
// here — but genuine GC failures are silently dropped too; confirm this
// best-effort behavior is intended.
func (d *D) Sync() (err error) {
d.DB.RunValueLogGC(0.5)
return d.DB.Sync()
}
// Close releases the event-sequence lease and shuts the underlying store
// down, returning the first error encountered. Both steps tolerate a nil
// member so Close is safe on a partially-constructed D.
func (d *D) Close() (err error) {
	if d.seq != nil {
		err = d.seq.Release()
		if chk.E(err) {
			return
		}
	}
	if d.DB == nil {
		return
	}
	err = d.DB.Close()
	if chk.E(err) {
		return
	}
	return
}

76
pkg/database/delete-event.go

@ -0,0 +1,76 @@ @@ -0,0 +1,76 @@
package database
import (
"bytes"
"context"
"database.orly/indexes"
"database.orly/indexes/types"
"encoders.orly/event"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
)
// DeleteEvent removes the event identified by `eid` from the database along
// with all of its index keys. A missing event is a silent no-op.
//
// NOTE(review): the previous doc comment described a noTombstone parameter
// that this signature does not have — no tombstone is written here.
func (d *D) DeleteEvent(c context.Context, eid []byte) (err error) {
d.Logger.Warningf("deleting event %0x", eid)
// Get the serial number for the event ID
var ser *types.Uint40
ser, err = d.GetSerialById(eid)
if chk.E(err) {
return
}
if ser == nil {
// Event wasn't found, nothing to delete
return
}
// Fetch the event to get its data (needed to reconstruct its index keys)
var ev *event.E
ev, err = d.FetchEventBySerial(ser)
if chk.E(err) {
return
}
if ev == nil {
// Event wasn't found, nothing to delete. this shouldn't happen.
return
}
if err = d.DeleteEventBySerial(c, ser, ev); chk.E(err) {
return
}
return
}
// DeleteEventBySerial removes the stored event addressed by ser, together
// with every index key derived from ev, in a single write transaction.
func (d *D) DeleteEventBySerial(
	c context.Context, ser *types.Uint40, ev *event.E,
) (err error) {
	// Recompute every index key this event produced so they can be removed.
	var keys [][]byte
	if keys, err = GetIndexesForEvent(ev, ser.Get()); chk.E(err) {
		return
	}
	// Encode the primary event record key.
	kb := new(bytes.Buffer)
	if err = indexes.EventEnc(ser).MarshalWrite(kb); chk.E(err) {
		return
	}
	// Remove the event record and all of its indexes atomically.
	err = d.Update(
		func(txn *badger.Txn) (err error) {
			if err = txn.Delete(kb.Bytes()); chk.E(err) {
				return
			}
			for _, k := range keys {
				if err = txn.Delete(k); chk.E(err) {
					return
				}
			}
			return
		},
	)
	return
}

62
pkg/database/delete-expired.go

@ -0,0 +1,62 @@ @@ -0,0 +1,62 @@
package database
import (
"bytes"
"context"
"time"
"database.orly/indexes"
"database.orly/indexes/types"
"encoders.orly/event"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
)
// DeleteExpired scans the expiration index and removes every event whose
// expiration timestamp is at or before the current second, together with
// all of its indexes. Individual failures are logged and skipped so one bad
// record cannot block expiry of the rest.
func (d *D) DeleteExpired() {
	var err error
	var expiredSerials types.Uint40s
	// make the operation atomic and save on accesses to the system clock by
	// setting the boundary at the current second
	now := time.Now().Unix()
	// search the expiration indexes for expiry timestamps that are now past
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			expPrf := new(bytes.Buffer)
			if _, err = indexes.ExpirationPrefix.Write(expPrf); chk.E(err) {
				return
			}
			it := txn.NewIterator(badger.IteratorOptions{Prefix: expPrf.Bytes()})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key := item.Key()
				buf := bytes.NewBuffer(key)
				// BUG FIX: the decode targets must be freshly allocated on
				// every iteration. Previously a single ser pointer from one
				// ExpirationVars() call outside the loop was appended
				// repeatedly, so every entry in expiredSerials aliased the
				// LAST decoded serial and the wrong events were deleted.
				exp, ser := indexes.ExpirationVars()
				if err = indexes.ExpirationDec(
					exp, ser,
				).UnmarshalRead(buf); chk.E(err) {
					continue
				}
				if int64(exp.Get()) > now {
					// not expired yet
					continue
				}
				expiredSerials = append(expiredSerials, ser)
			}
			return
		},
	); chk.E(err) {
	}
	// delete the events and their indexes
	for _, ser := range expiredSerials {
		var ev *event.E
		if ev, err = d.FetchEventBySerial(ser); chk.E(err) {
			continue
		}
		if err = d.DeleteEventBySerial(
			context.Background(), ser, ev,
		); chk.E(err) {
			continue
		}
	}
}

106
pkg/database/export.go

@ -0,0 +1,106 @@ @@ -0,0 +1,106 @@
package database
import (
"bytes"
"context"
"io"
"database.orly/indexes"
"database.orly/indexes/types"
"encoders.orly/event"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"utils.orly/units"
)
// writeEventJSON decodes one stored binary event record into evBuf and
// writes it to w as a single line of minified JSON. Records that fail to
// decode are skipped (nil error); a write failure on w is returned so the
// caller can abort the export.
func writeEventJSON(w io.Writer, evBuf *bytes.Buffer, val []byte) (err error) {
	// BUG FIX: reset at the START of each record. Previously the buffer was
	// only reset on the success path, so a decode failure left stale bytes
	// that corrupted the next record.
	evBuf.Reset()
	evBuf.Write(val)
	ev := event.New()
	if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
		// undecodable record: skip it, not a fatal export error
		return nil
	}
	if _, err = w.Write(ev.Serialize()); chk.E(err) {
		return
	}
	if _, err = w.Write([]byte{'\n'}); chk.E(err) {
		return
	}
	return
}

// Export writes the stored events to w in line-structured minified JSON.
// With no pubkeys it streams the whole event table; otherwise only events
// found under each given author pubkey are written.
//
// A failed write to w now aborts the export in both branches (previously
// the per-pubkey branch skipped the record and kept going).
func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
	var err error
	evBuf := bytes.NewBuffer(make([]byte, 0, units.Mb))
	if len(pubkeys) == 0 {
		if err = d.View(
			func(txn *badger.Txn) (err error) {
				prf := new(bytes.Buffer)
				if err = indexes.EventEnc(nil).MarshalWrite(prf); chk.E(err) {
					return
				}
				it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
				defer it.Close()
				for it.Rewind(); it.Valid(); it.Next() {
					if err = it.Item().Value(
						func(val []byte) (err error) {
							return writeEventJSON(w, evBuf, val)
						},
					); chk.E(err) {
						return
					}
				}
				return
			},
		); err != nil {
			return
		}
		return
	}
	for _, pubkey := range pubkeys {
		if err = d.View(
			func(txn *badger.Txn) (err error) {
				pkBuf := new(bytes.Buffer)
				ph := &types.PubHash{}
				if err = ph.FromPubkey(pubkey); chk.E(err) {
					return
				}
				if err = indexes.PubkeyEnc(
					ph, nil, nil,
				).MarshalWrite(pkBuf); chk.E(err) {
					return
				}
				it := txn.NewIterator(badger.IteratorOptions{Prefix: pkBuf.Bytes()})
				defer it.Close()
				for it.Rewind(); it.Valid(); it.Next() {
					if err = it.Item().Value(
						func(val []byte) (err error) {
							return writeEventJSON(w, evBuf, val)
						},
					); chk.E(err) {
						return
					}
				}
				return
			},
		); err != nil {
			return
		}
	}
	return
}

111
pkg/database/export_test.go

@ -0,0 +1,111 @@ @@ -0,0 +1,111 @@
package database
import (
"bufio"
"bytes"
"context"
"os"
"testing"
"encoders.orly/event"
"encoders.orly/event/examples"
"lol.mleku.dev/chk"
)
// TestExport tests the Export function by:
// 1. Creating a new database with events from examples.Cache
// 2. Checking that all event IDs in the cache are found in the export
// 3. Verifying this also works when only a few pubkeys are requested
//
// NOTE(review): only the all-events export (step 1) is actually exercised
// below; the per-pubkey path described in point 3 is collected into
// pubkeyToEventIDs but never asserted — consider extending the test.
func TestExport(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-db-*")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
defer os.RemoveAll(tempDir) // Clean up after the test
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
// Create a scanner to read events from examples.Cache
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
// Large buffer: example events can be much bigger than the scanner default
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
// Maps to store event IDs and their associated pubkeys
eventIDs := make(map[string]bool)
pubkeyToEventIDs := make(map[string][]string)
// Process each event
for scanner.Scan() {
chk.E(scanner.Err())
b := scanner.Bytes()
ev := event.New()
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
t.Fatal(err)
}
// Save the event to the database
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
t.Fatalf("Failed to save event: %v", err)
}
// Store the event ID
eventID := string(ev.ID)
eventIDs[eventID] = true
// Store the event ID by pubkey
pubkey := string(ev.Pubkey)
pubkeyToEventIDs[pubkey] = append(pubkeyToEventIDs[pubkey], eventID)
}
// Check for scanner errors
if err = scanner.Err(); err != nil {
t.Fatalf("Scanner error: %v", err)
}
t.Logf("Saved %d events to the database", len(eventIDs))
// Test 1: Export all events and verify all IDs are in the export
var exportBuffer bytes.Buffer
db.Export(ctx, &exportBuffer)
// Parse the exported events and check that all IDs are present
exportedIDs := make(map[string]bool)
exportScanner := bufio.NewScanner(&exportBuffer)
exportScanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
exportCount := 0
for exportScanner.Scan() {
b := exportScanner.Bytes()
ev := event.New()
if _, err = ev.Unmarshal(b); chk.E(err) {
t.Fatal(err)
}
exportedIDs[string(ev.ID)] = true
exportCount++
}
// Check for scanner errors
if err = exportScanner.Err(); err != nil {
t.Fatalf("Scanner error: %v", err)
}
t.Logf("Found %d events in the export", exportCount)
// Check that all original event IDs are in the export
for id := range eventIDs {
if !exportedIDs[id] {
t.Errorf("Event ID %s not found in export", id)
}
}
t.Logf("All %d event IDs found in export", len(eventIDs))
}

38
pkg/database/fetch-event-by-serial.go

@ -0,0 +1,38 @@ @@ -0,0 +1,38 @@
package database
import (
"bytes"
"database.orly/indexes"
"database.orly/indexes/types"
"encoders.orly/event"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
)
// FetchEventBySerial loads and decodes the event stored under the given
// serial number. A missing key surfaces as the underlying badger error and
// ev stays nil.
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
	err = d.View(
		func(txn *badger.Txn) (err error) {
			// Encode the event record key for this serial.
			keyBuf := new(bytes.Buffer)
			if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
				return
			}
			var item *badger.Item
			if item, err = txn.Get(keyBuf.Bytes()); err != nil {
				return
			}
			var raw []byte
			if raw, err = item.ValueCopy(nil); chk.E(err) {
				return
			}
			decoded := new(event.E)
			if err = decoded.UnmarshalBinary(bytes.NewBuffer(raw)); chk.E(err) {
				return
			}
			ev = decoded
			return
		},
	)
	return
}

156
pkg/database/fetch-event-by-serial_test.go

@ -0,0 +1,156 @@ @@ -0,0 +1,156 @@
package database
import (
"bufio"
"bytes"
"context"
"os"
"testing"
"database.orly/indexes/types"
"encoders.orly/event"
"encoders.orly/event/examples"
"encoders.orly/filter"
"encoders.orly/tag"
"lol.mleku.dev/chk"
"utils.orly"
)
// TestFetchEventBySerial saves the example event cache into a fresh
// database, resolves one event's serial through QueryForSerials, fetches it
// back by serial and checks the round-tripped fields, then verifies that a
// serial with no record returns an error and a nil event.
func TestFetchEventBySerial(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-db-*")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
defer os.RemoveAll(tempDir) // Clean up after the test
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
// Create a scanner to read events from examples.Cache
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
// Large buffer: example events can exceed the scanner's default limit
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
// Count the number of events processed
eventCount := 0
var events []*event.E
// Process each event
for scanner.Scan() {
chk.E(scanner.Err())
b := scanner.Bytes()
ev := event.New()
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
t.Fatal(err)
}
events = append(events, ev)
// Save the event to the database
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
}
eventCount++
}
// Check for scanner errors
if err = scanner.Err(); err != nil {
t.Fatalf("Scanner error: %v", err)
}
t.Logf("Successfully saved %d events to the database", eventCount)
// Instead of trying to find a valid serial directly, let's use QueryForIds
// which is known to work from the other tests
testEvent := events[3] // Using the same event as in other tests
// Use QueryForIds to get the IdPkTs for this event
var sers types.Uint40s
sers, err = db.QueryForSerials(
ctx, &filter.F{
Ids: tag.NewFromBytesSlice(testEvent.ID),
},
)
if err != nil {
t.Fatalf("Failed to query for Ids: %v", err)
}
// Verify we got exactly one result
if len(sers) != 1 {
t.Fatalf("Expected 1 IdPkTs, got %d", len(sers))
}
// Fetch the event by serial
fetchedEvent, err := db.FetchEventBySerial(sers[0])
if err != nil {
t.Fatalf("Failed to fetch event by serial: %v", err)
}
// Verify the fetched event is not nil
if fetchedEvent == nil {
t.Fatal("Expected fetched event to be non-nil, but got nil")
}
// Verify the fetched event has the same ID as the original event
if !utils.FastEqual(fetchedEvent.ID, testEvent.ID) {
t.Fatalf(
"Fetched event ID doesn't match original event ID. Got %x, expected %x",
fetchedEvent.ID, testEvent.ID,
)
}
// Verify other event properties match
if fetchedEvent.Kind != testEvent.Kind {
t.Fatalf(
"Fetched event kind doesn't match. Got %d, expected %d",
fetchedEvent.Kind, testEvent.Kind,
)
}
if !utils.FastEqual(fetchedEvent.Pubkey, testEvent.Pubkey) {
t.Fatalf(
"Fetched event pubkey doesn't match. Got %x, expected %x",
fetchedEvent.Pubkey, testEvent.Pubkey,
)
}
if fetchedEvent.CreatedAt != testEvent.CreatedAt {
t.Fatalf(
"Fetched event created_at doesn't match. Got %d, expected %d",
fetchedEvent.CreatedAt, testEvent.CreatedAt,
)
}
// Test with a non-existent serial
nonExistentSerial := new(types.Uint40)
err = nonExistentSerial.Set(uint64(0xFFFFFFFFFF)) // Max value
if err != nil {
t.Fatalf("Failed to create non-existent serial: %v", err)
}
// This should return an error since the serial doesn't exist
fetchedEvent, err = db.FetchEventBySerial(nonExistentSerial)
if err == nil {
t.Fatal("Expected error for non-existent serial, but got nil")
}
// The fetched event should be nil
if fetchedEvent != nil {
t.Fatalf(
"Expected nil event for non-existent serial, but got: %v",
fetchedEvent,
)
}
}

56
pkg/database/get-fullidpubkey-by-serial.go

@ -0,0 +1,56 @@ @@ -0,0 +1,56 @@
package database
import (
"bytes"
"database.orly/indexes"
"database.orly/indexes/types"
"github.com/dgraph-io/badger/v4"
"interfaces.orly/store"
"lol.mleku.dev/chk"
)
// GetFullIdPubkeyBySerial looks up the FullIdPubkey index entry for the
// given serial and returns the event's full id, author pubkey hash,
// timestamp and serial. fidpk stays nil when no entry exists.
func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
	fidpk *store.IdPkTs, err error,
) {
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			// Build the serial-scoped key prefix for the FullIdPubkey table.
			pb := new(bytes.Buffer)
			if err = indexes.FullIdPubkeyEnc(
				ser, nil, nil, nil,
			).MarshalWrite(pb); chk.E(err) {
				return
			}
			prefix := pb.Bytes()
			it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
			defer it.Close()
			it.Seek(prefix)
			if !it.Valid() {
				// no entry for this serial; leave fidpk nil
				return
			}
			// Decode the first matching key into fresh index variables
			// (named s here to avoid shadowing the ser parameter).
			s, fid, pk, ts := indexes.FullIdPubkeyVars()
			if err = indexes.FullIdPubkeyDec(
				s, fid, pk, ts,
			).UnmarshalRead(bytes.NewBuffer(it.Item().Key())); chk.E(err) {
				return
			}
			fidpk = &store.IdPkTs{
				Id:  fid.Bytes(),
				Pub: pk.Bytes(),
				Ts:  int64(ts.Get()),
				Ser: s.Get(),
			}
			return
		},
	); chk.E(err) {
		return
	}
	return
}

74
pkg/database/get-fullidpubkey-by-serials.go

@ -0,0 +1,74 @@ @@ -0,0 +1,74 @@
package database
import (
"bytes"
"database.orly/indexes"
"database.orly/indexes/types"
"github.com/dgraph-io/badger/v4"
"interfaces.orly/store"
"lol.mleku.dev/chk"
)
// GetFullIdPubkeyBySerials seeks directly to each serial's prefix in the
// FullIdPubkey index. The input sers slice is expected to be sorted in
// ascending order, allowing efficient forward-only iteration via a single
// Badger iterator.
//
// NOTE(review): sortedness of sers is not enforced here; an unsorted input
// presumably still works via Seek but loses the forward-only efficiency —
// confirm against callers. Serials with no index entry are silently
// skipped, so len(fidpks) may be less than len(sers).
func (d *D) GetFullIdPubkeyBySerials(sers []*types.Uint40) (
fidpks []*store.IdPkTs, err error,
) {
if len(sers) == 0 {
return
}
if err = d.View(
func(txn *badger.Txn) (err error) {
// Scope the iterator to the FullIdPubkey table using its 3-byte prefix.
buf := new(bytes.Buffer)
if err = indexes.NewPrefix(indexes.FullIdPubkey).MarshalWrite(buf); chk.E(err) {
return
}
tablePrefix := buf.Bytes()
it := txn.NewIterator(badger.IteratorOptions{Prefix: tablePrefix})
defer it.Close()
for _, s := range sers {
if s == nil {
continue
}
// Build the serial-specific prefix: 3-byte table prefix + 5-byte serial.
sbuf := new(bytes.Buffer)
if err = indexes.FullIdPubkeyEnc(
s, nil, nil, nil,
).MarshalWrite(sbuf); chk.E(err) {
return
}
serialPrefix := sbuf.Bytes()
// Seek to the first key for this serial and verify it matches the prefix.
it.Seek(serialPrefix)
if it.ValidForPrefix(serialPrefix) {
item := it.Item()
key := item.Key()
// Fresh decode targets per hit so appended results do not alias.
ser, fid, p, ca := indexes.FullIdPubkeyVars()
if err = indexes.FullIdPubkeyDec(
ser, fid, p, ca,
).UnmarshalRead(bytes.NewBuffer(key)); chk.E(err) {
return
}
fidpks = append(
fidpks, &store.IdPkTs{
Id: fid.Bytes(),
Pub: p.Bytes(),
Ts: int64(ca.Get()),
Ser: ser.Get(),
},
)
}
}
return
},
); chk.E(err) {
return
}
return
}

156
pkg/database/get-indexes-for-event.go

@ -0,0 +1,156 @@ @@ -0,0 +1,156 @@
package database
import (
"bytes"
"database.orly/indexes"
. "database.orly/indexes/types"
"encoders.orly/event"
"lol.mleku.dev/chk"
)
// appendIndexBytes serializes idx and appends the encoded key to *idxs.
// Each call uses its own buffer, so the appended slice is not aliased or
// overwritten by later calls.
func appendIndexBytes(idxs *[][]byte, idx *indexes.T) (err error) {
	var b bytes.Buffer
	if err = idx.MarshalWrite(&b); chk.E(err) {
		return
	}
	*idxs = append(*idxs, b.Bytes())
	return
}
// GetIndexesForEvent creates all the indexes for an event.E instance as
// defined in keys.go. It returns a slice of encoded index keys that can be
// used to store (or delete) the event in the database. On any error the
// returned slice is nil.
func GetIndexesForEvent(ev *event.E, serial uint64) (
	idxs [][]byte, err error,
) {
	defer func() {
		// never return a partial index set
		if chk.E(err) {
			idxs = nil
		}
	}()
	// Convert serial to Uint40 (errors if serial >= 2^40)
	ser := new(Uint40)
	if err = ser.Set(serial); chk.E(err) {
		return
	}
	// ID index
	idHash := new(IdHash)
	if err = idHash.FromId(ev.ID); chk.E(err) {
		return
	}
	if err = appendIndexBytes(&idxs, indexes.IdEnc(idHash, ser)); chk.E(err) {
		return
	}
	// FullIdPubkey index
	fullID := new(Id)
	if err = fullID.FromId(ev.ID); chk.E(err) {
		return
	}
	pubHash := new(PubHash)
	if err = pubHash.FromPubkey(ev.Pubkey); chk.E(err) {
		return
	}
	createdAt := new(Uint64)
	createdAt.Set(uint64(ev.CreatedAt))
	if err = appendIndexBytes(
		&idxs, indexes.FullIdPubkeyEnc(ser, fullID, pubHash, createdAt),
	); chk.E(err) {
		return
	}
	// CreatedAt index
	if err = appendIndexBytes(
		&idxs, indexes.CreatedAtEnc(createdAt, ser),
	); chk.E(err) {
		return
	}
	// PubkeyCreatedAt index
	if err = appendIndexBytes(
		&idxs, indexes.PubkeyEnc(pubHash, createdAt, ser),
	); chk.E(err) {
		return
	}
	// The kind is constant for the whole event: build it once instead of
	// allocating a fresh Uint16 on every tag iteration (and again at the
	// end) as the previous version did.
	kind := new(Uint16)
	kind.Set(ev.Kind)
	// Process tags for tag-related indexes
	if ev.Tags != nil && ev.Tags.Len() > 0 {
		for _, t := range ev.Tags.ToSliceOfTags() {
			// only index tags that have a value field
			if t.Len() < 2 {
				continue
			}
			keyBytes := t.Key()
			// require a single-letter key in a-zA-Z
			if len(keyBytes) != 1 {
				continue
			}
			if (keyBytes[0] < 'a' || keyBytes[0] > 'z') &&
				(keyBytes[0] < 'A' || keyBytes[0] > 'Z') {
				continue
			}
			valueBytes := t.Value()
			// Create tag key and value hash
			key := new(Letter)
			key.Set(keyBytes[0])
			valueHash := new(Ident)
			valueHash.FromIdent(valueBytes)
			// TagPubkey index
			if err = appendIndexBytes(
				&idxs,
				indexes.TagPubkeyEnc(key, valueHash, pubHash, createdAt, ser),
			); chk.E(err) {
				return
			}
			// Tag index
			if err = appendIndexBytes(
				&idxs, indexes.TagEnc(key, valueHash, createdAt, ser),
			); chk.E(err) {
				return
			}
			// TagKind index
			if err = appendIndexBytes(
				&idxs,
				indexes.TagKindEnc(key, valueHash, kind, createdAt, ser),
			); chk.E(err) {
				return
			}
			// TagKindPubkey index
			if err = appendIndexBytes(
				&idxs,
				indexes.TagKindPubkeyEnc(
					key, valueHash, kind, pubHash, createdAt, ser,
				),
			); chk.E(err) {
				return
			}
		}
	}
	// Kind index
	if err = appendIndexBytes(
		&idxs, indexes.KindEnc(kind, createdAt, ser),
	); chk.E(err) {
		return
	}
	// KindPubkey index
	if err = appendIndexBytes(
		&idxs, indexes.KindPubkeyEnc(kind, pubHash, createdAt, ser),
	); chk.E(err) {
		return
	}
	return
}

304
pkg/database/get-indexes-for-event_test.go

@ -0,0 +1,304 @@ @@ -0,0 +1,304 @@
package database
import (
"bytes"
"testing"
"crypto.orly/sha256"
"database.orly/indexes"
types2 "database.orly/indexes/types"
"encoders.orly/event"
"encoders.orly/kind"
"encoders.orly/tag"
"lol.mleku.dev/chk"
"utils.orly"
)
// TestGetIndexesForEvent runs the index-generation subtests in a fixed
// order: a plain event, a tagged event, and the error path.
func TestGetIndexesForEvent(t *testing.T) {
	subtests := []struct {
		name string
		fn   func(*testing.T)
	}{
		{"BasicEvent", testBasicEvent},
		{"EventWithTags", testEventWithTags},
		{"ErrorHandling", testErrorHandling},
	}
	for _, st := range subtests {
		t.Run(st.name, st.fn)
	}
}
// verifyIndexIncluded marshals expectedIdx and fails the test when its
// encoded byte form is absent from idxs.
func verifyIndexIncluded(t *testing.T, idxs [][]byte, expectedIdx *indexes.T) {
	var b bytes.Buffer
	if err := expectedIdx.MarshalWrite(&b); chk.E(err) {
		t.Fatalf("Failed to marshal expected index: %v", err)
	}
	want := b.Bytes()
	for _, got := range idxs {
		if utils.FastEqual(got, want) {
			// found a byte-identical index; nothing more to check
			return
		}
	}
	t.Errorf("Expected index not found in generated indexes")
	t.Errorf("Expected: %v", want)
	t.Errorf("Generated indexes: %d indexes", len(idxs))
}
// testBasicEvent builds a minimal event (no tags), generates its indexes,
// and asserts that exactly the six expected index keys are produced:
// Id, FullIdPubkey, CreatedAt, Pubkey, Kind and KindPubkey.
func testBasicEvent(t *testing.T) {
// Create a basic event
ev := event.New()
// Set ID (deterministic byte pattern, sha256.Size bytes)
id := make([]byte, sha256.Size)
for i := range id {
id[i] = byte(i)
}
ev.ID = id
// Set Pubkey (deterministic 32-byte pattern, offset by one from the ID)
pubkey := make([]byte, 32)
for i := range pubkey {
pubkey[i] = byte(i + 1)
}
ev.Pubkey = pubkey
// Set CreatedAt
ev.CreatedAt = 12345
// Set Kind
ev.Kind = kind.TextNote.K
// Set Content
ev.Content = []byte("Test content")
// Generate indexes
serial := uint64(1)
idxs, err := GetIndexesForEvent(ev, serial)
if chk.E(err) {
t.Fatalf("GetIndexesForEvent failed: %v", err)
}
// Verify the number of indexes (should be 6 for a basic event without tags)
if len(idxs) != 6 {
t.Fatalf("Expected 6 indexes, got %d", len(idxs))
}
// Create and verify the expected indexes
// 1. ID index
ser := new(types2.Uint40)
err = ser.Set(serial)
if chk.E(err) {
t.Fatalf("Failed to create Uint40: %v", err)
}
idHash := new(types2.IdHash)
err = idHash.FromId(ev.ID)
if chk.E(err) {
t.Fatalf("Failed to create IdHash: %v", err)
}
idIndex := indexes.IdEnc(idHash, ser)
verifyIndexIncluded(t, idxs, idIndex)
// 2. FullIdPubkey index
fullID := new(types2.Id)
err = fullID.FromId(ev.ID)
if chk.E(err) {
t.Fatalf("Failed to create ID: %v", err)
}
pubHash := new(types2.PubHash)
err = pubHash.FromPubkey(ev.Pubkey)
if chk.E(err) {
t.Fatalf("Failed to create PubHash: %v", err)
}
createdAt := new(types2.Uint64)
createdAt.Set(uint64(ev.CreatedAt))
idPubkeyIndex := indexes.FullIdPubkeyEnc(ser, fullID, pubHash, createdAt)
verifyIndexIncluded(t, idxs, idPubkeyIndex)
// 3. CreatedAt index
createdAtIndex := indexes.CreatedAtEnc(createdAt, ser)
verifyIndexIncluded(t, idxs, createdAtIndex)
// 4. Pubkey index
pubkeyIndex := indexes.PubkeyEnc(pubHash, createdAt, ser)
verifyIndexIncluded(t, idxs, pubkeyIndex)
// 5. Kind index
kind := new(types2.Uint16)
kind.Set(ev.Kind)
kindIndex := indexes.KindEnc(kind, createdAt, ser)
verifyIndexIncluded(t, idxs, kindIndex)
// 6. KindPubkey index
kindPubkeyIndex := indexes.KindPubkeyEnc(kind, pubHash, createdAt, ser)
verifyIndexIncluded(t, idxs, kindPubkeyIndex)
}
// testEventWithTags builds an event carrying two single-letter tags ("e"
// and "p"), generates its indexes, and asserts that 14 keys are produced
// (6 basic + 4 per tag), then spot-checks all four tag-derived indexes for
// the "e" tag.
func testEventWithTags(t *testing.T) {
// Create an event with tags
ev := event.New()
// Set ID
id := make([]byte, sha256.Size)
for i := range id {
id[i] = byte(i)
}
ev.ID = id
// Set Pubkey
pubkey := make([]byte, 32)
for i := range pubkey {
pubkey[i] = byte(i + 1)
}
ev.Pubkey = pubkey
// Set CreatedAt
ev.CreatedAt = 12345
// Set Kind
ev.Kind = kind.TextNote.K // TextNote kind
// Set Content
ev.Content = []byte("Test content with tags")
// Add tags
ev.Tags = tag.NewS()
// Add e tag (event reference)
eTagKey := "e"
eTagValue := "abcdef1234567890"
eTag := tag.NewFromAny(eTagKey, eTagValue)
*ev.Tags = append(*ev.Tags, eTag)
// Add p tag (pubkey reference)
pTagKey := "p"
pTagValue := "0123456789abcdef"
pTag := tag.NewFromAny(pTagKey, pTagValue)
*ev.Tags = append(*ev.Tags, pTag)
// Generate indexes
serial := uint64(2)
idxs, err := GetIndexesForEvent(ev, serial)
if chk.E(err) {
t.Fatalf("GetIndexesForEvent failed: %v", err)
}
// Verify the number of indexes (should be 14 for an event with 2 tags)
// 6 basic indexes + 4 indexes per tag (TagPubkey, Tag, TagKind, TagKindPubkey)
if len(idxs) != 14 {
t.Fatalf("Expected 14 indexes, got %d", len(idxs))
}
// Create and verify the basic indexes (same as in testBasicEvent)
ser := new(types2.Uint40)
err = ser.Set(serial)
if chk.E(err) {
t.Fatalf("Failed to create Uint40: %v", err)
}
idHash := new(types2.IdHash)
err = idHash.FromId(ev.ID)
if chk.E(err) {
t.Fatalf("Failed to create IdHash: %v", err)
}
// Verify one of the tag-related indexes (e tag)
pubHash := new(types2.PubHash)
err = pubHash.FromPubkey(ev.Pubkey)
if chk.E(err) {
t.Fatalf("Failed to create PubHash: %v", err)
}
createdAt := new(types2.Uint64)
createdAt.Set(uint64(ev.CreatedAt))
// Create tag key and value for e tag
eKey := new(types2.Letter)
eKey.Set('e')
eValueHash := new(types2.Ident)
eValueHash.FromIdent([]byte("abcdef1234567890"))
// Verify TagPubkey index for e tag
pubkeyTagIndex := indexes.TagPubkeyEnc(
eKey, eValueHash, pubHash, createdAt, ser,
)
verifyIndexIncluded(t, idxs, pubkeyTagIndex)
// Verify Tag index for e tag
tagIndex := indexes.TagEnc(
eKey, eValueHash, createdAt, ser,
)
verifyIndexIncluded(t, idxs, tagIndex)
// Verify TagKind index for e tag
kind := new(types2.Uint16)
kind.Set(ev.Kind)
kindTagIndex := indexes.TagKindEnc(eKey, eValueHash, kind, createdAt, ser)
verifyIndexIncluded(t, idxs, kindTagIndex)
// Verify TagKindPubkey index for e tag
kindPubkeyTagIndex := indexes.TagKindPubkeyEnc(
eKey, eValueHash, kind, pubHash, createdAt, ser,
)
verifyIndexIncluded(t, idxs, kindPubkeyTagIndex)
}
// testErrorHandling verifies that GetIndexesForEvent rejects a serial that
// does not fit in a Uint40 (>= 2^40) by returning an error and a nil index
// slice.
func testErrorHandling(t *testing.T) {
// Test with invalid serial number (too large for Uint40)
ev := event.New()
// Set ID
id := make([]byte, sha256.Size)
for i := range id {
id[i] = byte(i)
}
ev.ID = id
// Set Pubkey
pubkey := make([]byte, 32)
for i := range pubkey {
pubkey[i] = byte(i + 1)
}
ev.Pubkey = pubkey
// Set CreatedAt
ev.CreatedAt = 12345
// Set Kind
ev.Kind = kind.TextNote.K
// Set Content
ev.Content = []byte("Test content")
// Use an invalid serial number (too large for Uint40)
invalidSerial := uint64(1) << 40 // 2^40, which is too large for Uint40
// Generate indexes
idxs, err := GetIndexesForEvent(ev, invalidSerial)
// Verify that an error was returned
if err == nil {
t.Fatalf("Expected error for invalid serial number, got nil")
}
// Verify that idxs is nil when an error occurs
if idxs != nil {
t.Fatalf("Expected nil idxs when error occurs, got %v", idxs)
}
// Note: We don't test with nil event as it causes a panic
// The function doesn't have nil checks, which is a potential improvement
}

388
pkg/database/get-indexes-from-filter.go

@@ -0,0 +1,388 @@
package database
import (
"bytes"
"math"
"sort"
"database.orly/indexes"
types2 "database.orly/indexes/types"
"encoders.orly/filter"
"lol.mleku.dev/chk"
)
// Range holds a pair of encoded index keys bounding an iteration over the
// index table: Start is the first key of interest and End the last. For an
// exact ID match both fields hold the same bytes (see GetIndexesFromFilter).
type Range struct {
	Start, End []byte
}
// IsHexString reports whether data has an even length and consists solely of
// hexadecimal digits (0-9, a-f, A-F). An empty slice trivially satisfies both
// conditions and returns true.
func IsHexString(data []byte) (isHex bool) {
	if len(data)%2 != 0 {
		return false
	}
	for _, c := range data {
		switch {
		case c >= '0' && c <= '9':
		case c >= 'a' && c <= 'f':
		case c >= 'A' && c <= 'F':
		default:
			return false
		}
	}
	return true
}
// CreateIdHashFromData creates an IdHash from data that could be hex or binary
//
// A 64-byte input is first tried as a hex-encoded sha256 id; on success the
// result is returned immediately. If hex decoding fails (chk.E logs it), the
// error is cleared and the data falls through to be interpreted as raw binary
// via FromId, whose error (if any) is returned to the caller.
func CreateIdHashFromData(data []byte) (i *types2.IdHash, err error) {
	i = new(types2.IdHash)
	// If data looks like hex string and has the right length for hex-encoded
	// sha256
	if len(data) == 64 {
		if err = i.FromIdHex(string(data)); chk.E(err) {
			// Not valid hex after all; reset err and try the binary path
			// below instead.
			err = nil
		} else {
			return
		}
	}
	// Assume it's binary data
	if err = i.FromId(data); chk.E(err) {
		return
	}
	return
}
// CreatePubHashFromData creates a PubHash from data that could be hex or binary
//
// A 64-byte input is first tried as a hex-encoded pubkey; on success the
// result is returned immediately. If hex decoding fails, the data is retried
// as raw binary via FromPubkey — mirroring CreateIdHashFromData — so the
// caller always receives either a populated hash or a non-nil error.
//
// Fix: previously, a 64-byte input that failed hex decoding returned a
// zero-value PubHash with a nil error (the binary fallback only ran in the
// else branch), silently producing an empty hash.
func CreatePubHashFromData(data []byte) (p *types2.PubHash, err error) {
	p = new(types2.PubHash)
	// If data looks like hex string and has the right length for hex-encoded
	// pubkey
	if len(data) == 64 {
		if err = p.FromPubkeyHex(string(data)); chk.E(err) {
			// Not valid hex after all; clear err and fall through to the
			// binary interpretation below.
			err = nil
		} else {
			return
		}
	}
	// Assume it's binary data
	if err = p.FromPubkey(data); chk.E(err) {
		return
	}
	return
}
// GetIndexesFromFilter returns encoded indexes based on the given filter.
//
// An error is returned if any input values are invalid during encoding.
//
// The indexes are designed so that only one table needs to be iterated, being a
// complete set of combinations of all fields in the event, thus there is no
// need to decode events until they are to be delivered.
//
// Branch selection is by priority: Ids short-circuit everything else; then
// tag+kind+author, tag+kind, tag+author, tag-only, kind+author, kind-only,
// author-only, and finally a bare created-at range. Exactly one branch runs
// and returns. Every non-ID range spans the created-at interval
// [Since or 0, Until or math.MaxInt64].
func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
	// ID eid
	//
	// If there is any Ids in the filter, none of the other fields matter. It
	// should be an error, but convention just ignores it.
	if f.Ids.Len() > 0 {
		for _, id := range f.Ids.ToSliceOfBytes() {
			// The closure scopes error handling per id; an exact-match range
			// uses the same key bytes for Start and End.
			if err = func() (err error) {
				var i *types2.IdHash
				if i, err = CreateIdHashFromData(id); chk.E(err) {
					return
				}
				buf := new(bytes.Buffer)
				idx := indexes.IdEnc(i, nil)
				if err = idx.MarshalWrite(buf); chk.E(err) {
					return
				}
				b := buf.Bytes()
				r := Range{b, b}
				idxs = append(idxs, r)
				return
			}(); chk.E(err) {
				return
			}
		}
		return
	}
	// Created-at bounds shared by every remaining branch.
	caStart := new(types2.Uint64)
	caEnd := new(types2.Uint64)
	// Set the start of range (Since or default to zero)
	if f.Since != nil && f.Since.V != 0 {
		caStart.Set(uint64(f.Since.V))
	} else {
		caStart.Set(uint64(0))
	}
	// Set the end of range (Until or default to math.MaxInt64)
	if f.Until != nil && f.Until.V != 0 {
		caEnd.Set(uint64(f.Until.V))
	} else {
		caEnd.Set(uint64(math.MaxInt64))
	}
	if f.Tags != nil && f.Tags.Len() > 0 {
		// sort the tags so they are in iteration order (reverse)
		//
		// NOTE(review): this sorts the slice returned by ToSliceOfTags; it
		// only affects the iterations below if that method exposes the
		// backing slice rather than a copy — confirm against its
		// implementation.
		tmp := f.Tags.ToSliceOfTags()
		sort.Slice(
			tmp, func(i, j int) bool {
				return bytes.Compare(tmp[i].Key(), tmp[j].Key()) > 0
			},
		)
	}
	// TagKindPubkey tkp
	//
	// Kinds x Authors x Tags: one range per (kind, author, tag value).
	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Authors != nil && f.Authors.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
		for _, k := range f.Kinds.ToUint16() {
			for _, author := range f.Authors.ToSliceOfBytes() {
				for _, tag := range f.Tags.ToSliceOfTags() {
					// accept single-letter keys like "e" or filter-style keys like "#e"
					if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
						kind := new(types2.Uint16)
						kind.Set(k)
						var p *types2.PubHash
						if p, err = CreatePubHashFromData(author); chk.E(err) {
							return
						}
						keyBytes := tag.Key()
						key := new(types2.Letter)
						// If the tag key starts with '#', use the second character as the key
						if len(keyBytes) == 2 && keyBytes[0] == '#' {
							key.Set(keyBytes[1])
						} else {
							key.Set(keyBytes[0])
						}
						// One range per tag value (element 0 is the key).
						for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
							valueHash := new(types2.Ident)
							valueHash.FromIdent(valueBytes)
							start, end := new(bytes.Buffer), new(bytes.Buffer)
							idxS := indexes.TagKindPubkeyEnc(
								key, valueHash, kind, p, caStart, nil,
							)
							if err = idxS.MarshalWrite(start); chk.E(err) {
								return
							}
							idxE := indexes.TagKindPubkeyEnc(
								key, valueHash, kind, p, caEnd, nil,
							)
							if err = idxE.MarshalWrite(end); chk.E(err) {
								return
							}
							idxs = append(
								idxs, Range{
									start.Bytes(), end.Bytes(),
								},
							)
						}
					}
				}
			}
		}
		return
	}
	// TagKind tkc
	//
	// Kinds x Tags (no authors).
	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
		for _, k := range f.Kinds.ToUint16() {
			for _, tag := range f.Tags.ToSliceOfTags() {
				if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
					kind := new(types2.Uint16)
					kind.Set(k)
					keyBytes := tag.Key()
					key := new(types2.Letter)
					// If the tag key starts with '#', use the second character as the key
					if len(keyBytes) == 2 && keyBytes[0] == '#' {
						key.Set(keyBytes[1])
					} else {
						key.Set(keyBytes[0])
					}
					for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
						valueHash := new(types2.Ident)
						valueHash.FromIdent(valueBytes)
						start, end := new(bytes.Buffer), new(bytes.Buffer)
						idxS := indexes.TagKindEnc(
							key, valueHash, kind, caStart, nil,
						)
						if err = idxS.MarshalWrite(start); chk.E(err) {
							return
						}
						idxE := indexes.TagKindEnc(
							key, valueHash, kind, caEnd, nil,
						)
						if err = idxE.MarshalWrite(end); chk.E(err) {
							return
						}
						idxs = append(
							idxs, Range{
								start.Bytes(), end.Bytes(),
							},
						)
					}
				}
			}
		}
		return
	}
	// TagPubkey tpc
	//
	// Authors x Tags (no kinds).
	if f.Authors != nil && f.Authors.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
		for _, author := range f.Authors.ToSliceOfBytes() {
			for _, tag := range f.Tags.ToSliceOfTags() {
				if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
					var p *types2.PubHash
					if p, err = CreatePubHashFromData(author); chk.E(err) {
						return
					}
					keyBytes := tag.Key()
					key := new(types2.Letter)
					// If the tag key starts with '#', use the second character as the key
					if len(keyBytes) == 2 && keyBytes[0] == '#' {
						key.Set(keyBytes[1])
					} else {
						key.Set(keyBytes[0])
					}
					for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
						valueHash := new(types2.Ident)
						valueHash.FromIdent(valueBytes)
						start, end := new(bytes.Buffer), new(bytes.Buffer)
						idxS := indexes.TagPubkeyEnc(
							key, valueHash, p, caStart, nil,
						)
						if err = idxS.MarshalWrite(start); chk.E(err) {
							return
						}
						idxE := indexes.TagPubkeyEnc(
							key, valueHash, p, caEnd, nil,
						)
						if err = idxE.MarshalWrite(end); chk.E(err) {
							return
						}
						idxs = append(
							idxs, Range{start.Bytes(), end.Bytes()},
						)
					}
				}
			}
		}
		return
	}
	// Tag tc-
	//
	// Tags only: requires both authors and kinds to be absent/empty.
	if f.Tags != nil && f.Tags.Len() > 0 && (f.Authors == nil || f.Authors.Len() == 0) && (f.Kinds == nil || f.Kinds.Len() == 0) {
		for _, tag := range f.Tags.ToSliceOfTags() {
			if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
				keyBytes := tag.Key()
				key := new(types2.Letter)
				// If the tag key starts with '#', use the second character as the key
				if len(keyBytes) == 2 && keyBytes[0] == '#' {
					key.Set(keyBytes[1])
				} else {
					key.Set(keyBytes[0])
				}
				for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
					valueHash := new(types2.Ident)
					valueHash.FromIdent(valueBytes)
					start, end := new(bytes.Buffer), new(bytes.Buffer)
					idxS := indexes.TagEnc(key, valueHash, caStart, nil)
					if err = idxS.MarshalWrite(start); chk.E(err) {
						return
					}
					idxE := indexes.TagEnc(key, valueHash, caEnd, nil)
					if err = idxE.MarshalWrite(end); chk.E(err) {
						return
					}
					idxs = append(
						idxs, Range{start.Bytes(), end.Bytes()},
					)
				}
			}
		}
		return
	}
	// KindPubkey kpc
	//
	// NOTE(review): this branch calls p.FromPubkey directly rather than
	// CreatePubHashFromData, so hex-encoded authors are not accepted here —
	// confirm whether that asymmetry with the tag branches is intended.
	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Authors != nil && f.Authors.Len() > 0 {
		for _, k := range f.Kinds.ToUint16() {
			for _, author := range f.Authors.ToSliceOfBytes() {
				kind := new(types2.Uint16)
				kind.Set(k)
				p := new(types2.PubHash)
				if err = p.FromPubkey(author); chk.E(err) {
					return
				}
				start, end := new(bytes.Buffer), new(bytes.Buffer)
				idxS := indexes.KindPubkeyEnc(kind, p, caStart, nil)
				if err = idxS.MarshalWrite(start); chk.E(err) {
					return
				}
				idxE := indexes.KindPubkeyEnc(kind, p, caEnd, nil)
				if err = idxE.MarshalWrite(end); chk.E(err) {
					return
				}
				idxs = append(
					idxs, Range{start.Bytes(), end.Bytes()},
				)
			}
		}
		return
	}
	// Kind kc-
	if f.Kinds != nil && f.Kinds.Len() > 0 && (f.Authors == nil || f.Authors.Len() == 0) && (f.Tags == nil || f.Tags.Len() == 0) {
		for _, k := range f.Kinds.ToUint16() {
			kind := new(types2.Uint16)
			kind.Set(k)
			start, end := new(bytes.Buffer), new(bytes.Buffer)
			idxS := indexes.KindEnc(kind, caStart, nil)
			if err = idxS.MarshalWrite(start); chk.E(err) {
				return
			}
			idxE := indexes.KindEnc(kind, caEnd, nil)
			if err = idxE.MarshalWrite(end); chk.E(err) {
				return
			}
			idxs = append(
				idxs, Range{start.Bytes(), end.Bytes()},
			)
		}
		return
	}
	// Pubkey pc-
	//
	// NOTE(review): like KindPubkey above, binary-only author decoding.
	if f.Authors != nil && f.Authors.Len() > 0 {
		for _, author := range f.Authors.ToSliceOfBytes() {
			p := new(types2.PubHash)
			if err = p.FromPubkey(author); chk.E(err) {
				return
			}
			start, end := new(bytes.Buffer), new(bytes.Buffer)
			idxS := indexes.PubkeyEnc(p, caStart, nil)
			if err = idxS.MarshalWrite(start); chk.E(err) {
				return
			}
			idxE := indexes.PubkeyEnc(p, caEnd, nil)
			if err = idxE.MarshalWrite(end); chk.E(err) {
				return
			}
			idxs = append(
				idxs, Range{start.Bytes(), end.Bytes()},
			)
		}
		return
	}
	// CreatedAt c--
	//
	// Fallback: no ids, tags, kinds, or authors — a single created-at range.
	start, end := new(bytes.Buffer), new(bytes.Buffer)
	idxS := indexes.CreatedAtEnc(caStart, nil)
	if err = idxS.MarshalWrite(start); chk.E(err) {
		return
	}
	idxE := indexes.CreatedAtEnc(caEnd, nil)
	if err = idxE.MarshalWrite(end); chk.E(err) {
		return
	}
	idxs = append(
		idxs, Range{start.Bytes(), end.Bytes()},
	)
	return
}

587
pkg/database/get-indexes-from-filter_test.go

@@ -0,0 +1,587 @@
package database
import (
"bytes"
"math"
"testing"
"crypto.orly/sha256"
"database.orly/indexes"
types2 "database.orly/indexes/types"
"encoders.orly/filter"
"encoders.orly/kind"
"encoders.orly/tag"
"encoders.orly/timestamp"
"lol.mleku.dev/chk"
"utils.orly"
)
// TestGetIndexesFromFilter tests the GetIndexesFromFilter function
func TestGetIndexesFromFilter(t *testing.T) {
t.Run("ID", testIdFilter)
t.Run("Pubkey", testPubkeyFilter)
t.Run("CreatedAt", testCreatedAtFilter)
t.Run("CreatedAtUntil", testCreatedAtUntilFilter)
t.Run("TagPubkey", testPubkeyTagFilter)
t.Run("Tag", testTagFilter)
t.Run("Kind", testKindFilter)
t.Run("KindPubkey", testKindPubkeyFilter)
t.Run("MultipleKindPubkey", testMultipleKindPubkeyFilter)
t.Run("TagKind", testKindTagFilter)
t.Run("TagKindPubkey", testKindPubkeyTagFilter)
}
// verifyIndex asserts that idxs holds exactly one Range whose Start and End
// bytes equal the marshalled forms of the expected indexes. When
// expectedEndIdx is nil, the End is expected to equal the Start.
func verifyIndex(
	t *testing.T, idxs []Range, expectedStartIdx, expectedEndIdx *indexes.T,
) {
	if len(idxs) != 1 {
		t.Fatalf("Expected 1 index, got %d", len(idxs))
	}
	// Default the end index to the start index when not supplied.
	if expectedEndIdx == nil {
		expectedEndIdx = expectedStartIdx
	}
	startBuf := new(bytes.Buffer)
	if err := expectedStartIdx.MarshalWrite(startBuf); chk.E(err) {
		t.Fatalf("Failed to marshal expected start index: %v", err)
	}
	if !utils.FastEqual(idxs[0].Start, startBuf.Bytes()) {
		t.Errorf("Generated start index does not match expected start index")
		t.Errorf("Generated: %v", idxs[0].Start)
		t.Errorf("Expected: %v", startBuf.Bytes())
	}
	endBuf := new(bytes.Buffer)
	if err := expectedEndIdx.MarshalWrite(endBuf); chk.E(err) {
		t.Fatalf("Failed to marshal expected End index: %v", err)
	}
	if !utils.FastEqual(idxs[0].End, endBuf.Bytes()) {
		t.Errorf("Generated End index does not match expected End index")
		t.Errorf("Generated: %v", idxs[0].End)
		t.Errorf("Expected: %v", endBuf.Bytes())
	}
}
// Test ID filter
func testIdFilter(t *testing.T) {
	// A filter containing only an ID must produce a single exact-match
	// range, i.e. Start and End hold identical key bytes.
	f := filter.New()
	id := make([]byte, sha256.Size)
	for i := 0; i < len(id); i++ {
		id[i] = byte(i)
	}
	f.Ids.T = append(f.Ids.T, id)
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	idHash := new(types2.IdHash)
	if err = idHash.FromId(id); chk.E(err) {
		t.Fatalf("Failed to create IdHash: %v", err)
	}
	expectedIdx := indexes.IdEnc(idHash, nil)
	verifyIndex(t, idxs, expectedIdx, expectedIdx)
}
// Test Pubkey filter
//
// testPubkeyFilter checks that an author-only filter with both Since and
// Until set yields a single PubkeyEnc range bounded by those two timestamps.
func testPubkeyFilter(t *testing.T) {
	// Create a filter with an Author, Since, and Until
	f := filter.New()
	pubkey := make([]byte, 32)
	for i := range pubkey {
		pubkey[i] = byte(i)
	}
	f.Authors.T = append(f.Authors.T, pubkey)
	f.Since = timestamp.FromUnix(12345)
	f.Until = timestamp.FromUnix(67890) // Added Until field
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// Create the expected indexes
	p := new(types2.PubHash)
	err = p.FromPubkey(pubkey)
	if chk.E(err) {
		t.Fatalf("Failed to create PubHash: %v", err)
	}
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	expectedStartIdx := indexes.PubkeyEnc(p, caStart, nil)
	// End index uses Until
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(f.Until.V))
	expectedEndIdx := indexes.PubkeyEnc(p, caEnd, nil)
	// Verify the generated index
	verifyIndex(t, idxs, expectedStartIdx, expectedEndIdx)
}
// Test CreatedAt filter
func testCreatedAtFilter(t *testing.T) {
	// Only Since is set, so the expected range runs from Since up to the
	// math.MaxInt64 default.
	f := filter.New()
	f.Since = timestamp.FromUnix(12345)
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	lo, hi := new(types2.Uint64), new(types2.Uint64)
	lo.Set(uint64(f.Since.V))
	hi.Set(uint64(math.MaxInt64))
	verifyIndex(
		t, idxs, indexes.CreatedAtEnc(lo, nil), indexes.CreatedAtEnc(hi, nil),
	)
}
// Test CreatedAt filter with Until
func testCreatedAtUntilFilter(t *testing.T) {
	// Only Until is set, so the expected range runs from the zero default
	// up to Until.
	f := filter.New()
	f.Until = timestamp.FromUnix(67890)
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	lo, hi := new(types2.Uint64), new(types2.Uint64)
	lo.Set(uint64(0))
	hi.Set(uint64(f.Until.V))
	verifyIndex(
		t, idxs, indexes.CreatedAtEnc(lo, nil), indexes.CreatedAtEnc(hi, nil),
	)
}
// Test TagPubkey filter
//
// testPubkeyTagFilter checks that an author+tag filter (no kinds) selects the
// TagPubkeyEnc branch, ranging from Since to math.MaxInt64.
func testPubkeyTagFilter(t *testing.T) {
	// Create a filter with an Author, a Tag, and Since
	f := filter.New()
	pubkey := make([]byte, 32)
	for i := range pubkey {
		pubkey[i] = byte(i)
	}
	f.Authors.T = append(f.Authors.T, pubkey)
	// Create a tag
	tagKey := "e"
	tagValue := "test-value"
	tagT := tag.NewFromAny(tagKey, tagValue)
	*f.Tags = append(*f.Tags, tagT)
	f.Since = timestamp.FromUnix(12345)
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// Create the expected indexes
	p := new(types2.PubHash)
	err = p.FromPubkey(pubkey)
	if chk.E(err) {
		t.Fatalf("Failed to create PubHash: %v", err)
	}
	key := new(types2.Letter)
	key.Set(tagKey[0])
	valueHash := new(types2.Ident)
	valueHash.FromIdent([]byte(tagValue))
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	expectedStartIdx := indexes.TagPubkeyEnc(key, valueHash, p, caStart, nil)
	// End index uses math.MaxInt64 since Until is not specified
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(math.MaxInt64))
	expectedEndIdx := indexes.TagPubkeyEnc(key, valueHash, p, caEnd, nil)
	// Verify the generated index
	verifyIndex(t, idxs, expectedStartIdx, expectedEndIdx)
}
// Test Tag filter
func testTagFilter(t *testing.T) {
	// A tag-only filter (no kinds, no authors) should fall through to the
	// TagEnc branch, ranging from Since to math.MaxInt64.
	f := filter.New()
	tagKey := "e"
	tagValue := "test-value"
	*f.Tags = append(*f.Tags, tag.NewFromAny(tagKey, tagValue))
	f.Since = timestamp.FromUnix(12345)
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	key := new(types2.Letter)
	key.Set(tagKey[0])
	valueHash := new(types2.Ident)
	valueHash.FromIdent([]byte(tagValue))
	lo, hi := new(types2.Uint64), new(types2.Uint64)
	lo.Set(uint64(f.Since.V))
	hi.Set(uint64(math.MaxInt64))
	verifyIndex(
		t, idxs,
		indexes.TagEnc(key, valueHash, lo, nil),
		indexes.TagEnc(key, valueHash, hi, nil),
	)
}
// Test Kind filter
func testKindFilter(t *testing.T) {
	// A kind-only filter should select the KindEnc branch, ranging from
	// Since to math.MaxInt64. TextNote encodes as kind value 1.
	f := filter.New()
	f.Kinds = kind.NewS(kind.TextNote)
	f.Since = timestamp.FromUnix(12345)
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	k := new(types2.Uint16)
	k.Set(1)
	lo, hi := new(types2.Uint64), new(types2.Uint64)
	lo.Set(uint64(f.Since.V))
	hi.Set(uint64(math.MaxInt64))
	verifyIndex(
		t, idxs, indexes.KindEnc(k, lo, nil), indexes.KindEnc(k, hi, nil),
	)
}
// Test KindPubkey filter
//
// testKindPubkeyFilter checks that a kind+author filter (no tags) selects the
// KindPubkeyEnc branch, ranging from Since to math.MaxInt64.
func testKindPubkeyFilter(t *testing.T) {
	// Create a filter with a Kind, an Author, and Since
	f := filter.New()
	f.Kinds = kind.NewS(kind.TextNote)
	pubkey := make([]byte, 32)
	for i := range pubkey {
		pubkey[i] = byte(i)
	}
	f.Authors.T = append(f.Authors.T, pubkey)
	f.Since = timestamp.FromUnix(12345)
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// Create the expected indexes (TextNote encodes as kind value 1)
	k := new(types2.Uint16)
	k.Set(1)
	p := new(types2.PubHash)
	err = p.FromPubkey(pubkey)
	if chk.E(err) {
		t.Fatalf("Failed to create PubHash: %v", err)
	}
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	expectedStartIdx := indexes.KindPubkeyEnc(k, p, caStart, nil)
	// End index uses math.MaxInt64 since Until is not specified
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(math.MaxInt64))
	expectedEndIdx := indexes.KindPubkeyEnc(k, p, caEnd, nil)
	// Verify the generated index
	verifyIndex(t, idxs, expectedStartIdx, expectedEndIdx)
}
// Test TagKind filter
//
// testKindTagFilter checks that a kind+tag filter (no authors) selects the
// TagKindEnc branch, ranging from Since to math.MaxInt64.
func testKindTagFilter(t *testing.T) {
	// Create a filter with a Kind, a Tag, and Since
	f := filter.New()
	f.Kinds = kind.NewS(kind.TextNote)
	// Create a tag
	tagKey := "e"
	tagValue := "test-value"
	tagT := tag.NewFromAny(tagKey, tagValue)
	*f.Tags = append(*f.Tags, tagT)
	f.Since = timestamp.FromUnix(12345)
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// Create the expected indexes (TextNote encodes as kind value 1)
	k := new(types2.Uint16)
	k.Set(1)
	key := new(types2.Letter)
	key.Set(tagKey[0])
	valueHash := new(types2.Ident)
	valueHash.FromIdent([]byte(tagValue))
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	expectedStartIdx := indexes.TagKindEnc(key, valueHash, k, caStart, nil)
	// End index uses math.MaxInt64 since Until is not specified
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(math.MaxInt64))
	expectedEndIdx := indexes.TagKindEnc(key, valueHash, k, caEnd, nil)
	// Verify the generated index
	verifyIndex(t, idxs, expectedStartIdx, expectedEndIdx)
}
// Test Multiple KindPubkey filter
//
// testMultipleKindPubkeyFilter verifies that a filter with 2 kinds and 2
// authors yields exactly the 4 kind x author KindPubkeyEnc ranges, each
// bounded by [Since, math.MaxInt64]. The previous version spelled out all
// eight marshal stanzas by hand; they are now generated by helpers.
func testMultipleKindPubkeyFilter(t *testing.T) {
	// Create a filter with multiple Kinds and multiple Authors
	f := filter.New()
	f.Kinds = kind.NewS(kind.New(1), kind.New(2))
	// Create two pubkeys
	pubkey1 := make([]byte, 32)
	pubkey2 := make([]byte, 32)
	for i := range pubkey1 {
		pubkey1[i] = byte(i)
		pubkey2[i] = byte(i + 100)
	}
	f.Authors.T = append(f.Authors.T, pubkey1)
	f.Authors.T = append(f.Authors.T, pubkey2)
	f.Since = timestamp.FromUnix(12345)
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// We should have 4 indexes (2 kinds * 2 pubkeys)
	if len(idxs) != 4 {
		t.Fatalf("Expected 4 indexes, got %d", len(idxs))
	}
	// mkKind builds an encoder kind value.
	mkKind := func(v uint16) *types2.Uint16 {
		k := new(types2.Uint16)
		k.Set(v)
		return k
	}
	// mkPub builds a PubHash from a raw pubkey, failing the test on error.
	mkPub := func(pk []byte) *types2.PubHash {
		p := new(types2.PubHash)
		if err := p.FromPubkey(pk); chk.E(err) {
			t.Fatalf("Failed to create PubHash: %v", err)
		}
		return p
	}
	// marshal renders an index to its key bytes, failing the test on error.
	marshal := func(idx *indexes.T) []byte {
		buf := new(bytes.Buffer)
		if err := idx.MarshalWrite(buf); chk.E(err) {
			t.Fatalf("Failed to marshal index: %v", err)
		}
		return buf.Bytes()
	}
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	// End index uses math.MaxInt64 since Until is not specified
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(math.MaxInt64))
	// Create all expected (start, end) pairs: 2 kinds * 2 pubkeys, stored
	// as consecutive start/end entries like before.
	kinds := []*types2.Uint16{mkKind(1), mkKind(2)}
	pubs := []*types2.PubHash{mkPub(pubkey1), mkPub(pubkey2)}
	expectedIdxs := make([][]byte, 0, 8) // 4 combinations * 2 (start/end)
	for _, k := range kinds {
		for _, p := range pubs {
			expectedIdxs = append(
				expectedIdxs,
				marshal(indexes.KindPubkeyEnc(k, p, caStart, nil)),
				marshal(indexes.KindPubkeyEnc(k, p, caEnd, nil)),
			)
		}
	}
	// Verify that all expected combinations are present
	foundCombinations := 0
	for _, idx := range idxs {
		for i := 0; i < len(expectedIdxs); i += 2 {
			if utils.FastEqual(idx.Start, expectedIdxs[i]) && utils.FastEqual(
				idx.End, expectedIdxs[i+1],
			) {
				foundCombinations++
				break
			}
		}
	}
	if foundCombinations != 4 {
		t.Fatalf("Expected to find 4 combinations, found %d", foundCombinations)
	}
}
// Test TagKindPubkey filter
//
// testKindPubkeyTagFilter checks that a filter combining kind, author, and
// tag selects the highest-priority TagKindPubkeyEnc branch, ranging from
// Since to math.MaxInt64.
func testKindPubkeyTagFilter(t *testing.T) {
	// Create a filter with a Kind, an Author, a Tag, and Since
	f := filter.New()
	f.Kinds = kind.NewS(kind.New(1))
	pubkey := make([]byte, 32)
	for i := range pubkey {
		pubkey[i] = byte(i)
	}
	f.Authors.T = append(f.Authors.T, pubkey)
	// Create a tag
	tagKey := "e"
	tagValue := "test-value"
	tagT := tag.NewFromAny(tagKey, tagValue)
	*f.Tags = append(*f.Tags, tagT)
	f.Since = timestamp.FromUnix(12345)
	// Generate indexes
	idxs, err := GetIndexesFromFilter(f)
	if chk.E(err) {
		t.Fatalf("GetIndexesFromFilter failed: %v", err)
	}
	// Create the expected indexes
	k := new(types2.Uint16)
	k.Set(1)
	p := new(types2.PubHash)
	err = p.FromPubkey(pubkey)
	if chk.E(err) {
		t.Fatalf("Failed to create PubHash: %v", err)
	}
	key := new(types2.Letter)
	key.Set(tagKey[0])
	valueHash := new(types2.Ident)
	valueHash.FromIdent([]byte(tagValue))
	// Start index uses Since
	caStart := new(types2.Uint64)
	caStart.Set(uint64(f.Since.V))
	expectedStartIdx := indexes.TagKindPubkeyEnc(
		key, valueHash, k, p, caStart, nil,
	)
	// End index uses math.MaxInt64 since Until is not specified
	caEnd := new(types2.Uint64)
	caEnd.Set(uint64(math.MaxInt64))
	expectedEndIdx := indexes.TagKindPubkeyEnc(
		key, valueHash, k, p, caEnd, nil,
	)
	// Verify the generated index
	verifyIndex(t, idxs, expectedStartIdx, expectedEndIdx)
}

77
pkg/database/get-serial-by-id.go

@@ -0,0 +1,77 @@
package database
import (
"bytes"
"database.orly/indexes/types"
"encoders.orly/filter"
"encoders.orly/tag"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
)
// GetSerialById looks up the serial assigned to a stored event by its event
// ID. The ID is converted into an exact-match index range which is sought in
// a badger iterator; the serial occupies the last 5 bytes of the matching
// key. A nil serial with a nil error means the ID was not found.
//
// Fix: the empty-idxs branch previously set an error but did not return, so
// the idxs[0].Start access below would panic with index out of range.
func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
	var idxs []Range
	if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
		return
	}
	if len(idxs) == 0 {
		// Must return here: without indexes the idxs[0] access below would
		// panic.
		err = errorf.E("no indexes found for id %0x", id)
		return
	}
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			var key []byte
			defer it.Close()
			it.Seek(idxs[0].Start)
			if it.ValidForPrefix(idxs[0].Start) {
				item := it.Item()
				key = item.Key()
				// The serial is the trailing 5 bytes of the index key.
				ser = new(types.Uint40)
				buf := bytes.NewBuffer(key[len(key)-5:])
				if err = ser.UnmarshalRead(buf); chk.E(err) {
					return
				}
			} else {
				// just don't return what we don't have? others may be
				// found tho.
			}
			return
		},
	); chk.E(err) {
		return
	}
	return
}
//
// func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
// var idxs []Range
// if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {
// return
// }
// if len(idxs) == 0 {
// err = errorf.E("no indexes found for id %0x", id)
// }
// if err = d.View(
// func(txn *badger.Txn) (err error) {
// it := txn.NewIterator(badger.DefaultIteratorOptions)
// var key []byte
// defer it.Close()
// it.Seek(idxs[0].Start)
// if it.ValidForPrefix(idxs[0].Start) {
// item := it.Item()
// key = item.Key()
// ser = key[len(key)-5:]
// } else {
// // just don't return what we don't have? others may be
// // found tho.
// }
// return
// },
// ); chk.E(err) {
// return
// }
// return
// }

101
pkg/database/get-serial-by-id_test.go

@@ -0,0 +1,101 @@
package database
import (
"bufio"
"bytes"
"context"
"os"
"testing"
"encoders.orly/event"
"encoders.orly/event/examples"
"lol.mleku.dev/chk"
)
// TestGetSerialById exercises the GetSerialById round trip: it loads the
// example event cache into a fresh temporary database, then verifies that a
// stored event's ID resolves to a non-nil serial and that an unknown ID
// resolves to a nil serial with no error.
func TestGetSerialById(t *testing.T) {
	// Create a temporary directory for the database
	tempDir, err := os.MkdirTemp("", "test-db-*")
	if err != nil {
		t.Fatalf("Failed to create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after the test
	// Create a context and cancel function for the database
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Initialize the database
	db, err := New(ctx, cancel, tempDir, "info")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	// Create a scanner to read events from examples.Cache
	scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
	// Large buffer: some example events exceed the default 64KiB line limit.
	scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
	// Count the number of events processed
	eventCount := 0
	var events []*event.E
	// Process each event (one JSON event per line)
	for scanner.Scan() {
		chk.E(scanner.Err())
		b := scanner.Bytes()
		ev := event.New()
		// Unmarshal the event
		if _, err = ev.Unmarshal(b); chk.E(err) {
			t.Fatal(err)
		}
		events = append(events, ev)
		// Save the event to the database
		if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
			t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
		}
		eventCount++
	}
	// Check for scanner errors
	if err = scanner.Err(); err != nil {
		t.Fatalf("Scanner error: %v", err)
	}
	t.Logf("Successfully saved %d events to the database", eventCount)
	// Test GetSerialById with a known event ID
	testEvent := events[3] // Using the same event as in QueryForIds test
	// Get the serial by ID
	serial, err := db.GetSerialById(testEvent.ID)
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}
	// Verify the serial is not nil
	if serial == nil {
		t.Fatal("Expected serial to be non-nil, but got nil")
	}
	// Test with a non-existent ID: bitwise complement of a real ID
	nonExistentId := make([]byte, len(testEvent.ID))
	// Ensure it's different from any real ID
	for i := range nonExistentId {
		nonExistentId[i] = ^testEvent.ID[i]
	}
	serial, err = db.GetSerialById(nonExistentId)
	if err != nil {
		t.Fatalf("Expected no error for non-existent ID, but got: %v", err)
	}
	// For non-existent Ids, the function should return nil serial
	if serial != nil {
		t.Fatalf("Expected nil serial for non-existent ID, but got: %v", serial)
	}
}

51
pkg/database/get-serials-by-range.go

@@ -0,0 +1,51 @@
package database
import (
"bytes"
"sort"
"database.orly/indexes/types"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
)
// GetSerialsByRange collects the event serials (the trailing 5 bytes of each
// index key) for all keys lying between idx.Start and idx.End, returning
// them sorted in ascending serial order.
//
// Iteration is performed in reverse: Seek(idx.End) positions the reverse
// iterator and it walks backwards until the key, minus its 5-byte serial
// suffix, compares below idx.Start.
//
// NOTE(review): the slicing key[:len(key)-5] assumes every visited key is at
// least 5 bytes long — confirm this holds for all key families in the store.
func (d *D) GetSerialsByRange(idx Range) (
	sers types.Uint40s, err error,
) {
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			// Key-only reverse iterator (no value prefetch configured).
			it := txn.NewIterator(
				badger.IteratorOptions{
					Reverse: true,
				},
			)
			defer it.Close()
			for it.Seek(idx.End); it.Valid(); it.Next() {
				item := it.Item()
				var key []byte
				key = item.Key()
				// Compare the key without its serial suffix against the
				// range start; once below it, stop.
				if bytes.Compare(
					key[:len(key)-5], idx.Start,
				) < 0 {
					// didn't find it within the timestamp range
					return
				}
				// Decode the trailing 5 bytes as the event serial.
				ser := new(types.Uint40)
				buf := bytes.NewBuffer(key[len(key)-5:])
				if err = ser.UnmarshalRead(buf); chk.E(err) {
					return
				}
				sers = append(sers, ser)
			}
			return
		},
	); chk.E(err) {
		return
	}
	// Sort ascending by serial value before returning.
	sort.Slice(
		sers, func(i, j int) bool {
			return sers[i].Get() < sers[j].Get()
		},
	)
	return
}

232
pkg/database/get-serials-by-range_test.go

@@ -0,0 +1,232 @@
package database
import (
"bufio"
"bytes"
"context"
"os"
"testing"
"database.orly/indexes/types"
"encoders.orly/event"
"encoders.orly/event/examples"
"encoders.orly/filter"
"encoders.orly/kind"
"encoders.orly/tag"
"encoders.orly/timestamp"
"lol.mleku.dev/chk"
"utils.orly"
)
// TestGetSerialsByRange is an integration test: it loads the example event
// cache into a fresh database in a temp directory, then exercises
// GetSerialsByRange with a time-range filter, a kind filter and an author
// filter, verifying that every returned serial resolves to an event that
// matches the filter used to produce it.
func TestGetSerialsByRange(t *testing.T) {
	// Create a temporary directory for the database
	tempDir, err := os.MkdirTemp("", "test-db-*")
	if err != nil {
		t.Fatalf("Failed to create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after the test
	// Create a context and cancel function for the database
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Initialize the database
	db, err := New(ctx, cancel, tempDir, "info")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()
	// Create a scanner to read events from examples.Cache
	scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
	scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
	// Count the number of events processed
	eventCount := 0
	var events []*event.E
	// NOTE(review): eventSerials is written below but never read back in
	// this test; it appears to be leftover scaffolding — consider removing.
	var eventSerials = make(map[string]*types.Uint40) // Map event ID (hex) to serial
	// Process each event
	for scanner.Scan() {
		chk.E(scanner.Err())
		b := scanner.Bytes()
		ev := event.New()
		// Unmarshal the event
		if _, err = ev.Unmarshal(b); chk.E(err) {
			t.Fatal(err)
		}
		events = append(events, ev)
		// Save the event to the database
		if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
			t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
		}
		// Get the serial for this event
		serial, err := db.GetSerialById(ev.ID)
		if err != nil {
			t.Fatalf(
				"Failed to get serial for event #%d: %v", eventCount+1, err,
			)
		}
		if serial != nil {
			eventSerials[string(ev.ID)] = serial
		}
		eventCount++
	}
	// Check for scanner errors
	if err = scanner.Err(); err != nil {
		t.Fatalf("Scanner error: %v", err)
	}
	t.Logf("Successfully saved %d events to the database", eventCount)
	// Test GetSerialsByRange with a time range filter
	// Use the timestamp from the middle event as a reference
	middleIndex := len(events) / 2
	middleEvent := events[middleIndex]
	// Create a timestamp range that includes events before and after the middle event
	sinceTime := new(timestamp.T)
	sinceTime.V = middleEvent.CreatedAt - 3600 // 1 hour before middle event
	untilTime := new(timestamp.T)
	untilTime.V = middleEvent.CreatedAt + 3600 // 1 hour after middle event
	// Create a filter with the time range
	timeFilter := &filter.F{
		Since: sinceTime,
		Until: untilTime,
	}
	// Get the indexes from the filter
	ranges, err := GetIndexesFromFilter(timeFilter)
	if err != nil {
		t.Fatalf("Failed to get indexes from filter: %v", err)
	}
	// Verify we got at least one range
	if len(ranges) == 0 {
		t.Fatal("Expected at least one range from filter, but got none")
	}
	// Test GetSerialsByRange with the first range
	serials, err := db.GetSerialsByRange(ranges[0])
	if err != nil {
		t.Fatalf("Failed to get serials by range: %v", err)
	}
	// Verify we got results
	if len(serials) == 0 {
		t.Fatal("Expected serials for events in time range, but got none")
	}
	// Verify the serials correspond to events within the time range
	for i, serial := range serials {
		// Fetch the event using the serial
		ev, err := db.FetchEventBySerial(serial)
		if err != nil {
			t.Fatalf("Failed to fetch event for serial %d: %v", i, err)
		}
		if ev.CreatedAt < sinceTime.V || ev.CreatedAt > untilTime.V {
			t.Fatalf(
				"Event %d is outside the time range. Got %d, expected between %d and %d",
				i, ev.CreatedAt, sinceTime.V, untilTime.V,
			)
		}
	}
	// Test GetSerialsByRange with a kind filter
	testKind := kind.New(1) // Kind 1 is typically text notes
	kindFilter := &filter.F{
		Kinds: kind.NewS(testKind),
	}
	// Get the indexes from the filter
	ranges, err = GetIndexesFromFilter(kindFilter)
	if err != nil {
		t.Fatalf("Failed to get indexes from filter: %v", err)
	}
	// Verify we got at least one range
	if len(ranges) == 0 {
		t.Fatal("Expected at least one range from filter, but got none")
	}
	// Test GetSerialsByRange with the first range
	serials, err = db.GetSerialsByRange(ranges[0])
	if err != nil {
		t.Fatalf("Failed to get serials by range: %v", err)
	}
	// Verify we got results
	if len(serials) == 0 {
		t.Fatal("Expected serials for events with kind 1, but got none")
	}
	// Verify the serials correspond to events with the correct kind
	for i, serial := range serials {
		// Fetch the event using the serial
		ev, err := db.FetchEventBySerial(serial)
		if err != nil {
			t.Fatalf("Failed to fetch event for serial %d: %v", i, err)
		}
		if ev.Kind != testKind.K {
			t.Fatalf(
				"Event %d has incorrect kind. Got %d, expected %d",
				i, ev.Kind, testKind.K,
			)
		}
	}
	// Test GetSerialsByRange with an author filter
	authorFilter := &filter.F{
		Authors: tag.NewFromBytesSlice(events[1].Pubkey),
	}
	// Get the indexes from the filter
	ranges, err = GetIndexesFromFilter(authorFilter)
	if err != nil {
		t.Fatalf("Failed to get indexes from filter: %v", err)
	}
	// Verify we got at least one range
	if len(ranges) == 0 {
		t.Fatal("Expected at least one range from filter, but got none")
	}
	// Test GetSerialsByRange with the first range
	serials, err = db.GetSerialsByRange(ranges[0])
	if err != nil {
		t.Fatalf("Failed to get serials by range: %v", err)
	}
	// Verify we got results
	if len(serials) == 0 {
		t.Fatal("Expected serials for events from author, but got none")
	}
	// Verify the serials correspond to events with the correct author
	for i, serial := range serials {
		// Fetch the event using the serial
		ev, err := db.FetchEventBySerial(serial)
		if err != nil {
			t.Fatalf("Failed to fetch event for serial %d: %v", i, err)
		}
		if !utils.FastEqual(ev.Pubkey, events[1].Pubkey) {
			t.Fatalf(
				"Event %d has incorrect author. Got %x, expected %x",
				i, ev.Pubkey, events[1].Pubkey,
			)
		}
	}
}

51
pkg/database/go.mod

@ -0,0 +1,51 @@ @@ -0,0 +1,51 @@
module database.orly
go 1.25.0
replace (
crypto.orly => ../crypto
encoders.orly => ../encoders
interfaces.orly => ../interfaces
next.orly.dev => ../../
protocol.orly => ../protocol
utils.orly => ../utils
)
require (
crypto.orly v0.0.0-00010101000000-000000000000
encoders.orly v0.0.0-00010101000000-000000000000
github.com/dgraph-io/badger/v4 v4.8.0
go.uber.org/atomic v1.11.0
interfaces.orly v0.0.0-00010101000000-000000000000
lol.mleku.dev v1.0.2
lukechampine.com/frand v1.5.1
utils.orly v0.0.0-00010101000000-000000000000
)
require (
github.com/adrg/xdg v0.5.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/templexxx/cpu v0.0.1 // indirect
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
go-simpler.org/env v0.12.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sys v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
next.orly.dev v0.0.0-00010101000000-000000000000 // indirect
)

68
pkg/database/go.sum

@ -0,0 +1,68 @@ @@ -0,0 +1,68 @@
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=

83
pkg/database/import.go

@ -0,0 +1,83 @@ @@ -0,0 +1,83 @@
package database
import (
	"bufio"
	"io"
	"os"
	"path/filepath"
	"runtime/debug"

	"encoders.orly/event"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
)
// maxLen bounds both the scanner buffer and the longest accepted JSONL line.
const maxLen = 500000000

// Import reads a collection of events in line-structured minified JSON
// format (JSONL) from rr. The payload is first buffered to a temporary
// file so this method can return to the caller quickly; a background
// goroutine then scans the buffer line by line and saves each event to
// the database. Lines that fail to parse or save are skipped silently
// (best effort import).
func (d *D) Import(rr io.Reader) {
	// Store to disk so we can return fast.
	tmpPath := filepath.Join(os.TempDir(), "orly")
	if err := os.MkdirAll(tmpPath, 0700); chk.E(err) {
		// Previously this error was ignored; CreateTemp would then fail
		// with a less useful message.
		return
	}
	tmp, err := os.CreateTemp(tmpPath, "")
	if chk.E(err) {
		return
	}
	log.I.F("buffering upload to %s", tmp.Name())
	if _, err = io.Copy(tmp, rr); chk.E(err) {
		tmp.Close()
		os.Remove(tmp.Name())
		return
	}
	if _, err = tmp.Seek(0, 0); chk.E(err) {
		tmp.Close()
		os.Remove(tmp.Name())
		return
	}
	go func() {
		// Close and delete the temp buffer when the import finishes;
		// previously the file was leaked (never closed or removed).
		defer func() {
			chk.E(tmp.Close())
			chk.E(os.Remove(tmp.Name()))
		}()
		var err error
		// Create a scanner to read the buffer line by line.
		scan := bufio.NewScanner(tmp)
		scanBuf := make([]byte, maxLen)
		scan.Buffer(scanBuf, maxLen)
		var count, total int
		for scan.Scan() {
			// Abort promptly if the database context is cancelled.
			select {
			case <-d.ctx.Done():
				log.I.F("context closed")
				return
			default:
			}
			b := scan.Bytes()
			total += len(b) + 1
			if len(b) < 1 {
				// Skip blank lines.
				continue
			}
			ev := &event.E{}
			// Skip lines that don't parse as events (best effort).
			if _, err = ev.Unmarshal(b); err != nil {
				continue
			}
			// Skip events that fail to save (best effort).
			if _, _, err = d.SaveEvent(d.ctx, ev, false, nil); err != nil {
				continue
			}
			count++
			if count%100 == 0 {
				log.I.F("received %d events", count)
				debug.FreeOSMemory()
			}
		}
		log.I.F("read %d bytes and saved %d events", total, count)
		// Log (via chk) any scanner error; previously the result was
		// checked inside an empty if-block.
		chk.E(scan.Err())
	}()
}

439
pkg/database/indexes/keys.go

@ -0,0 +1,439 @@ @@ -0,0 +1,439 @@
package indexes
import (
"io"
"reflect"
"database.orly/indexes/types"
"interfaces.orly/codec"
"lol.mleku.dev/chk"
)
// counter hands out sequential integer identifiers for the index kinds
// declared below via next(). Its zero value is the correct starting point.
//
// The previous init() that re-assigned counter = 0 has been removed: per
// the Go spec, package-level variable initialization (the Event = next()
// ... Version = next() assignments) runs BEFORE init functions, so the
// init was resetting the counter after all identifiers were handed out —
// any later call to next() would have produced colliding identifiers.
var counter int

// next returns the current counter value and post-increments it: the
// first call yields 0, the second 1, and so on.
func next() int { counter++; return counter - 1 }
// P is a fixed three-byte index key prefix element.
type P struct {
	val []byte
}

// NewPrefix returns a P holding the prefix bytes for the given index
// identifier. With no argument it returns a zeroed three-byte
// placeholder, used on the decode side where the prefix has already been
// consumed, or when building search prefixes. It panics when the
// identifier is not one known to Prefix.
func NewPrefix(prf ...int) (p *P) {
	if len(prf) > 0 {
		prefix := Prefix(prf[0])
		if prefix == "" {
			panic("unknown prefix")
		}
		return &P{[]byte(prefix)}
	}
	return &P{[]byte{0, 0, 0}}
}

// Bytes returns the raw prefix bytes.
func (p *P) Bytes() (b []byte) { return p.val }

// MarshalWrite writes the prefix bytes to w.
func (p *P) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(p.val)
	return
}

// UnmarshalRead reads exactly three bytes from r into the prefix. It uses
// io.ReadFull so a short read is reported as an error instead of silently
// leaving the prefix partially filled (the previous single r.Read call
// was permitted to return fewer than three bytes without error).
func (p *P) UnmarshalRead(r io.Reader) (err error) {
	if len(p.val) == 0 { // len of a nil slice is 0, no separate nil check needed
		p.val = make([]byte, 3) // prefixes are 3 bytes
	}
	_, err = io.ReadFull(r, p.val)
	return
}
// I is a three-byte human-readable index prefix identifier.
type I string

// Write writes the prefix bytes to w, returning the byte count written.
func (i I) Write(w io.Writer) (n int, err error) { return w.Write([]byte(i)) }

// The three-byte prefixes for each index table. Each is unique; the
// trailing comment describes the fields that follow the prefix in the key.
const (
	EventPrefix = I("evt") // event in binary format, keyed by serial
	IdPrefix = I("eid") // truncated event ID hash, serial
	FullIdPubkeyPrefix = I("fpc") // full id, pubkey, created at
	CreatedAtPrefix = I("c--") // created at
	KindPrefix = I("kc-") // kind, created at
	PubkeyPrefix = I("pc-") // pubkey, created at
	KindPubkeyPrefix = I("kpc") // kind, pubkey, created at
	TagPrefix = I("tc-") // tag, created at
	TagKindPrefix = I("tkc") // tag, kind, created at
	TagPubkeyPrefix = I("tpc") // tag, pubkey, created at
	TagKindPubkeyPrefix = I("tkp") // tag, kind, pubkey, created at
	ExpirationPrefix = I("exp") // timestamp of expiration
	VersionPrefix = I("ver") // database version number, for triggering reindexes when new keys are added (policy is add-only).
)
// Prefix returns the three byte human-readable prefix that goes in front
// of database indexes for the given index identifier, or the empty string
// when the identifier is not recognised.
func Prefix(prf int) (i I) {
	table := map[int]I{
		Event:         EventPrefix,
		Id:            IdPrefix,
		FullIdPubkey:  FullIdPubkeyPrefix,
		CreatedAt:     CreatedAtPrefix,
		Kind:          KindPrefix,
		Pubkey:        PubkeyPrefix,
		KindPubkey:    KindPubkeyPrefix,
		Tag:           TagPrefix,
		TagKind:       TagKindPrefix,
		TagPubkey:     TagPubkeyPrefix,
		TagKindPubkey: TagKindPubkeyPrefix,
		Expiration:    ExpirationPrefix,
		Version:       VersionPrefix,
	}
	// A missing key yields the zero value "", matching the original
	// switch's fall-through behavior for unknown identifiers.
	return table[prf]
}
// Identify reads a three-byte prefix from r and returns the matching
// index identifier. This is here for completeness; searches don't need to
// identify keys as they work via generated prefixes made using Prefix.
// On read failure it returns -1 and the error.
//
// NOTE(review): an unrecognised prefix yields i = 0, which is the same
// value as Event — callers cannot distinguish "unknown" from "event".
// The existing tests rely on 0 for unknown input, so this is documented
// rather than changed.
func Identify(r io.Reader) (i int, err error) {
	var b [3]byte
	// io.ReadFull ensures all three prefix bytes are consumed; a plain
	// r.Read could legally return fewer bytes without an error.
	if _, err = io.ReadFull(r, b[:]); err != nil {
		i = -1
		return
	}
	switch I(b[:]) {
	case EventPrefix:
		i = Event
	case IdPrefix:
		i = Id
	case FullIdPubkeyPrefix:
		i = FullIdPubkey
	case CreatedAtPrefix:
		i = CreatedAt
	case KindPrefix:
		i = Kind
	case PubkeyPrefix:
		i = Pubkey
	case KindPubkeyPrefix:
		i = KindPubkey
	case TagPrefix:
		i = Tag
	case TagKindPrefix:
		i = TagKind
	case TagPubkeyPrefix:
		i = TagPubkey
	case TagKindPubkeyPrefix:
		i = TagKindPubkey
	case ExpirationPrefix:
		i = Expiration
	case VersionPrefix:
		// Previously missing: "ver" keys fell through and were reported
		// as 0 (i.e. mistaken for Event).
		i = Version
	}
	return
}
// Encs is the ordered list of codec elements that together form one
// index key.
type Encs []codec.I

// T is a wrapper around an array of codec.I. The caller provides the Encs so
// they can then call the accessor methods of the codec.I implementation.
type T struct{ Encs }

// New creates a new indexes.T. The helper functions below have an encode and
// decode variant, the decode variant doesn't add the prefix encoder because it
// has been read by Identify or just is being read, and found because it was
// written for the prefix in the iteration.
func New(encoders ...codec.I) (i *T) { return &T{encoders} }

// MarshalWrite writes each element to w in order, skipping nil elements
// (both untyped nil and typed-nil pointers boxed in the interface).
//
// NOTE(review): reflect.ValueOf(e).IsNil() assumes every codec.I
// implementation is a pointer (or other nil-able kind); a value-type
// implementation would make IsNil panic — confirm against the codec
// implementations before adding one.
func (t *T) MarshalWrite(w io.Writer) (err error) {
	for _, e := range t.Encs {
		if e == nil || reflect.ValueOf(e).IsNil() {
			// Skip nil encoders instead of returning early. This enables
			// generating search prefixes.
			continue
		}
		if err = e.MarshalWrite(w); chk.E(err) {
			return
		}
	}
	return
}

// UnmarshalRead decodes each element in order from r. Unlike MarshalWrite
// it does not skip nil elements, so every decoder supplied must be
// non-nil or this will panic on the method call.
func (t *T) UnmarshalRead(r io.Reader) (err error) {
	for _, e := range t.Encs {
		if err = e.UnmarshalRead(r); chk.E(err) {
			return
		}
	}
	return
}
// Event is the whole event stored in binary format
//
// prefix|5 serial - event in binary format
var Event = next()

// EventVars allocates the variables used by the Event index key.
func EventVars() (ser *types.Uint40) { return new(types.Uint40) }

// EventEnc builds an Event key encoder including the prefix.
func EventEnc(ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Event), ser)
}

// EventDec builds an Event key decoder; the prefix slot is a zeroed
// placeholder because the prefix is consumed before decoding.
func EventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }

// Id contains a truncated 8-byte hash of an event index. This is the secondary
// key of an event, the primary key is the serial found in the Event.
//
// 3 prefix|8 ID hash|5 serial
var Id = next()

// IdVars allocates the variables used by the Id index key.
func IdVars() (id *types.IdHash, ser *types.Uint40) {
	return new(types.IdHash), new(types.Uint40)
}

// IdEnc builds an Id key encoder including the prefix.
func IdEnc(id *types.IdHash, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Id), id, ser)
}

// IdDec builds an Id key decoder (placeholder prefix).
func IdDec(id *types.IdHash, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), id, ser)
}

// FullIdPubkey is an index designed to enable sorting and filtering of
// results found via other indexes, without having to decode the event.
//
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp
var FullIdPubkey = next()

// FullIdPubkeyVars allocates the variables used by the FullIdPubkey key.
func FullIdPubkeyVars() (
	ser *types.Uint40, fid *types.Id, p *types.PubHash, ca *types.Uint64,
) {
	return new(types.Uint40), new(types.Id), new(types.PubHash), new(types.Uint64)
}

// FullIdPubkeyEnc builds a FullIdPubkey key encoder including the prefix.
func FullIdPubkeyEnc(
	ser *types.Uint40, fid *types.Id, p *types.PubHash, ca *types.Uint64,
) (enc *T) {
	return New(NewPrefix(FullIdPubkey), ser, fid, p, ca)
}

// FullIdPubkeyDec builds a FullIdPubkey key decoder (placeholder prefix).
func FullIdPubkeyDec(
	ser *types.Uint40, fid *types.Id, p *types.PubHash, ca *types.Uint64,
) (enc *T) {
	return New(NewPrefix(), ser, fid, p, ca)
}

// CreatedAt is an index that allows search for the timestamp on the event.
//
// 3 prefix|8 timestamp|5 serial
var CreatedAt = next()

// CreatedAtVars allocates the variables used by the CreatedAt key.
func CreatedAtVars() (ca *types.Uint64, ser *types.Uint40) {
	return new(types.Uint64), new(types.Uint40)
}

// CreatedAtEnc builds a CreatedAt key encoder including the prefix.
func CreatedAtEnc(ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(CreatedAt), ca, ser)
}

// CreatedAtDec builds a CreatedAt key decoder (placeholder prefix).
func CreatedAtDec(ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), ca, ser)
}
// Kind allows searching by event kind, filtered by timestamp.
//
// 3 prefix|2 kind|8 timestamp|5 serial
var Kind = next()

// KindVars allocates the variables used by the Kind key.
func KindVars() (ki *types.Uint16, ca *types.Uint64, ser *types.Uint40) {
	return new(types.Uint16), new(types.Uint64), new(types.Uint40)
}

// KindEnc builds a Kind key encoder including the prefix.
func KindEnc(ki *types.Uint16, ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Kind), ki, ca, ser)
}

// KindDec builds a Kind key decoder (placeholder prefix).
func KindDec(ki *types.Uint16, ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), ki, ca, ser)
}

// Pubkey is a composite index that allows search by pubkey
// filtered by timestamp.
//
// 3 prefix|8 pubkey hash|8 timestamp|5 serial
var Pubkey = next()

// PubkeyVars allocates the variables used by the Pubkey key.
func PubkeyVars() (p *types.PubHash, ca *types.Uint64, ser *types.Uint40) {
	return new(types.PubHash), new(types.Uint64), new(types.Uint40)
}

// PubkeyEnc builds a Pubkey key encoder including the prefix.
func PubkeyEnc(p *types.PubHash, ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Pubkey), p, ca, ser)
}

// PubkeyDec builds a Pubkey key decoder (placeholder prefix).
func PubkeyDec(p *types.PubHash, ca *types.Uint64, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), p, ca, ser)
}

// KindPubkey allows searching by kind and pubkey, filtered by timestamp.
//
// 3 prefix|2 kind|8 pubkey hash|8 timestamp|5 serial
var KindPubkey = next()

// KindPubkeyVars allocates the variables used by the KindPubkey key.
func KindPubkeyVars() (
	ki *types.Uint16, p *types.PubHash, ca *types.Uint64, ser *types.Uint40,
) {
	return new(types.Uint16), new(types.PubHash), new(types.Uint64), new(types.Uint40)
}

// KindPubkeyEnc builds a KindPubkey key encoder including the prefix.
func KindPubkeyEnc(
	ki *types.Uint16, p *types.PubHash, ca *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(KindPubkey), ki, p, ca, ser)
}

// KindPubkeyDec builds a KindPubkey key decoder (placeholder prefix).
func KindPubkeyDec(
	ki *types.Uint16, p *types.PubHash, ca *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), ki, p, ca, ser)
}
// Tag allows searching for a tag and filter by timestamp.
//
// 3 prefix|1 key letter|8 value hash|8 timestamp|5 serial
var Tag = next()

// TagVars allocates the variables used by the Tag key.
func TagVars() (
	k *types.Letter, v *types.Ident, ca *types.Uint64, ser *types.Uint40,
) {
	return new(types.Letter), new(types.Ident), new(types.Uint64), new(types.Uint40)
}

// TagEnc builds a Tag key encoder including the prefix.
func TagEnc(
	k *types.Letter, v *types.Ident, ca *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(Tag), k, v, ca, ser)
}

// TagDec builds a Tag key decoder (placeholder prefix).
func TagDec(
	k *types.Letter, v *types.Ident, ca *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), k, v, ca, ser)
}
// TagKind allows searching for a tag and kind, filtered by timestamp.
//
// 3 prefix|2 kind|1 key letter|8 value hash|8 timestamp|5 serial
//
// NOTE(review): the original comment listed the key letter and value hash
// BEFORE the kind, but both TagKindEnc and TagKindDec encode the kind
// first (New(..., ki, k, v, ...)); the layout above reflects the actual
// byte order. Also note the parameter order (k, v, ki, ...) deliberately
// differs from the encode order — confirm against the search-prefix
// generation code before changing either.
var TagKind = next()

// TagKindVars allocates the variables used by the TagKind key.
func TagKindVars() (
	k *types.Letter, v *types.Ident, ki *types.Uint16, ca *types.Uint64,
	ser *types.Uint40,
) {
	return new(types.Letter), new(types.Ident), new(types.Uint16), new(types.Uint64), new(types.Uint40)
}

// TagKindEnc builds a TagKind key encoder including the prefix; fields
// are written in the order kind, key letter, value hash, timestamp, serial.
func TagKindEnc(
	k *types.Letter, v *types.Ident, ki *types.Uint16, ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(TagKind), ki, k, v, ca, ser)
}

// TagKindDec builds a TagKind key decoder (placeholder prefix), matching
// the field order written by TagKindEnc.
func TagKindDec(
	k *types.Letter, v *types.Ident, ki *types.Uint16, ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), ki, k, v, ca, ser)
}
// TagPubkey allows searching for a pubkey, tag and timestamp.
//
// 3 prefix|8 pubkey hash|1 key letter|8 value hash|8 timestamp|5 serial
//
// NOTE(review): the original comment listed the key letter and value hash
// BEFORE the pubkey hash, but both TagPubkeyEnc and TagPubkeyDec encode
// the pubkey hash first (New(..., p, k, v, ...)); the layout above
// reflects the actual byte order.
var TagPubkey = next()

// TagPubkeyVars allocates the variables used by the TagPubkey key.
func TagPubkeyVars() (
	k *types.Letter, v *types.Ident, p *types.PubHash, ca *types.Uint64,
	ser *types.Uint40,
) {
	return new(types.Letter), new(types.Ident), new(types.PubHash), new(types.Uint64), new(types.Uint40)
}

// TagPubkeyEnc builds a TagPubkey key encoder including the prefix;
// fields are written in the order pubkey hash, key letter, value hash,
// timestamp, serial.
func TagPubkeyEnc(
	k *types.Letter, v *types.Ident, p *types.PubHash, ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(TagPubkey), p, k, v, ca, ser)
}

// TagPubkeyDec builds a TagPubkey key decoder (placeholder prefix),
// matching the field order written by TagPubkeyEnc.
func TagPubkeyDec(
	k *types.Letter, v *types.Ident, p *types.PubHash, ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), p, k, v, ca, ser)
}
// TagKindPubkey allows searching by tag, kind and pubkey, filtered by
// timestamp.
//
// 3 prefix|2 kind|8 pubkey hash|1 key letter|8 value hash|8 timestamp|5 serial
//
// NOTE(review): the original comment listed the key letter and value hash
// first, but both TagKindPubkeyEnc and TagKindPubkeyDec encode kind, then
// pubkey hash, then the tag (New(..., ki, p, k, v, ...)); the layout
// above reflects the actual byte order.
var TagKindPubkey = next()

// TagKindPubkeyVars allocates the variables used by the TagKindPubkey key.
func TagKindPubkeyVars() (
	k *types.Letter, v *types.Ident, ki *types.Uint16, p *types.PubHash,
	ca *types.Uint64,
	ser *types.Uint40,
) {
	return new(types.Letter), new(types.Ident), new(types.Uint16), new(types.PubHash), new(types.Uint64), new(types.Uint40)
}

// TagKindPubkeyEnc builds a TagKindPubkey key encoder including the
// prefix; fields are written in the order kind, pubkey hash, key letter,
// value hash, timestamp, serial.
func TagKindPubkeyEnc(
	k *types.Letter, v *types.Ident, ki *types.Uint16, p *types.PubHash,
	ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(TagKindPubkey), ki, p, k, v, ca, ser)
}

// TagKindPubkeyDec builds a TagKindPubkey key decoder (placeholder
// prefix), matching the field order written by TagKindPubkeyEnc.
func TagKindPubkeyDec(
	k *types.Letter, v *types.Ident, ki *types.Uint16, p *types.PubHash,
	ca *types.Uint64,
	ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), ki, p, k, v, ca, ser)
}
// Expiration indexes the expiration timestamp of events that carry one,
// so expired events can be found and pruned.
//
// 3 prefix|8 timestamp|5 serial
var Expiration = next()

// ExpirationVars allocates the variables used by the Expiration key.
func ExpirationVars() (
	exp *types.Uint64, ser *types.Uint40,
) {
	return new(types.Uint64), new(types.Uint40)
}

// ExpirationEnc builds an Expiration key encoder including the prefix.
func ExpirationEnc(
	exp *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(Expiration), exp, ser)
}

// ExpirationDec builds an Expiration key decoder (placeholder prefix).
func ExpirationDec(
	exp *types.Uint64, ser *types.Uint40,
) (enc *T) {
	return New(NewPrefix(), exp, ser)
}

// Version stores the database schema version, used for triggering
// reindexes when new keys are added (policy is add-only).
//
// 3 prefix|4 version
var Version = next()

// VersionVars allocates the variable used by the Version key.
func VersionVars() (
	ver *types.Uint32,
) {
	return new(types.Uint32)
}

// VersionEnc builds a Version key encoder including the prefix.
func VersionEnc(
	ver *types.Uint32,
) (enc *T) {
	return New(NewPrefix(Version), ver)
}

// VersionDec builds a Version key decoder (placeholder prefix).
func VersionDec(
	ver *types.Uint32,
) (enc *T) {
	return New(NewPrefix(), ver)
}

981
pkg/database/indexes/keys_test.go

@ -0,0 +1,981 @@ @@ -0,0 +1,981 @@
package indexes
import (
"bytes"
"io"
"testing"
"database.orly/indexes/types"
"lol.mleku.dev/chk"
"utils.orly"
)
// TestNewPrefix tests the NewPrefix function with and without arguments:
// the zero-argument placeholder form, a valid identifier, and the panic
// on an unknown identifier.
func TestNewPrefix(t *testing.T) {
	// Test with no arguments (default prefix)
	defaultPrefix := NewPrefix()
	if len(defaultPrefix.Bytes()) != 3 {
		t.Errorf(
			"Default prefix should be 3 bytes, got %d",
			len(defaultPrefix.Bytes()),
		)
	}
	// Test with a valid prefix index
	validPrefix := NewPrefix(Event)
	if string(validPrefix.Bytes()) != string(EventPrefix) {
		t.Errorf("Expected prefix %q, got %q", EventPrefix, validPrefix.Bytes())
	}
	// Test with an invalid prefix index (should panic); the deferred
	// recover only guards the final NewPrefix(-1) call below.
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("NewPrefix should panic with invalid prefix index")
		}
	}()
	_ = NewPrefix(-1) // This should panic
}
// TestPrefixMethods tests the methods of the P struct: Bytes,
// MarshalWrite, and that UnmarshalRead round-trips the marshaled bytes.
func TestPrefixMethods(t *testing.T) {
	// Create a prefix
	prefix := NewPrefix(Event)
	// Test Bytes method
	if !utils.FastEqual(prefix.Bytes(), []byte(EventPrefix)) {
		t.Errorf(
			"Bytes method returned %v, expected %v", prefix.Bytes(),
			[]byte(EventPrefix),
		)
	}
	// Test MarshalWrite method
	buf := new(bytes.Buffer)
	err := prefix.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	if !utils.FastEqual(buf.Bytes(), []byte(EventPrefix)) {
		t.Errorf(
			"MarshalWrite wrote %v, expected %v", buf.Bytes(),
			[]byte(EventPrefix),
		)
	}
	// Test UnmarshalRead method on a zero-value P (exercises the lazy
	// allocation of the 3-byte backing slice)
	newPrefix := &P{}
	err = newPrefix.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	if !utils.FastEqual(newPrefix.Bytes(), []byte(EventPrefix)) {
		t.Errorf(
			"UnmarshalRead read %v, expected %v", newPrefix.Bytes(),
			[]byte(EventPrefix),
		)
	}
}
// TestPrefixFunction tests the Prefix function, mapping every known
// index identifier to its three-byte prefix, plus the empty-string
// result for an unknown identifier.
func TestPrefixFunction(t *testing.T) {
	testCases := []struct {
		name     string
		index    int
		expected I
	}{
		{"Event", Event, EventPrefix},
		{"ID", Id, IdPrefix},
		{"FullIdPubkey", FullIdPubkey, FullIdPubkeyPrefix},
		{"Pubkey", Pubkey, PubkeyPrefix},
		{"CreatedAt", CreatedAt, CreatedAtPrefix},
		{"TagPubkey", TagPubkey, TagPubkeyPrefix},
		{"Tag", Tag, TagPrefix},
		{"Kind", Kind, KindPrefix},
		{"KindPubkey", KindPubkey, KindPubkeyPrefix},
		{"TagKind", TagKind, TagKindPrefix},
		{
			"TagKindPubkey", TagKindPubkey,
			TagKindPubkeyPrefix,
		},
		{"Invalid", -1, ""},
	}
	for _, tc := range testCases {
		t.Run(
			tc.name, func(t *testing.T) {
				result := Prefix(tc.index)
				if result != tc.expected {
					t.Errorf(
						"Prefix(%d) = %q, expected %q", tc.index, result,
						tc.expected,
					)
				}
			},
		)
	}
}
// TestIdentify tests the Identify function: each known prefix maps to its
// identifier, an unknown prefix yields 0, and a failing reader yields -1
// with an error.
//
// NOTE(review): 0 for an unknown prefix is indistinguishable from Event
// (which is also 0) — this test pins that ambiguity as expected behavior.
func TestIdentify(t *testing.T) {
	testCases := []struct {
		name     string
		prefix   I
		expected int
	}{
		{"Event", EventPrefix, Event},
		{"ID", IdPrefix, Id},
		{"FullIdPubkey", FullIdPubkeyPrefix, FullIdPubkey},
		{"Pubkey", PubkeyPrefix, Pubkey},
		{"CreatedAt", CreatedAtPrefix, CreatedAt},
		{"TagPubkey", TagPubkeyPrefix, TagPubkey},
		{"Tag", TagPrefix, Tag},
		{"Kind", KindPrefix, Kind},
		{"KindPubkey", KindPubkeyPrefix, KindPubkey},
		{"TagKind", TagKindPrefix, TagKind},
		{
			"TagKindPubkey", TagKindPubkeyPrefix,
			TagKindPubkey,
		},
	}
	for _, tc := range testCases {
		t.Run(
			tc.name, func(t *testing.T) {
				result, err := Identify(bytes.NewReader([]byte(tc.prefix)))
				if chk.E(err) {
					t.Fatalf("Identify failed: %v", err)
				}
				if result != tc.expected {
					t.Errorf(
						"Identify(%q) = %d, expected %d", tc.prefix, result,
						tc.expected,
					)
				}
			},
		)
	}
	// Test with invalid data
	t.Run(
		"Invalid", func(t *testing.T) {
			result, err := Identify(bytes.NewReader([]byte("xyz")))
			if chk.E(err) {
				t.Fatalf("Identify failed: %v", err)
			}
			if result != 0 {
				t.Errorf(
					"Identify with invalid prefix should return 0, got %d",
					result,
				)
			}
		},
	)
	// Test with error from reader
	t.Run(
		"ReaderError", func(t *testing.T) {
			errReader := &errorReader{}
			result, err := Identify(errReader)
			if err == nil {
				t.Errorf("Identify should return error with failing reader")
			}
			if result != -1 {
				t.Errorf(
					"Identify with reader error should return -1, got %d",
					result,
				)
			}
		},
	)
}
// errorReader is an io.Reader stub whose Read always fails; it is used to
// exercise the error path of Identify.
type errorReader struct{}

// Read implements io.Reader by unconditionally reporting
// io.ErrUnexpectedEOF with zero bytes consumed.
func (e *errorReader) Read(p []byte) (n int, err error) {
	n = 0
	err = io.ErrUnexpectedEOF
	return
}
// TestTStruct tests the T struct and its methods: construction via New,
// MarshalWrite/UnmarshalRead round-tripping, and that MarshalWrite
// tolerates a nil encoder in the list (used for search-prefix generation).
func TestTStruct(t *testing.T) {
	// Create some test encoders
	prefix := NewPrefix(Event)
	ser := new(types.Uint40)
	ser.Set(12345)
	// Test New function
	enc := New(prefix, ser)
	if len(enc.Encs) != 2 {
		t.Errorf("New should create T with 2 encoders, got %d", len(enc.Encs))
	}
	// Test MarshalWrite
	buf := new(bytes.Buffer)
	err := enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Test UnmarshalRead
	dec := New(NewPrefix(), new(types.Uint40))
	err = dec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	decodedPrefix := dec.Encs[0].(*P)
	decodedSer := dec.Encs[1].(*types.Uint40)
	if !utils.FastEqual(decodedPrefix.Bytes(), prefix.Bytes()) {
		t.Errorf(
			"Decoded prefix %v, expected %v", decodedPrefix.Bytes(),
			prefix.Bytes(),
		)
	}
	if decodedSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", decodedSer.Get(), ser.Get())
	}
	// Test with nil encoder (MarshalWrite must skip it, not fail)
	encWithNil := New(prefix, nil, ser)
	buf.Reset()
	err = encWithNil.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite with nil encoder failed: %v", err)
	}
}
// TestEventFunctions tests the Event-related helpers (EventVars,
// EventEnc, EventDec) including a full encode/decode round-trip of the
// serial.
func TestEventFunctions(t *testing.T) {
	// Test EventVars
	ser := EventVars()
	if ser == nil {
		t.Fatalf("EventVars should return non-nil *types.Uint40")
	}
	// Set a value
	ser.Set(12345)
	// Test EventEnc
	enc := EventEnc(ser)
	if len(enc.Encs) != 2 {
		t.Errorf(
			"EventEnc should create T with 2 encoders, got %d", len(enc.Encs),
		)
	}
	// Test EventDec
	dec := EventDec(ser)
	if len(dec.Encs) != 2 {
		t.Errorf(
			"EventDec should create T with 2 encoders, got %d", len(dec.Encs),
		)
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err := enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newSer := new(types.Uint40)
	newDec := EventDec(newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded value
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}
// TestIdFunctions tests the Id-related helpers (IdVars, IdEnc, IdDec)
// including a full encode/decode round-trip of the id hash and serial.
func TestIdFunctions(t *testing.T) {
	// Test IdVars
	id, ser := IdVars()
	if id == nil || ser == nil {
		t.Fatalf("IdVars should return non-nil *types.IdHash and *types.Uint40")
	}
	// Set values
	id.Set([]byte{1, 2, 3, 4, 5, 6, 7, 8})
	ser.Set(12345)
	// Test IdEnc
	enc := IdEnc(id, ser)
	if len(enc.Encs) != 3 {
		t.Errorf("IdEnc should create T with 3 encoders, got %d", len(enc.Encs))
	}
	// Test IdDec
	dec := IdDec(id, ser)
	if len(dec.Encs) != 3 {
		t.Errorf("IdDec should create T with 3 encoders, got %d", len(dec.Encs))
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err := enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newId, newSer := IdVars()
	newDec := IdDec(newId, newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	if !utils.FastEqual(newId.Bytes(), id.Bytes()) {
		t.Errorf("Decoded id %v, expected %v", newId.Bytes(), id.Bytes())
	}
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}
// TestIdPubkeyFunctions exercises FullIdPubkeyVars, FullIdPubkeyEnc and
// FullIdPubkeyDec, including a round-trip through MarshalWrite/UnmarshalRead.
func TestIdPubkeyFunctions(t *testing.T) {
	ser, fid, p, ca := FullIdPubkeyVars()
	if ser == nil || fid == nil || p == nil || ca == nil {
		t.Fatalf("FullIdPubkeyVars should return non-nil values")
	}
	// Populate with known values; the same 32-byte pattern is used for both
	// the id and the pubkey.
	ser.Set(12345)
	pattern := []byte{
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	}
	if err := fid.FromId(pattern); chk.E(err) {
		t.Fatalf("FromId failed: %v", err)
	}
	if err := p.FromPubkey(pattern); chk.E(err) {
		t.Fatalf("FromPubkey failed: %v", err)
	}
	ca.Set(98765)
	enc := FullIdPubkeyEnc(ser, fid, p, ca)
	if got := len(enc.Encs); got != 5 {
		t.Errorf(
			"FullIdPubkeyEnc should create T with 5 encoders, got %d",
			got,
		)
	}
	dec := FullIdPubkeyDec(ser, fid, p, ca)
	if got := len(dec.Encs); got != 5 {
		t.Errorf(
			"FullIdPubkeyDec should create T with 5 encoders, got %d",
			got,
		)
	}
	// Round-trip: encode, then decode into fresh variables.
	var w bytes.Buffer
	if err := enc.MarshalWrite(&w); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	newSer, newFid, newP, newCa := FullIdPubkeyVars()
	newDec := FullIdPubkeyDec(newSer, newFid, newP, newCa)
	if err := newDec.UnmarshalRead(bytes.NewBuffer(w.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Every decoded field must match what was encoded.
	if got, want := newSer.Get(), ser.Get(); got != want {
		t.Errorf("Decoded serial %d, expected %d", got, want)
	}
	if !utils.FastEqual(newFid.Bytes(), fid.Bytes()) {
		t.Errorf("Decoded id %v, expected %v", newFid.Bytes(), fid.Bytes())
	}
	if !utils.FastEqual(newP.Bytes(), p.Bytes()) {
		t.Errorf("Decoded pubkey hash %v, expected %v", newP.Bytes(), p.Bytes())
	}
	if got, want := newCa.Get(), ca.Get(); got != want {
		t.Errorf("Decoded created at %d, expected %d", got, want)
	}
}
// TestCreatedAtFunctions exercises CreatedAtVars, CreatedAtEnc and
// CreatedAtDec, including a round-trip through MarshalWrite/UnmarshalRead.
func TestCreatedAtFunctions(t *testing.T) {
	ca, ser := CreatedAtVars()
	if ca == nil || ser == nil {
		t.Fatalf("CreatedAtVars should return non-nil values")
	}
	// Populate with known values.
	ca.Set(98765)
	ser.Set(12345)
	enc := CreatedAtEnc(ca, ser)
	if got := len(enc.Encs); got != 3 {
		t.Errorf(
			"CreatedAtEnc should create T with 3 encoders, got %d",
			got,
		)
	}
	dec := CreatedAtDec(ca, ser)
	if got := len(dec.Encs); got != 3 {
		t.Errorf(
			"CreatedAtDec should create T with 3 encoders, got %d",
			got,
		)
	}
	// Round-trip: encode, then decode into fresh variables.
	var w bytes.Buffer
	if err := enc.MarshalWrite(&w); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	newCa, newSer := CreatedAtVars()
	if err := CreatedAtDec(newCa, newSer).UnmarshalRead(bytes.NewBuffer(w.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Every decoded field must match what was encoded.
	if got, want := newCa.Get(), ca.Get(); got != want {
		t.Errorf("Decoded created at %d, expected %d", got, want)
	}
	if got, want := newSer.Get(), ser.Get(); got != want {
		t.Errorf("Decoded serial %d, expected %d", got, want)
	}
}
// TestPubkeyFunctions exercises PubkeyVars, PubkeyEnc and PubkeyDec,
// including a round-trip through MarshalWrite/UnmarshalRead.
func TestPubkeyFunctions(t *testing.T) {
	p, ca, ser := PubkeyVars()
	if p == nil || ca == nil || ser == nil {
		t.Fatalf("PubkeyVars should return non-nil values")
	}
	// Populate with known values.
	pattern := []byte{
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	}
	if err := p.FromPubkey(pattern); chk.E(err) {
		t.Fatalf("FromPubkey failed: %v", err)
	}
	ca.Set(98765)
	ser.Set(12345)
	enc := PubkeyEnc(p, ca, ser)
	if got := len(enc.Encs); got != 4 {
		t.Errorf(
			"PubkeyEnc should create T with 4 encoders, got %d",
			got,
		)
	}
	dec := PubkeyDec(p, ca, ser)
	if got := len(dec.Encs); got != 4 {
		t.Errorf(
			"PubkeyDec should create T with 4 encoders, got %d",
			got,
		)
	}
	// Round-trip: encode, then decode into fresh variables.
	var w bytes.Buffer
	if err := enc.MarshalWrite(&w); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	newP, newCa, newSer := PubkeyVars()
	if err := PubkeyDec(newP, newCa, newSer).UnmarshalRead(bytes.NewBuffer(w.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Every decoded field must match what was encoded.
	if !utils.FastEqual(newP.Bytes(), p.Bytes()) {
		t.Errorf("Decoded pubkey hash %v, expected %v", newP.Bytes(), p.Bytes())
	}
	if got, want := newCa.Get(), ca.Get(); got != want {
		t.Errorf("Decoded created at %d, expected %d", got, want)
	}
	if got, want := newSer.Get(), ser.Get(); got != want {
		t.Errorf("Decoded serial %d, expected %d", got, want)
	}
}
// TestPubkeyTagFunctions tests the TagPubkey-related functions: the variable
// constructor, encoder/decoder layout, and a MarshalWrite/UnmarshalRead
// round-trip.
func TestPubkeyTagFunctions(t *testing.T) {
	// Test TagPubkeyVars
	k, v, p, ca, ser := TagPubkeyVars()
	if p == nil || k == nil || v == nil || ca == nil || ser == nil {
		t.Fatalf("TagPubkeyVars should return non-nil values")
	}
	// Set values
	err := p.FromPubkey(
		[]byte{
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
			20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
		},
	)
	if chk.E(err) {
		t.Fatalf("FromPubkey failed: %v", err)
	}
	k.Set('e')
	// FromIdent has no error return; the error check that used to follow it
	// re-tested the stale error from FromPubkey and could never fire, so it
	// has been removed.
	v.FromIdent([]byte("test-value"))
	ca.Set(98765)
	ser.Set(12345)
	// Test TagPubkeyEnc
	enc := TagPubkeyEnc(k, v, p, ca, ser)
	if len(enc.Encs) != 6 {
		t.Errorf(
			"TagPubkeyEnc should create T with 6 encoders, got %d",
			len(enc.Encs),
		)
	}
	// Test TagPubkeyDec
	dec := TagPubkeyDec(k, v, p, ca, ser)
	if len(dec.Encs) != 6 {
		t.Errorf(
			"TagPubkeyDec should create T with 6 encoders, got %d",
			len(dec.Encs),
		)
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err = enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newK, newV, newP, newCa, newSer := TagPubkeyVars()
	newDec := TagPubkeyDec(newK, newV, newP, newCa, newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	if !utils.FastEqual(newP.Bytes(), p.Bytes()) {
		t.Errorf("Decoded pubkey hash %v, expected %v", newP.Bytes(), p.Bytes())
	}
	if newK.Letter() != k.Letter() {
		t.Errorf(
			"Decoded key letter %c, expected %c", newK.Letter(), k.Letter(),
		)
	}
	if !utils.FastEqual(newV.Bytes(), v.Bytes()) {
		t.Errorf("Decoded value hash %v, expected %v", newV.Bytes(), v.Bytes())
	}
	if newCa.Get() != ca.Get() {
		t.Errorf("Decoded created at %d, expected %d", newCa.Get(), ca.Get())
	}
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}
// TestTagFunctions tests the Tag-related functions: the variable constructor,
// encoder/decoder layout, and a MarshalWrite/UnmarshalRead round-trip.
func TestTagFunctions(t *testing.T) {
	// Test TagVars
	k, v, ca, ser := TagVars()
	if k == nil || v == nil || ca == nil || ser == nil {
		t.Fatalf("TagVars should return non-nil values")
	}
	// Set values
	k.Set('e')
	// FromIdent has no error return; the old check after it tested an error
	// variable that was still its nil zero value and could never fire, so it
	// has been removed (along with the now-unneeded `var err error`).
	v.FromIdent([]byte("test-value"))
	ca.Set(98765)
	ser.Set(12345)
	// Test TagEnc
	enc := TagEnc(k, v, ca, ser)
	if len(enc.Encs) != 5 {
		t.Errorf(
			"TagEnc should create T with 5 encoders, got %d",
			len(enc.Encs),
		)
	}
	// Test TagDec
	dec := TagDec(k, v, ca, ser)
	if len(dec.Encs) != 5 {
		t.Errorf(
			"TagDec should create T with 5 encoders, got %d",
			len(dec.Encs),
		)
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err := enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newK, newV, newCa, newSer := TagVars()
	newDec := TagDec(newK, newV, newCa, newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	if newK.Letter() != k.Letter() {
		t.Errorf(
			"Decoded key letter %c, expected %c", newK.Letter(), k.Letter(),
		)
	}
	if !utils.FastEqual(newV.Bytes(), v.Bytes()) {
		t.Errorf("Decoded value hash %v, expected %v", newV.Bytes(), v.Bytes())
	}
	if newCa.Get() != ca.Get() {
		t.Errorf("Decoded created at %d, expected %d", newCa.Get(), ca.Get())
	}
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}
// TestKindFunctions exercises KindVars, KindEnc and KindDec, including a
// round-trip through MarshalWrite/UnmarshalRead.
func TestKindFunctions(t *testing.T) {
	ki, ca, ser := KindVars()
	if ki == nil || ca == nil || ser == nil {
		t.Fatalf("KindVars should return non-nil values")
	}
	// Populate with known values.
	ki.Set(1234)
	ca.Set(98765)
	ser.Set(12345)
	enc := KindEnc(ki, ca, ser)
	if got := len(enc.Encs); got != 4 {
		t.Errorf(
			"KindEnc should create T with 4 encoders, got %d",
			got,
		)
	}
	dec := KindDec(ki, ca, ser)
	if got := len(dec.Encs); got != 4 {
		t.Errorf(
			"KindDec should create T with 4 encoders, got %d",
			got,
		)
	}
	// Round-trip: encode, then decode into fresh variables.
	var w bytes.Buffer
	if err := enc.MarshalWrite(&w); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	newKi, newCa, newSer := KindVars()
	if err := KindDec(newKi, newCa, newSer).UnmarshalRead(bytes.NewBuffer(w.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Every decoded field must match what was encoded.
	if got, want := newKi.Get(), ki.Get(); got != want {
		t.Errorf("Decoded kind %d, expected %d", got, want)
	}
	if got, want := newCa.Get(), ca.Get(); got != want {
		t.Errorf("Decoded created at %d, expected %d", got, want)
	}
	if got, want := newSer.Get(), ser.Get(); got != want {
		t.Errorf("Decoded serial %d, expected %d", got, want)
	}
}
// TestKindTagFunctions tests the TagKind-related functions: the variable
// constructor, encoder/decoder layout, and a MarshalWrite/UnmarshalRead
// round-trip.
func TestKindTagFunctions(t *testing.T) {
	// Test TagKindVars
	k, v, ki, ca, ser := TagKindVars()
	if ki == nil || k == nil || v == nil || ca == nil || ser == nil {
		t.Fatalf("TagKindVars should return non-nil values")
	}
	// Set values
	ki.Set(1234)
	k.Set('e')
	// FromIdent has no error return; the old check after it tested an error
	// variable that was still its nil zero value and could never fire, so it
	// has been removed (along with the now-unneeded `var err error`).
	v.FromIdent([]byte("test-value"))
	ca.Set(98765)
	ser.Set(12345)
	// Test TagKindEnc
	enc := TagKindEnc(k, v, ki, ca, ser)
	if len(enc.Encs) != 6 {
		t.Errorf(
			"TagKindEnc should create T with 6 encoders, got %d",
			len(enc.Encs),
		)
	}
	// Test TagKindDec
	dec := TagKindDec(k, v, ki, ca, ser)
	if len(dec.Encs) != 6 {
		t.Errorf(
			"TagKindDec should create T with 6 encoders, got %d",
			len(dec.Encs),
		)
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err := enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newK, newV, newKi, newCa, newSer := TagKindVars()
	newDec := TagKindDec(newK, newV, newKi, newCa, newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	if newKi.Get() != ki.Get() {
		t.Errorf("Decoded kind %d, expected %d", newKi.Get(), ki.Get())
	}
	if newK.Letter() != k.Letter() {
		t.Errorf(
			"Decoded key letter %c, expected %c", newK.Letter(), k.Letter(),
		)
	}
	if !utils.FastEqual(newV.Bytes(), v.Bytes()) {
		t.Errorf("Decoded value hash %v, expected %v", newV.Bytes(), v.Bytes())
	}
	if newCa.Get() != ca.Get() {
		t.Errorf("Decoded created at %d, expected %d", newCa.Get(), ca.Get())
	}
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}
// TestKindPubkeyFunctions exercises KindPubkeyVars, KindPubkeyEnc and
// KindPubkeyDec, including a round-trip through MarshalWrite/UnmarshalRead.
func TestKindPubkeyFunctions(t *testing.T) {
	ki, p, ca, ser := KindPubkeyVars()
	if ki == nil || p == nil || ca == nil || ser == nil {
		t.Fatalf("KindPubkeyVars should return non-nil values")
	}
	// Populate with known values.
	ki.Set(1234)
	pattern := []byte{
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	}
	if err := p.FromPubkey(pattern); chk.E(err) {
		t.Fatalf("FromPubkey failed: %v", err)
	}
	ca.Set(98765)
	ser.Set(12345)
	enc := KindPubkeyEnc(ki, p, ca, ser)
	if got := len(enc.Encs); got != 5 {
		t.Errorf(
			"KindPubkeyEnc should create T with 5 encoders, got %d",
			got,
		)
	}
	dec := KindPubkeyDec(ki, p, ca, ser)
	if got := len(dec.Encs); got != 5 {
		t.Errorf(
			"KindPubkeyDec should create T with 5 encoders, got %d",
			got,
		)
	}
	// Round-trip: encode, then decode into fresh variables.
	var w bytes.Buffer
	if err := enc.MarshalWrite(&w); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	newKi, newP, newCa, newSer := KindPubkeyVars()
	newDec := KindPubkeyDec(newKi, newP, newCa, newSer)
	if err := newDec.UnmarshalRead(bytes.NewBuffer(w.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Every decoded field must match what was encoded.
	if got, want := newKi.Get(), ki.Get(); got != want {
		t.Errorf("Decoded kind %d, expected %d", got, want)
	}
	if !utils.FastEqual(newP.Bytes(), p.Bytes()) {
		t.Errorf("Decoded pubkey hash %v, expected %v", newP.Bytes(), p.Bytes())
	}
	if got, want := newCa.Get(), ca.Get(); got != want {
		t.Errorf("Decoded created at %d, expected %d", got, want)
	}
	if got, want := newSer.Get(), ser.Get(); got != want {
		t.Errorf("Decoded serial %d, expected %d", got, want)
	}
}
// TestKindPubkeyTagFunctions tests the TagKindPubkey-related functions: the
// variable constructor, encoder/decoder layout, and a
// MarshalWrite/UnmarshalRead round-trip.
func TestKindPubkeyTagFunctions(t *testing.T) {
	// Test TagKindPubkeyVars
	k, v, ki, p, ca, ser := TagKindPubkeyVars()
	if ki == nil || p == nil || k == nil || v == nil || ca == nil || ser == nil {
		t.Fatalf("TagKindPubkeyVars should return non-nil values")
	}
	// Set values
	ki.Set(1234)
	err := p.FromPubkey(
		[]byte{
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
			20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
		},
	)
	if chk.E(err) {
		t.Fatalf("FromPubkey failed: %v", err)
	}
	k.Set('e')
	// FromIdent has no error return; the error check that used to follow it
	// re-tested the stale error from FromPubkey and could never fire, so it
	// has been removed.
	v.FromIdent([]byte("test-value"))
	ca.Set(98765)
	ser.Set(12345)
	// Test TagKindPubkeyEnc
	enc := TagKindPubkeyEnc(k, v, ki, p, ca, ser)
	if len(enc.Encs) != 7 {
		t.Errorf(
			"TagKindPubkeyEnc should create T with 7 encoders, got %d",
			len(enc.Encs),
		)
	}
	// Test TagKindPubkeyDec
	dec := TagKindPubkeyDec(k, v, ki, p, ca, ser)
	if len(dec.Encs) != 7 {
		t.Errorf(
			"TagKindPubkeyDec should create T with 7 encoders, got %d",
			len(dec.Encs),
		)
	}
	// Test marshaling and unmarshaling
	buf := new(bytes.Buffer)
	err = enc.MarshalWrite(buf)
	if chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	// Create new variables for decoding
	newK, newV, newKi, newP, newCa, newSer := TagKindPubkeyVars()
	newDec := TagKindPubkeyDec(newK, newV, newKi, newP, newCa, newSer)
	err = newDec.UnmarshalRead(bytes.NewBuffer(buf.Bytes()))
	if chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	// Verify the decoded values
	if newKi.Get() != ki.Get() {
		t.Errorf("Decoded kind %d, expected %d", newKi.Get(), ki.Get())
	}
	if !utils.FastEqual(newP.Bytes(), p.Bytes()) {
		t.Errorf("Decoded pubkey hash %v, expected %v", newP.Bytes(), p.Bytes())
	}
	if newK.Letter() != k.Letter() {
		t.Errorf(
			"Decoded key letter %c, expected %c", newK.Letter(), k.Letter(),
		)
	}
	if !utils.FastEqual(newV.Bytes(), v.Bytes()) {
		t.Errorf("Decoded value hash %v, expected %v", newV.Bytes(), v.Bytes())
	}
	if newCa.Get() != ca.Get() {
		t.Errorf("Decoded created at %d, expected %d", newCa.Get(), ca.Get())
	}
	if newSer.Get() != ser.Get() {
		t.Errorf("Decoded serial %d, expected %d", newSer.Get(), ser.Get())
	}
}

419
pkg/database/indexes/types/endianness_test.go

@@ -0,0 +1,419 @@
package types
import (
"bytes"
"encoding/binary"
"testing"
)
// TestTypesSortLexicographically tests if the numeric types sort lexicographically
// when using bytes.Compare after marshaling.
func TestTypesSortLexicographically(t *testing.T) {
	// One subtest per fixed-width unsigned integer type.
	for _, tc := range []struct {
		name string
		fn   func(*testing.T)
	}{
		{"Uint16", testUint16Sorting},
		{"Uint24", testUint24Sorting},
		{"Uint32", testUint32Sorting},
		{"Uint40", testUint40Sorting},
		{"Uint64", testUint64Sorting},
	} {
		t.Run(tc.name, tc.fn)
	}
}
// TestEdgeCases tests sorting with edge cases like zero, max values, and adjacent values
func TestEdgeCases(t *testing.T) {
	// One subtest per fixed-width unsigned integer type.
	for _, tc := range []struct {
		name string
		fn   func(*testing.T)
	}{
		{"Uint16EdgeCases", testUint16EdgeCases},
		{"Uint24EdgeCases", testUint24EdgeCases},
		{"Uint32EdgeCases", testUint32EdgeCases},
		{"Uint40EdgeCases", testUint40EdgeCases},
		{"Uint64EdgeCases", testUint64EdgeCases},
	} {
		t.Run(tc.name, tc.fn)
	}
}
// testUint16Sorting verifies that big-endian encodings of ascending Uint16
// values compare in the same order as the values themselves.
func testUint16Sorting(t *testing.T) {
	values := []uint16{1, 10, 100, 1000, 10000, 65535}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint16)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint16 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint16 values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint24Sorting verifies that big-endian encodings of ascending Uint24
// values compare in the same order as the values themselves.
func testUint24Sorting(t *testing.T) {
	values := []uint32{1, 10, 100, 1000, 10000, 100000, 1000000, 16777215}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint24)
		if err := u.Set(val); err != nil {
			t.Fatalf("Failed to set Uint24 %d: %v", val, err)
		}
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint24 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint24 values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint32Sorting verifies that big-endian encodings of ascending Uint32
// values compare in the same order as the values themselves.
func testUint32Sorting(t *testing.T) {
	values := []uint32{1, 10, 100, 1000, 10000, 100000, 1000000, 4294967295}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint32)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint32 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint32 values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint40Sorting verifies that big-endian encodings of ascending Uint40
// values compare in the same order as the values themselves.
func testUint40Sorting(t *testing.T) {
	values := []uint64{1, 10, 100, 1000, 10000, 100000, 1000000, 1099511627775}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint40)
		if err := u.Set(val); err != nil {
			t.Fatalf("Failed to set Uint40 %d: %v", val, err)
		}
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint40 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint40 values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint64Sorting verifies that big-endian encodings of ascending Uint64
// values compare in the same order as the values themselves.
func testUint64Sorting(t *testing.T) {
	values := []uint64{1, 10, 100, 1000, 10000, 100000, 1000000, 18446744073709551615}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint64)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint64 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint64 values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// Edge case test functions
// testUint16EdgeCases checks ordering at the boundaries: zero, the maximum
// value, and values adjacent to both.
func testUint16EdgeCases(t *testing.T) {
	values := []uint16{0, 1, 2, 65534, 65535}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint16)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint16 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint16 edge case values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint24EdgeCases checks ordering at the boundaries: zero, the maximum
// value, and values adjacent to both.
func testUint24EdgeCases(t *testing.T) {
	values := []uint32{0, 1, 2, 16777214, 16777215}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint24)
		if err := u.Set(val); err != nil {
			t.Fatalf("Failed to set Uint24 %d: %v", val, err)
		}
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint24 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint24 edge case values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint32EdgeCases checks ordering at the boundaries: zero, the maximum
// value, and values adjacent to both.
func testUint32EdgeCases(t *testing.T) {
	values := []uint32{0, 1, 2, 4294967294, 4294967295}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint32)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint32 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint32 edge case values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint40EdgeCases checks ordering at the boundaries: zero, the maximum
// value, and values adjacent to both.
func testUint40EdgeCases(t *testing.T) {
	values := []uint64{0, 1, 2, 1099511627774, 1099511627775}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint40)
		if err := u.Set(val); err != nil {
			t.Fatalf("Failed to set Uint40 %d: %v", val, err)
		}
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint40 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint40 edge case values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// testUint64EdgeCases checks ordering at the boundaries: zero, the maximum
// value, and values adjacent to both.
func testUint64EdgeCases(t *testing.T) {
	values := []uint64{0, 1, 2, 18446744073709551614, 18446744073709551615}
	encoded := make([][]byte, 0, len(values))
	for _, val := range values {
		u := new(Uint64)
		u.Set(val)
		var buf bytes.Buffer
		if err := u.MarshalWrite(&buf); err != nil {
			t.Fatalf("Failed to marshal Uint64 %d: %v", val, err)
		}
		encoded = append(encoded, buf.Bytes())
	}
	// Each encoding must compare strictly less than its successor.
	for i := 1; i < len(encoded); i++ {
		if bytes.Compare(encoded[i-1], encoded[i]) >= 0 {
			t.Errorf("Uint64 edge case values don't sort correctly: %v should be less than %v",
				values[i-1], values[i])
			t.Logf("Bytes representation: %v vs %v", encoded[i-1], encoded[i])
		}
	}
}
// TestEndianness demonstrates why BigEndian is used instead of LittleEndian
// for lexicographical sorting with bytes.Compare
func TestEndianness(t *testing.T) {
	values := []uint32{1, 10, 100, 1000, 10000}
	// Encode every value in both byte orders up front.
	bigEndianValues := make([][]byte, len(values))
	littleEndianValues := make([][]byte, len(values))
	for i, val := range values {
		be := make([]byte, 4)
		binary.BigEndian.PutUint32(be, val)
		bigEndianValues[i] = be
		le := make([]byte, 4)
		binary.LittleEndian.PutUint32(le, val)
		littleEndianValues[i] = le
	}
	// BigEndian encodings must sort in the same order as the values.
	t.Log("Testing BigEndian sorting:")
	for i := 0; i+1 < len(bigEndianValues); i++ {
		result := bytes.Compare(bigEndianValues[i], bigEndianValues[i+1])
		t.Logf("Compare %d with %d: result = %d", values[i], values[i+1], result)
		if result >= 0 {
			t.Errorf("BigEndian values don't sort correctly: %v should be less than %v",
				values[i], values[i+1])
			t.Logf("Bytes representation: %v vs %v", bigEndianValues[i], bigEndianValues[i+1])
		}
	}
	// LittleEndian encodings are expected to break the ordering.
	t.Log("Testing LittleEndian sorting:")
	correctOrder := true
	for i := 0; i+1 < len(littleEndianValues); i++ {
		result := bytes.Compare(littleEndianValues[i], littleEndianValues[i+1])
		t.Logf("Compare %d with %d: result = %d", values[i], values[i+1], result)
		if result >= 0 {
			correctOrder = false
			t.Logf("LittleEndian values don't sort correctly: %v should be less than %v",
				values[i], values[i+1])
			t.Logf("Bytes representation: %v vs %v", littleEndianValues[i], littleEndianValues[i+1])
		}
	}
	// The test passes precisely because little-endian does NOT sort.
	if correctOrder {
		t.Error("LittleEndian values unexpectedly sorted correctly")
	} else {
		t.Log("As expected, LittleEndian values don't sort correctly with bytes.Compare")
	}
}

38
pkg/database/indexes/types/fullid.go

@@ -0,0 +1,38 @@
package types
import (
"io"
"crypto.orly/sha256"
"lol.mleku.dev/errorf"
)
// IdLen is the size in bytes of an event ID: a full SHA-256 digest.
const IdLen = sha256.Size

// Id is a fixed-size event ID used as an index key component.
type Id struct {
	val [IdLen]byte
}

// FromId copies a 32-byte event ID into fi, rejecting any other length.
func (fi *Id) FromId(id []byte) (err error) {
	if len(id) != IdLen {
		err = errorf.E(
			"fullid.FromId: invalid ID length, got %d require %d", len(id),
			IdLen,
		)
		return
	}
	copy(fi.val[:], id)
	return
}

// Bytes returns the ID as a slice backed by the internal array; callers must
// not modify it if they need the Id to stay stable.
func (fi *Id) Bytes() (b []byte) { return fi.val[:] }

// MarshalWrite writes the raw IdLen bytes of the ID to w.
func (fi *Id) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(fi.val[:])
	return
}

// UnmarshalRead fills the ID with exactly IdLen bytes from r.
//
// io.ReadFull is used so that a short read is reported as an error instead of
// silently leaving the value partially populated (a bare r.Read may return
// fewer bytes without error). The previous no-op self-copy
// (copy(fi.val[:], fi.val[:IdLen])) has been removed.
func (fi *Id) UnmarshalRead(r io.Reader) (err error) {
	_, err = io.ReadFull(r, fi.val[:])
	return
}

115
pkg/database/indexes/types/fullid_test.go

@@ -0,0 +1,115 @@
package types
import (
"bytes"
"testing"
"lol.mleku.dev/chk"
"utils.orly"
"crypto.orly/sha256"
)
// TestFromId checks that FromId accepts a 32-byte ID verbatim and rejects any
// other length.
func TestFromId(t *testing.T) {
	// A valid ID is exactly sha256.Size (32) bytes.
	validId := make([]byte, sha256.Size)
	for i := range validId {
		validId[i] = byte(i)
	}
	// Valid input must be accepted and stored verbatim.
	fi := &Id{}
	if err := fi.FromId(validId); chk.E(err) {
		t.Fatalf("FromId failed with valid ID: %v", err)
	}
	if !utils.FastEqual(fi.Bytes(), validId) {
		t.Errorf(
			"FromId did not set the ID correctly: got %v, want %v", fi.Bytes(),
			validId,
		)
	}
	// A short ID must be rejected.
	invalidId := make([]byte, sha256.Size-1)
	if err := new(Id).FromId(invalidId); err == nil {
		t.Errorf("FromId should have failed with invalid ID size")
	}
}
// TestIdMarshalWriteUnmarshalRead checks that an Id survives a
// MarshalWrite/UnmarshalRead round-trip unchanged.
func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
	// Build an Id from a known ascending 32-byte pattern.
	validId := make([]byte, sha256.Size)
	for i := range validId {
		validId[i] = byte(i)
	}
	fi1 := &Id{}
	if err := fi1.FromId(validId); chk.E(err) {
		t.Fatalf("FromId failed: %v", err)
	}
	// MarshalWrite must emit exactly the raw ID bytes.
	var buf bytes.Buffer
	if err := fi1.MarshalWrite(&buf); chk.E(err) {
		t.Fatalf("MarshalWrite failed: %v", err)
	}
	if !utils.FastEqual(buf.Bytes(), validId) {
		t.Errorf("MarshalWrite wrote %v, want %v", buf.Bytes(), validId)
	}
	// UnmarshalRead must recover the same value.
	fi2 := &Id{}
	if err := fi2.UnmarshalRead(bytes.NewBuffer(buf.Bytes())); chk.E(err) {
		t.Fatalf("UnmarshalRead failed: %v", err)
	}
	if !utils.FastEqual(fi2.Bytes(), validId) {
		t.Errorf("UnmarshalRead read %v, want %v", fi2.Bytes(), validId)
	}
}
// TestIdUnmarshalReadWithCorruptedData feeds UnmarshalRead a truncated stream
// and checks that the previously stored value does not survive intact.
func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
	// Prepare one Id holding an ascending byte pattern...
	validId := make([]byte, sha256.Size)
	for i := range validId {
		validId[i] = byte(i)
	}
	fi1 := &Id{}
	if err := fi1.FromId(validId); chk.E(err) {
		t.Fatalf("FromId failed: %v", err)
	}
	// ...and a second holding the reverse pattern.
	differentId := make([]byte, sha256.Size)
	for i := range differentId {
		differentId[i] = byte(sha256.Size - i - 1)
	}
	fi2 := &Id{}
	if err := fi2.FromId(differentId); chk.E(err) {
		t.Fatalf("FromId failed: %v", err)
	}
	// Supply only half the required bytes; the leading half of the stored
	// value is overwritten by the short read.
	corruptedData := make([]byte, sha256.Size/2)
	fi2.UnmarshalRead(bytes.NewBuffer(corruptedData))
	// The stored value can no longer equal the original pattern.
	if utils.FastEqual(fi2.Bytes(), differentId) {
		t.Errorf("UnmarshalRead did not modify the value as expected")
	}
}

31
pkg/database/indexes/types/identhash.go

@@ -0,0 +1,31 @@
package types
import (
"io"
"crypto.orly/sha256"
)
const IdentLen = 8
type Ident struct{ val [IdentLen]byte }
func (i *Ident) FromIdent(id []byte) {
idh := sha256.Sum256(id)
copy(i.val[:], idh[:IdentLen])
return
}
func (i *Ident) Bytes() (b []byte) { return i.val[:] }
func (i *Ident) MarshalWrite(w io.Writer) (err error) {
_, err = w.Write(i.val[:])
return
}
func (i *Ident) UnmarshalRead(r io.Reader) (err error) {
copy(i.val[:], i.val[:IdentLen])
_, err = r.Read(i.val[:])
return
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save