From dda88eed2a58447b58dc98f08dc22450b8f4d809 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Tue, 4 Jun 2024 19:49:32 +0400 Subject: [PATCH 01/52] feat: add support for the Blobstream API in node --- api/rpc/client/client.go | 39 +- api/rpc_test.go | 42 +- go.mod | 20 +- go.sum | 50 +- nodebuilder/blobstream/blobstream.go | 56 ++ nodebuilder/blobstream/mocks/api.go | 98 +++ nodebuilder/blobstream/module.go | 12 + nodebuilder/blobstream/service.go | 549 ++++++++++++ nodebuilder/blobstream/service_test.go | 1086 ++++++++++++++++++++++++ nodebuilder/blobstream/types.go | 186 ++++ nodebuilder/module.go | 2 + nodebuilder/node.go | 18 +- nodebuilder/rpc/constructors.go | 3 + 13 files changed, 2075 insertions(+), 86 deletions(-) create mode 100644 nodebuilder/blobstream/blobstream.go create mode 100644 nodebuilder/blobstream/mocks/api.go create mode 100644 nodebuilder/blobstream/module.go create mode 100644 nodebuilder/blobstream/service.go create mode 100644 nodebuilder/blobstream/service_test.go create mode 100644 nodebuilder/blobstream/types.go diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index ff206d723e..f77b774dd1 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "net/http" "github.com/filecoin-project/go-jsonrpc" @@ -26,15 +27,16 @@ var ( ) type Client struct { - Fraud fraud.API - Header header.API - State state.API - Share share.API - DAS das.API - P2P p2p.API - Node node.API - Blob blob.API - DA da.API + Fraud fraud.API + Header header.API + State state.API + Share share.API + DAS das.API + P2P p2p.API + Node node.API + Blob blob.API + DA da.API + Blobstream blobstream.API closer multiClientCloser } @@ -85,14 +87,15 @@ func newClient(ctx context.Context, addr string, authHeader http.Header) (*Clien func moduleMap(client *Client) map[string]interface{} { // TODO: this duplication of strings many times 
across the codebase can be avoided with issue #1176 return map[string]interface{}{ - "share": &client.Share.Internal, - "state": &client.State.Internal, - "header": &client.Header.Internal, - "fraud": &client.Fraud.Internal, - "das": &client.DAS.Internal, - "p2p": &client.P2P.Internal, - "node": &client.Node.Internal, - "blob": &client.Blob.Internal, - "da": &client.DA.Internal, + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + "da": &client.DA.Internal, + "blobstream": &client.Blobstream.Internal, } } diff --git a/api/rpc_test.go b/api/rpc_test.go index e019412f4d..665a9baf56 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -3,6 +3,7 @@ package api import ( "context" "encoding/json" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "reflect" "strconv" "testing" @@ -22,6 +23,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + BlobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -85,15 +87,16 @@ func TestRPCCallsUnderlyingNode(t *testing.T) { // api contains all modules that are made available as the node's // public API surface type api struct { - Fraud fraud.Module - Header header.Module - State statemod.Module - Share share.Module - DAS das.Module - Node node.Module - P2P p2p.Module - Blob blob.Module - DA da.Module + Fraud fraud.Module + Header header.Module + State statemod.Module + Share share.Module + DAS das.Module + Node node.Module + P2P p2p.Module + Blob blob.Module + DA 
da.Module + Blobstream blobstream.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -300,6 +303,7 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), daMock.NewMockModule(ctrl), + BlobstreamMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -314,6 +318,7 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * srv.RegisterService("node", mockAPI.Node, &node.API{}) srv.RegisterService("blob", mockAPI.Blob, &blob.API{}) srv.RegisterService("da", mockAPI.DA, &da.API{}) + srv.RegisterService("blobstream", mockAPI.Blobstream, &blobstream.API{}) }) // fx.Replace does not work here, but fx.Decorate does nd := nodebuilder.TestNode(t, node.Full, invokeRPC, fx.Decorate(func() (jwt.Signer, error) { @@ -330,13 +335,14 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * } type mockAPI struct { - State *stateMock.MockModule - Share *shareMock.MockModule - Fraud *fraudMock.MockModule - Header *headerMock.MockModule - Das *dasMock.MockModule - P2P *p2pMock.MockModule - Node *nodeMock.MockModule - Blob *blobMock.MockModule - DA *daMock.MockModule + State *stateMock.MockModule + Share *shareMock.MockModule + Fraud *fraudMock.MockModule + Header *headerMock.MockModule + Das *dasMock.MockModule + P2P *p2pMock.MockModule + Node *nodeMock.MockModule + Blob *blobMock.MockModule + DA *daMock.MockModule + Blobstream *BlobstreamMock.MockModule } diff --git a/go.mod b/go.mod index e8afaa6b79..f9503ca250 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/celestiaorg/celestia-node -go 1.22.0 +go 1.22.3 require ( cosmossdk.io/errors v1.0.1 @@ -8,11 +8,11 @@ require ( github.com/BurntSushi/toml v1.3.2 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 - github.com/celestiaorg/celestia-app v1.9.0 
+ github.com/celestiaorg/celestia-app v1.10.1 github.com/celestiaorg/go-fraud v0.2.1 github.com/celestiaorg/go-header v0.6.1 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/nmt v0.21.0 + github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51 // TODO replace with release when ready github.com/celestiaorg/rsmt2d v0.13.1 github.com/cosmos/cosmos-sdk v0.46.16 github.com/cristalhq/jwt v1.2.0 @@ -126,7 +126,7 @@ require ( github.com/creachadair/taskgroup v0.3.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/danieljoos/wincred v1.1.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect @@ -145,7 +145,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gammazero/deque v0.2.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect @@ -267,11 +267,11 @@ require ( github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // 
indirect github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.47.0 // indirect @@ -288,11 +288,11 @@ require ( github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/shirou/gopsutil v3.21.6+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.15.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect diff --git a/go.sum b/go.sum index 460f50ad75..d6afbc28b2 100644 --- a/go.sum +++ b/go.sum @@ -6,7 +6,6 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -20,7 +19,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= 
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -175,7 +173,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -358,8 +355,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.9.0 h1:B0Sou7uGsAwRXMzVwMpb2wVXtMFC4FR9ODfXrWTIDaw= -github.com/celestiaorg/celestia-app v1.9.0/go.mod h1:Z50B4+LvY0JIusd0qlQvA4/bNM2GzkFyDloYGU6A3fw= +github.com/celestiaorg/celestia-app v1.10.1 h1:Aw5tgotgLPxzNoy4lytsvnliUtNqQXCZme27o7Ks3PM= +github.com/celestiaorg/celestia-app v1.10.1/go.mod h1:SKsXFo1gdhq6EoGeE4kuc8ZZpZOi2XVKrXI2PwE7eso= github.com/celestiaorg/celestia-core v1.35.0-tm-v0.34.29 h1:sXERzNXgyHyqTKNQx4S29C/NMDzgav62DaQDNF49HUQ= github.com/celestiaorg/celestia-core v1.35.0-tm-v0.34.29/go.mod 
h1:weZR4wYx1Vcw3g1Jc5G8VipG4M+KUDSqeIzyyWszmsQ= github.com/celestiaorg/cosmos-sdk v1.20.1-sdk-v0.46.16 h1:9U9UthIJSOyVjabD5PkD6aczvqlWOyAFTOXw0duPT5k= @@ -374,8 +371,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26 h1:P2RI1xJ49EZ8cuHMcH+ZSBonfRDtBS8OS9Jdt1BWX3k= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26/go.mod h1:2m8ukndOegwB0PU0AfJCwDUQHqd7QQRlSXvQL5VToVY= -github.com/celestiaorg/nmt v0.21.0 h1:81MBqxNn3orByoiCtdNVjwi5WsLgMkzHwP02ZMhTBHM= -github.com/celestiaorg/nmt v0.21.0/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= +github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51 h1:vOLlAiHwCtXA7LNsXokDysmPHl2UvorPTARyhHQPQQA= +github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= github.com/celestiaorg/rsmt2d v0.13.1 h1:eRhp79DKTkDojwInKVs1lRK6f6zJc1BVlmZfUfI19yQ= @@ -519,8 +516,9 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= @@ -649,8 +647,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= @@ -860,7 +858,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -900,7 +897,6 @@ github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMd github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1375,7 +1371,6 @@ github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJ github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -2004,8 +1999,8 @@ 
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= @@ -2024,10 +2019,10 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -2202,11 +2197,11 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= @@ -2249,8 +2244,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -2508,12 +2503,10 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -2639,7 +2632,6 @@ golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -2804,7 +2796,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2816,7 +2807,6 @@ golang.org/x/sys 
v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2858,7 +2848,6 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2969,7 +2958,6 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -3112,10 +3100,8 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go new file mode 100644 index 0000000000..42855fd0e8 --- /dev/null +++ b/nodebuilder/blobstream/blobstream.go @@ -0,0 +1,56 @@ +package blobstream + +import ( + 
"context" + "github.com/celestiaorg/celestia-node/share" + "github.com/tendermint/tendermint/libs/bytes" +) + +var _ Module = (*API)(nil) + +// Module defines the API related to interacting with the proofs +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // DataCommitment collects the data roots over a provided ordered range of blocks, + // and then creates a new Merkle root of those data roots. The range is end exclusive. + DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) + + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + DataRootInclusionProof(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) + + // ProveShares generates a share proof for a share range. + ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) + // ProveCommitment generates a commitment proof for a share commitment. + ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) +} + +type Internal struct { + DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) + DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) + ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) + ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) +} + +// API is a wrapper around the Module for RPC. 
+type API struct { + Internal Internal +} + +func (api *API) DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { + return api.Internal.DataCommitment(ctx, start, end) +} + +func (api *API) DataRootInclusionProof(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) { + return api.Internal.DataRootInclusionProof(ctx, height, start, end) +} + +func (api *API) ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) { + return api.Internal.ProveShares(ctx, height, start, end) +} + +func (api *API) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) { + return api.Internal.ProveCommitment(ctx, height, namespace, shareCommitment) +} diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go new file mode 100644 index 0000000000..a6f5928f34 --- /dev/null +++ b/nodebuilder/blobstream/mocks/api.go @@ -0,0 +1,98 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/blobstream (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + share "github.com/celestiaorg/celestia-node/share" + gomock "github.com/golang/mock/gomock" + bytes "github.com/tendermint/tendermint/libs/bytes" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. 
+func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// DataCommitment mocks base method. +func (m *MockModule) DataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DataCommitment", arg0, arg1, arg2) + ret0, _ := ret[0].(*blobstream.ResultDataCommitment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DataCommitment indicates an expected call of DataCommitment. +func (mr *MockModuleMockRecorder) DataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCommitment", reflect.TypeOf((*MockModule)(nil).DataCommitment), arg0, arg1, arg2) +} + +// DataRootInclusionProof mocks base method. +func (m *MockModule) DataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DataRootInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DataRootInclusionProof indicates an expected call of DataRootInclusionProof. +func (mr *MockModuleMockRecorder) DataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).DataRootInclusionProof), arg0, arg1, arg2, arg3) +} + +// ProveCommitment mocks base method. 
+func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 bytes.HexBytes) (*blobstream.ResultCommitmentProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProveCommitment", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.ResultCommitmentProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProveCommitment indicates an expected call of ProveCommitment. +func (mr *MockModuleMockRecorder) ProveCommitment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProveCommitment", reflect.TypeOf((*MockModule)(nil).ProveCommitment), arg0, arg1, arg2, arg3) +} + +// ProveShares mocks base method. +func (m *MockModule) ProveShares(arg0 context.Context, arg1, arg2, arg3 uint64) (*blobstream.ResultShareProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProveShares", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.ResultShareProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProveShares indicates an expected call of ProveShares. 
+func (mr *MockModuleMockRecorder) ProveShares(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProveShares", reflect.TypeOf((*MockModule)(nil).ProveShares), arg0, arg1, arg2, arg3) +} diff --git a/nodebuilder/blobstream/module.go b/nodebuilder/blobstream/module.go new file mode 100644 index 0000000000..c8deb1db10 --- /dev/null +++ b/nodebuilder/blobstream/module.go @@ -0,0 +1,12 @@ +package blobstream + +import "go.uber.org/fx" + +func ConstructModule() fx.Option { + return fx.Module("blobstream", + fx.Provide(NewService), + fx.Provide(func(serv *Service) Module { + return serv + }), + ) +} diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go new file mode 100644 index 0000000000..c3e072f3de --- /dev/null +++ b/nodebuilder/blobstream/service.go @@ -0,0 +1,549 @@ +package blobstream + +import ( + bytes2 "bytes" + "context" + "encoding/hex" + "fmt" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-node/blob" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" + shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/nmt" + logging "github.com/ipfs/go-log/v2" + "github.com/tendermint/tendermint/crypto/merkle" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/types" + "math" + "strconv" +) + +var _ Module = (*Service)(nil) + +var log = logging.Logger("go-blobstream") + +type Service struct { + blobServ nodeblob.Module + headerServ headerServ.Module + shareServ shareServ.Module +} + +func NewService(blobMod nodeblob.Module, headerMod headerServ.Module, shareMod shareServ.Module) *Service { + return 
&Service{ + blobServ: blobMod, + headerServ: headerMod, + shareServ: shareMod, + } +} + +// DataCommitment collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. The range is end exclusive. +func (s *Service) DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { + log.Debugw("validating the data commitment range", "start", start, "end", end) + err := s.validateDataCommitmentRange(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + tuples, err := s.fetchDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("hashing the data root tuples", "start", start, "end", end) + root, err := hashDataRootTuples(tuples) + if err != nil { + return nil, err + } + // Create data commitment + return &ResultDataCommitment{DataCommitment: root}, nil +} + +// DataRootInclusionProof creates an inclusion proof for the data root of block +// height `height` in the set of blocks defined by `start` and `end`. The range +// is end exclusive. 
+func (s *Service) DataRootInclusionProof( + ctx context.Context, + height int64, + start, + end uint64, +) (*ResultDataRootInclusionProof, error) { + log.Debugw("validating the data root inclusion proof request", "start", start, "end", end, "height", height) + err := s.validateDataRootInclusionProofRequest(ctx, uint64(height), start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + tuples, err := s.fetchDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("proving the data root tuples", "start", start, "end", end) + proof, err := proveDataRootTuples(tuples, height) + if err != nil { + return nil, err + } + return &ResultDataRootInclusionProof{Proof: *proof}, nil +} + +// padBytes Pad bytes to given length +func padBytes(byt []byte, length int) ([]byte, error) { + l := len(byt) + if l > length { + return nil, fmt.Errorf( + "cannot pad bytes because length of bytes array: %d is greater than given length: %d", + l, + length, + ) + } + if l == length { + return byt, nil + } + tmp := make([]byte, length) + copy(tmp[length-l:], byt) + return tmp, nil +} + +// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. +// Used to mimic the result of `abi.encode(number)` in Ethereum. +func To32PaddedHexBytes(number uint64) ([]byte, error) { + hexRepresentation := strconv.FormatUint(number, 16) + // Make sure hex representation has even length. + // The `strconv.FormatUint` can return odd length hex encodings. + // For example, `strconv.FormatUint(10, 16)` returns `a`. + // Thus, we need to pad it. 
+ if len(hexRepresentation)%2 == 1 { + hexRepresentation = "0" + hexRepresentation + } + hexBytes, hexErr := hex.DecodeString(hexRepresentation) + if hexErr != nil { + return nil, hexErr + } + paddedBytes, padErr := padBytes(hexBytes, 32) + if padErr != nil { + return nil, padErr + } + return paddedBytes, nil +} + +// DataRootTuple contains the data that will be used to create the QGB commitments. +// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. +// For more information: https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +type DataRootTuple struct { + height uint64 + dataRoot [32]byte +} + +// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of +// `abi.encode(...)` in Ethereum. +// The encoded type is a DataRootTuple, which has the following ABI: +// +// { +// "components":[ +// { +// "internalType":"uint256", +// "name":"height", +// "type":"uint256" +// }, +// { +// "internalType":"bytes32", +// "name":"dataRoot", +// "type":"bytes32" +// }, +// { +// "internalType":"structDataRootTuple", +// "name":"_tuple", +// "type":"tuple" +// } +// ] +// } +// +// padding the hex representation of the height padded to 32 bytes concatenated to the data root. +// For more information, refer to: +// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { + paddedHeight, err := To32PaddedHexBytes(height) + if err != nil { + return nil, err + } + return append(paddedHeight, dataRoot[:]...), nil +} + +// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. +// It's a local parameter to protect the API from creating unnecessarily large commitments. +const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. 
+ +// validateDataCommitmentRange runs basic checks on the asc sorted list of +// heights that will be used subsequently in generating data commitments over +// the defined set of heights. +func (s *Service) validateDataCommitmentRange(ctx context.Context, start uint64, end uint64) error { + if start == 0 { + return fmt.Errorf("the start block is 0") + } + if start >= end { + return fmt.Errorf("end block is smaller or equal to the start block") + } + heightsRange := end - start + if heightsRange > uint64(dataCommitmentBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) + } + + currentHeader, err := s.headerServ.NetworkHead(ctx) + if err != nil { + return err + } + // the data commitment range is end exclusive + if end > uint64(currentHeader.Height())+1 { + return fmt.Errorf( + "end block %d is higher than current chain height %d", + end, + currentHeader.Height(), + ) + } + + currentLocalHeader, err := s.headerServ.LocalHead(ctx) + if err != nil { + return err + } + // the data commitment range is end exclusive + if end > uint64(currentLocalHeader.Height())+1 { + return fmt.Errorf( + "end block %d is higher than local chain height %d. Wait for the node until it syncs up to %d", + end, + currentLocalHeader.Height(), + end, + ) + } + return nil +} + +// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square size, +// then returns their merkle root. 
+func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { + if len(tuples) == 0 { + return nil, fmt.Errorf("cannot hash an empty list of data root tuples") + } + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + root := merkle.HashFromByteSlices(dataRootEncodedTuples) + return root, nil +} + +// validateDataRootInclusionProofRequest validates the request to generate a data root +// inclusion proof. +func (s *Service) validateDataRootInclusionProofRequest(ctx context.Context, height uint64, start uint64, end uint64) error { + err := s.validateDataCommitmentRange(ctx, start, end) + if err != nil { + return err + } + if height < start || height >= end { + return fmt.Errorf( + "height %d should be in the end exclusive interval first_block %d last_block %d", + height, + start, + end, + ) + } + return nil +} + +// proveDataRootTuples returns the merkle inclusion proof for a height. 
+func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { + if len(tuples) == 0 { + return nil, fmt.Errorf("cannot prove an empty list of tuples") + } + if height < 0 { + return nil, fmt.Errorf("cannot prove a strictly negative height %d", height) + } + currentHeight := tuples[0].height - 1 + for _, tuple := range tuples { + if tuple.height != currentHeight+1 { + return nil, fmt.Errorf("the provided tuples are not consecutive %d vs %d", currentHeight, tuple.height) + } + currentHeight += 1 + } + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) + return proofs[height-int64(tuples[0].height)], nil +} + +// fetchDataRootTuples takes an end exclusive range of heights and fetches its +// corresponding data root tuples. +func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]DataRootTuple, error) { + tuples := make([]DataRootTuple, 0, end-start) + for height := start; height < end; height++ { + block, err := s.headerServ.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("couldn't load block %d", height) + } + tuples = append(tuples, DataRootTuple{ + height: block.Height(), + dataRoot: *(*[32]byte)(block.DataHash), + }) + } + return tuples, nil +} + +// ProveShares generates a share proof for a share range. +// Note: queries the whole EDS to generate the proof. +// This can be improved by selecting the set of shares that will need to be used to create +// the proof and only querying them. However, that would require re-implementing the logic +// in Core. Also, core also queries the whole EDS to generate the proof. So, it's fine for +// now. 
In the future, when blocks get way bigger, we should revisit this and improve it. +func (s *Service) ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) { + log.Debugw("proving share range", "start", start, "end", end, "height", height) + if height == 0 { + return nil, fmt.Errorf("height cannot be equal to 0") + } + if start == end { + return nil, fmt.Errorf("start share cannot be equal to end share") + } + if start > end { + return nil, fmt.Errorf("start share %d cannot be greater than end share %d", start, end) + } + + log.Debugw("getting extended header", "height", height) + extendedHeader, err := s.headerServ.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + log.Debugw("getting eds", "height", height) + eds, err := s.shareServ.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, err + } + + startInt, err := uint64ToInt(start) + if err != nil { + return nil, err + } + endInt, err := uint64ToInt(end) + if err != nil { + return nil, err + } + odsShares, err := shares.FromBytes(eds.FlattenedODS()) + if err != nil { + return nil, err + } + nID, err := pkgproof.ParseNamespace(odsShares, startInt, endInt) + if err != nil { + return nil, err + } + log.Debugw("generating the share proof", "start", start, "end", end, "height", height) + proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(startInt, endInt)) + if err != nil { + return nil, err + } + return &ResultShareProof{ShareProof: proof}, nil +} + +// ProveCommitment generates a commitment proof for a share commitment. +// It takes as argument the height of the block containing the blob of data, its +// namespace and its share commitment. 
+func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment tmbytes.HexBytes) (*ResultCommitmentProof, error) { + log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) + if height == 0 { + return nil, fmt.Errorf("height cannot be equal to 0") + } + + // get the share to row root proofs. these proofs coincide with the subtree root to row root proofs. + log.Debugw("getting the blob proof", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) + shareToRowRootProofs, err := s.blobServ.GetProof(ctx, height, namespace, blob.Commitment(shareCommitment)) + if err != nil { + return nil, err + } + + // get the blob to compute the subtree roots + log.Debugw("getting the blob", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) + blb, err := s.blobServ.Get(ctx, height, namespace, shareCommitment.Bytes()) + if err != nil { + return nil, err + } + log.Debugw("converting the blob to shares", "height", height, "commitment", shareCommitment, "namespace", namespace) + blobShares, err := blob.BlobsToShares(blb) + if err != nil { + return nil, err + } + + // compute the subtree roots of the blob shares + log.Debugw("computing the subtree roots", "height", height, "commitment", shareCommitment, "namespace", namespace) + var subtreeRoots [][]byte + var dataCursor int + for _, proof := range *shareToRowRootProofs { + // TODO: do we want directly use the default subtree root threshold or want to allow specifying which version to use? + ranges, err := nmt.ToLeafRanges(proof.Start(), proof.End(), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) + if err != nil { + return nil, err + } + roots, err := computeSubtreeRoots(blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], ranges, proof.Start()) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, roots...) 
+ dataCursor += proof.End() - proof.Start() + } + + // get the extended header to get the row/column roots + log.Debugw("getting the extended header", "height", height) + extendedHeader, err := s.headerServ.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + + // rowWidth is the width of the square's rows. + rowWidth := len(extendedHeader.DAH.ColumnRoots) + + // finding the rows of the square that contain the blob + log.Debugw("getting the eds rows", "height", height) + startingRowIndex := -1 + for index, row := range extendedHeader.DAH.RowRoots { + if startingRowIndex >= 0 { + // we found the starting row of the share data + break + } + if !namespace.IsOutsideRange(row, row) { + // we found the first row where the namespace data starts + // we should go over the row shares to find the row where the data lives + for i := 0; i < rowWidth; i++ { + // an alternative to this would be querying the whole EDS. + // if that's faster given the number of the queries to the network, + // we can change that in here. + sh, err := s.shareServ.GetShare(ctx, extendedHeader, index, i) + if err != nil { + return nil, err + } + if bytes2.Equal(sh, blobShares[0]) { + // if the queried share is the same as the blob's data first share, + // then we found the first row of our data. + startingRowIndex = index + break + } + } + } + } + + if startingRowIndex < 0 { + return nil, fmt.Errorf("couldn't find the blob starting row") + } + + // the blob's data row roots start at the starting row index, and span over the number of row proofs that we have + dataRowRoots := func() []tmbytes.HexBytes { + var tmBytesRowRoots []tmbytes.HexBytes + for _, rowRoot := range extendedHeader.DAH.RowRoots[startingRowIndex : startingRowIndex+len(*shareToRowRootProofs)] { + tmBytesRowRoots = append(tmBytesRowRoots, tmbytes.FromBytes(rowRoot)...) 
+ } + return tmBytesRowRoots + }() + + // generate all the row proofs + log.Debugw("generating the row roots proofs", "height", height) + _, allRowProofs := merkle.ProofsFromByteSlices(append(extendedHeader.DAH.RowRoots, extendedHeader.DAH.ColumnRoots...)) + + log.Debugw("successfuly proved the share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: *shareToRowRootProofs, + NamespaceID: namespace.ID(), + RowProof: types.RowProof{ + RowRoots: dataRowRoots, + Proofs: allRowProofs[startingRowIndex : startingRowIndex+len(*shareToRowRootProofs)], + StartRow: uint32(startingRowIndex), // these conversions are safe because we return if the startingRowIndex is strictly negative + EndRow: uint32(startingRowIndex + len(*shareToRowRootProofs) - 1), + }, + NamespaceVersion: namespace.Version(), + } + + return &ResultCommitmentProof{CommitmentProof: commitmentProof}, nil +} + +// computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. +// the offset is the number of shares that are before the subtree roots we're calculating. +func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset int) ([][]byte, error) { + if len(shares) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty shares list") + } + if len(ranges) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty ranges list") + } + if offset < 0 { + return nil, fmt.Errorf("the offset %d cannot be stricly negative", offset) + } + + // create a tree containing the shares to generate their subtree roots + tree := nmt.New(appconsts.NewBaseHashFunc(), nmt.IgnoreMaxNamespace(true), nmt.NamespaceIDSize(share.NamespaceSize)) + for _, sh := range shares { + var leafData []byte + leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) 
+ err := tree.Push(leafData) + if err != nil { + return nil, err + } + } + + // generate the subtree roots + var subtreeRoots [][]byte + for _, rg := range ranges { + root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, root) + } + return subtreeRoots, nil +} + +func uint64ToInt(number uint64) (int, error) { + if number >= math.MaxInt { + return 0, fmt.Errorf("number %d is higher than max int %d", number, math.MaxInt) + } + return int(number), nil +} + +// ProveSubtreeRootToCommitment generates a subtree root to share commitment inclusion proof. +// Note: this method is not part of the API. It will not be served by any endpoint, however, +// it can be called directly programmatically. +func ProveSubtreeRootToCommitment(subtreeRoots [][]byte, subtreeRootIndex uint64) (*ResultSubtreeRootToCommitmentProof, error) { + _, proofs := merkle.ProofsFromByteSlices(subtreeRoots) + return &ResultSubtreeRootToCommitmentProof{ + SubtreeRootToCommitmentProof: SubtreeRootToCommitmentProof{ + Proof: *proofs[subtreeRootIndex], + }, + }, nil +} + +// ProveShareToSubtreeRoot generates a share to subtree root inclusion proof +// Note: this method is not part of the API. It will not be served by any endpoint, however, +// it can be called directly programmatically. 
+func ProveShareToSubtreeRoot(shares [][]byte, shareIndex uint64) (*ResultShareToSubtreeRootProof, error) { + _, proofs := merkle.ProofsFromByteSlices(shares) + return &ResultShareToSubtreeRootProof{ + ShareToSubtreeRootProof: ShareToSubtreeRootProof{ + Proof: *proofs[shareIndex], + }, + }, nil +} diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go new file mode 100644 index 0000000000..209ae101f7 --- /dev/null +++ b/nodebuilder/blobstream/service_test.go @@ -0,0 +1,1086 @@ +package blobstream + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" + shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" + "math" + "testing" + + "github.com/celestiaorg/celestia-app/test/util/blobfactory" + + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/pkg/square" + "github.com/celestiaorg/celestia-app/test/util/testfactory" + "github.com/celestiaorg/celestia-app/x/blob/types" + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" + bytes2 "github.com/tendermint/tendermint/libs/bytes" + coretypes "github.com/tendermint/tendermint/types" +) + +func 
TestPadBytes(t *testing.T) { + tests := []struct { + input []byte + length int + expected []byte + expectErr bool + }{ + {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, + {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, + } + + for _, test := range tests { + result, err := padBytes(test.input, test.length) + if test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + } +} + +func TestTo32PaddedHexBytes(t *testing.T) { + tests := []struct { + number uint64 + expected []byte + expectError bool + }{ + { + number: 10, + expected: func() []byte { + res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 4294967295, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") + return res + }(), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { + result, err := To32PaddedHexBytes(test.number) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + }) + } +} + +func TestEncodeDataRootTuple(t *testing.T) { + height := uint64(2) + dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") + require.NoError(t, err) + + expectedEncoding, err := hex.DecodeString( + // hex representation of height padded 
to 32 bytes + "0000000000000000000000000000000000000000000000000000000000000002" + + // data root + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", + ) + require.NoError(t, err) + require.NotNil(t, expectedEncoding) + + actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + require.NoError(t, err) + require.NotNil(t, actualEncoding) + + // Check that the length of packed data is correct + assert.Equal(t, len(actualEncoding), 64) + assert.Equal(t, expectedEncoding, actualEncoding) +} + +func TestHashDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples []DataRootTuple + expectedHash []byte + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "valid list of data root tuples": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + }, + expectedHash: func() []byte { + tuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) + + return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := hashDataRootTuples(tc.tuples) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedHash, result) + } + }) + } +} + +func TestProveDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples []DataRootTuple + height int64 + expectedProof merkle.Proof + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "strictly negative height": { + height: -1, + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + }, + expectErr: true, + }, + "non consecutive list of tuples at the beginning": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 3, + dataRoot: [32]byte{0x2}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, 
+ }, + "non consecutive list of tuples in the middle": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x2}, + }, + { + height: 5, + dataRoot: [32]byte{0x4}, + }, + { + height: 6, + dataRoot: [32]byte{0x5}, + }, + }, + expectErr: true, + }, + "non consecutive list of tuples at the end": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, + }, + "duplicate height at the beginning": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, + }, + "duplicate height in the middle": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x3}, + }, + }, + expectErr: true, + }, + "duplicate height at the end": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + }, + expectErr: true, + }, + "valid proof": { + height: 3, + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x3}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectedProof: func() merkle.Proof { + encodedTuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := EncodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := EncodeDataRootTuple(4, [32]byte{0x4}) + _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, 
encodedTuple2, encodedTuple3, encodedTuple4}) + return *proofs[2] + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := proveDataRootTuples(tc.tuples, tc.height) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + } + }) + } +} + +func TestUint64ToInt(t *testing.T) { + tests := []struct { + number uint64 + expected int + expectErr bool + }{ + {number: 0, expected: 0}, + {number: 10, expected: 10}, + {number: math.MaxInt - 1, expected: math.MaxInt - 1}, + {number: math.MaxInt, expected: 0, expectErr: true}, + {number: math.MaxInt + 1, expected: 0, expectErr: true}, + {number: math.MaxUint64, expected: 0, expectErr: true}, + } + + for _, test := range tests { + result, err := uint64ToInt(test.number) + if test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + } +} + +func TestDataCommitment(t *testing.T) { + api := newTestAPI(t, 10, 1000, 10) + tests := map[string]struct { + start, end uint64 + expectedDataCommitment bytes2.HexBytes + expectErr bool + }{ + "start == 0": {start: 0, expectErr: true}, + "start block == end block": {start: 2, end: 2, expectErr: true}, + "start block > end block": {start: 3, end: 2, expectErr: true}, + "range exceeds data commitment blocks limit": {start: 3, end: dataCommitmentBlocksLimit + 10, expectErr: true}, + "end block is greater than the network block height": {start: 3, end: 15, expectErr: true}, + "valid case": { + start: 5, + end: 9, + expectedDataCommitment: func() bytes2.HexBytes { + tuples := []DataRootTuple{ + { + height: 5, + dataRoot: [32]byte(api.blocks[5].dataRoot), + }, + { + height: 6, + dataRoot: [32]byte(api.blocks[6].dataRoot), + }, + { + height: 7, + dataRoot: [32]byte(api.blocks[7].dataRoot), + }, + { + height: 8, + dataRoot: [32]byte(api.blocks[8].dataRoot), + }, + } + hash, err := hashDataRootTuples(tuples) + 
require.NoError(t, err) + return hash + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := api.blobService.DataCommitment(context.Background(), tc.start, tc.end) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedDataCommitment, result.DataCommitment) + } + }) + } +} + +func TestDataRootInclusionProof(t *testing.T) { + api := newTestAPI(t, 10, 1000, 10) + tests := map[string]struct { + height int64 + start, end uint64 + expectedProof merkle.Proof + expectErr bool + }{ + "height < 0": {height: -1, expectErr: true}, + "height == 0": {height: 0, expectErr: true}, + "start == 0": {start: 0, expectErr: true}, + "start block == end block": {start: 2, end: 2, expectErr: true}, + "start block > end block": {start: 3, end: 2, expectErr: true}, + "height < start": {height: 2, start: 3, end: 2, expectErr: true}, + "height == end": {height: 4, start: 3, end: 4, expectErr: true}, + "height > end": {height: 5, start: 3, end: 4, expectErr: true}, + "range exceeds data commitment blocks limit": {start: 3, end: dataCommitmentBlocksLimit + 10, expectErr: true}, + "end block is greater than the network block height": {start: 3, end: 15, expectErr: true}, + "start block is greater than the network block height": {start: 12, end: 15, height: 14, expectErr: true}, + "height block is greater than the network block height": {start: 1, end: 15, height: 14, expectErr: true}, + "valid case": { + height: 6, + start: 5, + end: 9, + expectedProof: func() merkle.Proof { + encodedTuple5, _ := EncodeDataRootTuple(5, [32]byte(api.blocks[5].dataRoot)) + encodedTuple6, _ := EncodeDataRootTuple(6, [32]byte(api.blocks[6].dataRoot)) + encodedTuple7, _ := EncodeDataRootTuple(7, [32]byte(api.blocks[7].dataRoot)) + encodedTuple8, _ := EncodeDataRootTuple(8, [32]byte(api.blocks[8].dataRoot)) + _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple5, encodedTuple6, encodedTuple7, encodedTuple8}) 
+ return *proofs[1] + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := api.blobService.DataRootInclusionProof(context.Background(), tc.height, tc.start, tc.end) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, result.Proof) + } + }) + } +} + +func TestProveShares(t *testing.T) { + api := newTestAPI(t, 10, 1000, 10) + tests := map[string]struct { + height uint64 + start, end uint64 + expectedProof ResultShareProof + expectErr bool + }{ + "height == 0": {height: 0, expectErr: true}, + "height > blockchain tip": {height: 100, expectErr: true}, + "start share == end share": {start: 2, end: 2, expectErr: true}, + "start share > end share": {start: 3, end: 2, expectErr: true}, + "start share > number of shares in the block": {start: 200, end: 201, expectErr: true}, + "end share > number of shares in the block": {start: 1, end: 201, expectErr: true}, + "valid case": { + height: 6, + start: 0, + end: 2, + expectedProof: func() ResultShareProof { + proof, err := pkgproof.NewShareInclusionProofFromEDS(api.blocks[6].eds, namespace.PayForBlobNamespace, shares.NewRange(0, 2)) + require.NoError(t, err) + require.NoError(t, proof.Validate(api.blocks[6].dataRoot)) + return ResultShareProof{ShareProof: proof} + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := api.blobService.ProveShares(context.Background(), tc.height, tc.start, tc.end) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + assert.NoError(t, result.ShareProof.Validate(api.blocks[6].dataRoot)) + } + }) + } +} + +func TestProveCommitment(t *testing.T) { + api := newTestAPI(t, 10, 300, 10) + + tests := map[string]struct { + height uint64 + commitment bytes2.HexBytes + ns share.Namespace + expectedProof ResultCommitmentProof + expectErr bool + }{ + "height == 0": {height: 0, 
expectErr: true}, + "valid case": { + height: 6, + ns: api.blocks[6].msgs[0].Namespaces[0], + commitment: api.blocks[6].msgs[0].ShareCommitments[0], + expectedProof: func() ResultCommitmentProof { + commitmentProof := generateCommitmentProofFromBlock(t, api.blocks[6], 0) + + // make sure we're creating a valid proof for the test + require.NoError(t, commitmentProof.CommitmentProof.Validate()) + valid, err := commitmentProof.CommitmentProof.Verify(api.blocks[6].dataRoot, appconsts.DefaultSubtreeRootThreshold) + require.NoError(t, err) + require.True(t, valid) + + return commitmentProof + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := api.blobService.ProveCommitment(context.Background(), tc.height, tc.ns, tc.commitment) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // make sure the actual proof can be validated and verified + assert.NoError(t, result.CommitmentProof.Validate()) + valid, err := result.CommitmentProof.Verify(api.blocks[tc.height].dataRoot, appconsts.DefaultSubtreeRootThreshold) + assert.NoError(t, err) + assert.True(t, valid) + + // make sure the expected proof is the same as the actual proof + assert.Equal(t, tc.expectedProof, *result) + + // make sure the expected commitment commits to the subtree roots in the actual proof + actualCommitment, _ := merkle.ProofsFromByteSlices(result.CommitmentProof.SubtreeRoots) + assert.Equal(t, tc.commitment.Bytes(), actualCommitment) + } + }) + } +} + +// TestProveCommitmentAllCombinations tests proving all the commitments in a block. +// The number of shares per blob increases with each blob to cover proving a large number +// of possibilities. 
+func TestProveCommitmentAllCombinations(t *testing.T) { + tests := map[string]struct { + numberOfBlocks int + blobSize int + }{ + "very small blobs that take less than a share": {numberOfBlocks: 20, blobSize: 350}, + "small blobs that take 2 shares": {numberOfBlocks: 20, blobSize: 1000}, + "small blobs that take ~10 shares": {numberOfBlocks: 10, blobSize: 5000}, + "large blobs ~100 shares": {numberOfBlocks: 5, blobSize: 50000}, + "large blobs ~150 shares": {numberOfBlocks: 5, blobSize: 75000}, + "large blobs ~300 shares": {numberOfBlocks: 5, blobSize: 150000}, + "very large blobs ~1500 shares": {numberOfBlocks: 3, blobSize: 750000}, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + proveAllCommitments(t, tc.numberOfBlocks, tc.blobSize) + }) + } +} + +func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { + api := newTestAPI(t, numberOfBlocks, blobSize, 10) + for blockIndex, block := range api.blocks { + for msgIndex, msg := range block.msgs { + t.Run(fmt.Sprintf("height=%d, blobIndex=%d", blockIndex, msgIndex), func(t *testing.T) { + // compute the commitment + actualCommitmentProof, err := api.blobService.ProveCommitment(context.Background(), uint64(blockIndex), msg.Namespaces[0], msg.ShareCommitments[0]) + require.NoError(t, err) + + // make sure the actual commitment attests to the data + require.NoError(t, actualCommitmentProof.CommitmentProof.Validate()) + valid, err := actualCommitmentProof.CommitmentProof.Verify(block.dataRoot, appconsts.DefaultSubtreeRootThreshold) + require.NoError(t, err) + require.True(t, valid) + + // generate an expected proof and verify it's valid + expectedCommitmentProof := generateCommitmentProofFromBlock(t, block, msgIndex) + require.NoError(t, expectedCommitmentProof.CommitmentProof.Validate()) + valid, err = expectedCommitmentProof.CommitmentProof.Verify(block.dataRoot, appconsts.DefaultSubtreeRootThreshold) + require.NoError(t, err) + require.True(t, valid) + + // make sure the expected 
proof is the same as the actual on + assert.Equal(t, expectedCommitmentProof, *actualCommitmentProof) + + // make sure the expected commitment commits to the subtree roots in the result proof + actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.CommitmentProof.SubtreeRoots) + assert.Equal(t, msg.ShareCommitments[0], actualCommitment) + }) + } + } +} + +// testBlock is a block struct used to keep track of all the information +// needed to mock the API. +type testBlock struct { + msgs []*types.MsgPayForBlobs + blobs []*types.Blob + nss []namespace.Namespace + eds *rsmt2d.ExtendedDataSquare + coreTxs coretypes.Txs + dah *da.DataAvailabilityHeader + dataRoot []byte +} + +// testAPI an API that allows mocking all the methods and thoroughly testing them +type testAPI struct { + blobService *Service + blocks []testBlock +} + +// newTestAPI creates a new test API that fetches data from a test blockchain that has +// a specific number of blocks. Each block has a number of PFBs. Each PFB has a single blob with +// size blobSize or bigger. +func newTestAPI(t *testing.T, numberOfBlocks int, blobSize int, numberOfPFBs int) *testAPI { + blocks := []testBlock{{}} // so that the heights match the slice indexes + blocks = append(blocks, generateTestBlocks(t, numberOfBlocks, blobSize, numberOfPFBs)...) + + newTestService := NewService( + mockBlobService{blocks}, + mockHeaderService{blocks}, + mockShareService{blocks}, + ) + api := &testAPI{ + blobService: newTestService, + blocks: blocks, + } + + return api +} + +// addBlock adds a new block the testAPI. +// The added block can be created in the tests and added to the chain +// to test specific cases. 
+// +//nolint:unused +func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { + acc := "blobstream-api-tests" + kr := testfactory.GenerateKeyring(acc) + signer := types.NewKeyringSigner(kr, acc, "test") + + var msgs []*types.MsgPayForBlobs + var blobs []*types.Blob + var nss []namespace.Namespace + var coreTxs coretypes.Txs + + for i := 0; i < numberOfBlobs; i++ { + ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, blobSize) + msgs = append(msgs, msg) + blobs = append(blobs, blob) + nss = append(nss, ns) + coreTxs = append(coreTxs, coreTx) + } + + var txs coretypes.Txs + txs = append(txs, coreTxs...) + dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) + require.NoError(t, err) + + // erasure the data square which we use to create the data root. + eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) + require.NoError(t, err) + + // create the new data root by creating the data availability header (merkle + // roots of each row and col of the erasure data). + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + dataRoot := dah.Hash() + api.blocks = append(api.blocks, testBlock{ + msgs: msgs, + blobs: blobs, + nss: nss, + coreTxs: coreTxs, + eds: eds, + dah: &dah, + dataRoot: dataRoot, + }) + + return len(api.blocks) - 1 +} + +// generateCommitmentProofFromBlock takes a block and a PFB index and generates the commitment proof +// using the traditional way of doing, instead of using the API. 
+func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex int) ResultCommitmentProof { + // parse the namespace + ns, err := share.NamespaceFromBytes( + append( + []byte{byte(block.blobs[blobIndex].NamespaceVersion)}, block.blobs[blobIndex].NamespaceId..., + ), + ) + require.NoError(t, err) + + // create the blob from the data + blb, err := blob.NewBlob(uint8(block.blobs[blobIndex].ShareVersion), ns, block.blobs[blobIndex].Data) + require.NoError(t, err) + + // convert the blob to a number of shares + blobShares, err := blob.BlobsToShares(blb) + require.NoError(t, err) + + // find the first share of the blob in the ODS + startShareIndex := -1 + for i, sh := range block.eds.FlattenedODS() { + if bytes.Equal(sh, blobShares[0]) { + startShareIndex = i + break + } + } + require.Greater(t, startShareIndex, 0) + + // create an inclusion proof of the blob using the share range instead of the commitment + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS(block.eds, ns.ToAppNamespace(), shares.NewRange(startShareIndex, startShareIndex+len(blobShares))) + require.NoError(t, err) + require.NoError(t, sharesProof.Validate(block.dataRoot)) + + // calculate the subtree roots + var subtreeRoots [][]byte + var dataCursor int + for _, proof := range sharesProof.ShareProofs { + ranges, err := nmt.ToLeafRanges(int(proof.Start), int(proof.End), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) + require.NoError(t, err) + roots, err := computeSubtreeRoots(blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], ranges, int(proof.Start)) + require.NoError(t, err) + subtreeRoots = append(subtreeRoots, roots...) 
+ dataCursor += int(proof.End - proof.Start) + } + + // convert the nmt proof to be accepted by the commitment proof + var nmtProofs []*nmt.Proof + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) + nmtProofs = append(nmtProofs, &nmtProof) + } + + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + NamespaceID: sharesProof.NamespaceID, + RowProof: sharesProof.RowProof, + NamespaceVersion: uint8(sharesProof.NamespaceVersion), + } + + return ResultCommitmentProof{CommitmentProof: commitmentProof} +} + +// generateTestBlocks generates a set of test blocks with a specific blob size and number of transactions +func generateTestBlocks(t *testing.T, numberOfBlocks int, blobSize int, numberOfTransactions int) []testBlock { + require.Greater(t, numberOfBlocks, 1) + var blocks []testBlock + for i := 1; i <= numberOfBlocks; i++ { + nss, msgs, blobs, coreTxs := createTestBlobTransactions(t, numberOfTransactions, blobSize) + + var txs coretypes.Txs + txs = append(txs, coreTxs...) + dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) + require.NoError(t, err) + + // erasure the data square which we use to create the data root. + eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) + require.NoError(t, err) + + // create the new data root by creating the data availability header (merkle + // roots of each row and col of the erasure data). + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + dataRoot := dah.Hash() + blocks = append(blocks, testBlock{ + msgs: msgs, + blobs: blobs, + nss: nss, + eds: eds, + dah: &dah, + dataRoot: dataRoot, + coreTxs: coreTxs, + }) + } + return blocks +} + +// createTestBlobTransactions generates a set of transactions that can be added to a blob. 
+// The number of transactions dictates the number of PFBs that will be returned. +// The size refers to the size of the data contained in the PFBs in bytes. +func createTestBlobTransactions(t *testing.T, numberOfTransactions int, size int) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { + acc := "blobstream-api-tests" + kr := testfactory.GenerateKeyring(acc) + signer := types.NewKeyringSigner(kr, acc, "test") + + var nss []namespace.Namespace + var msgs []*types.MsgPayForBlobs + var blobs []*types.Blob + var coreTxs []coretypes.Tx + for i := 0; i < numberOfTransactions; i++ { + ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, size+i*1000) + nss = append(nss, ns) + msgs = append(msgs, msg) + blobs = append(blobs, blob) + coreTxs = append(coreTxs, coreTx) + } + + return nss, msgs, blobs, coreTxs +} + +// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific PFB size. +// The size is in bytes. +func createTestBlobTransaction(t *testing.T, signer *types.KeyringSigner, size int) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { + addr, err := signer.GetSignerInfo().GetAddress() + require.NoError(t, err) + + ns := namespace.RandomBlobNamespace() + msg, blob := blobfactory.RandMsgPayForBlobsWithNamespaceAndSigner(addr.String(), ns, size) + require.NoError(t, err) + + builder := signer.NewTxBuilder() + stx, err := signer.BuildSignedTx(builder, msg) + require.NoError(t, err) + rawTx, err := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxEncoder()(stx) + require.NoError(t, err) + cTx, err := coretypes.MarshalBlobTx(rawTx, blob) + require.NoError(t, err) + return ns, msg, blob, cTx +} + +func TestShareToSubtreeRootProof(t *testing.T) { + var shares [][]byte + // generate some shares + for i := 0; i < 10; i++ { + shares = append(shares, bytes.Repeat([]byte{0x1}, appconsts.ShareSize)) + } + // calculate the expected proof + subtreeRoot, 
expectedProofs := merkle.ProofsFromByteSlices(shares) + + // calculate the actual proofs + var actualProofs []*ResultShareToSubtreeRootProof + for i := range shares { + proof, err := ProveShareToSubtreeRoot(shares, uint64(i)) + require.NoError(t, err) + actualProofs = append(actualProofs, proof) + } + + // compare the proofs and validate + for shareIndex, actualProof := range actualProofs { + t.Run(fmt.Sprintf("shareIndex=%d", shareIndex), func(t *testing.T) { + valid, err := actualProof.ShareToSubtreeRootProof.Verify(subtreeRoot, shares[shareIndex]) + assert.NoError(t, err) + assert.True(t, valid) + assert.Equal(t, *expectedProofs[shareIndex], actualProof.ShareToSubtreeRootProof.Proof) + }) + } +} + +func TestSubtreeRootsToCommitmentProof(t *testing.T) { + rowRootSize := sha256.Size + 2*appconsts.NamespaceSize + var subtreeRoots [][]byte + // generate some subtreeRoots + for i := 0; i < 10; i++ { + subtreeRoots = append(subtreeRoots, bytes.Repeat([]byte{0x1}, rowRootSize)) + } + // calculate the expected proof + shareCommitment, expectedProofs := merkle.ProofsFromByteSlices(subtreeRoots) + + // calculate the actual proofs + var actualProofs []*ResultSubtreeRootToCommitmentProof + for i := range subtreeRoots { + proof, err := ProveSubtreeRootToCommitment(subtreeRoots, uint64(i)) + require.NoError(t, err) + actualProofs = append(actualProofs, proof) + } + + // compare the proofs and validate + for subtreeRootIndex, actualProof := range actualProofs { + t.Run(fmt.Sprintf("subtreeRootIndex=%d", subtreeRootIndex), func(t *testing.T) { + valid, err := actualProof.SubtreeRootToCommitmentProof.Verify(shareCommitment, subtreeRoots[subtreeRootIndex]) + assert.NoError(t, err) + assert.True(t, valid) + assert.Equal(t, *expectedProofs[subtreeRootIndex], actualProof.SubtreeRootToCommitmentProof.Proof) + }) + } +} + +var _ nodeblob.Module = &mockBlobService{} + +type mockBlobService struct { + blocks []testBlock +} + +func (m mockBlobService) Submit(_ context.Context, _ 
[]*blob.Blob, _ blob.GasPrice) (height uint64, _ error) { + //TODO implement me + panic("implement me") +} + +func (m mockBlobService) Get(ctx context.Context, height uint64, ns share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { + if height > uint64(len(m.blocks)) { + return nil, errors.New("height greater than the blockchain") + } + for i, msg := range m.blocks[height].msgs { + if bytes.Equal(msg.ShareCommitments[0], commitment) { + blb, err := blob.NewBlob(uint8(m.blocks[height].blobs[i].ShareVersion), ns, m.blocks[height].blobs[i].Data) + if err != nil { + return nil, err + } + return blb, nil + } + } + return nil, fmt.Errorf("coudln't find commitment") +} + +func (m mockBlobService) GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) { + //TODO implement me + panic("implement me") +} + +func (m mockBlobService) GetProof(ctx context.Context, height uint64, ns share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { + if height >= uint64(len(m.blocks)) { + return nil, errors.New("height greater than the blockchain") + } + for i, msg := range m.blocks[height].msgs { + if bytes.Equal(msg.ShareCommitments[0], commitment) { + blobShareRange, err := square.BlobShareRange(m.blocks[height].coreTxs.ToSliceOfBytes(), i, 0, appconsts.LatestVersion) + if err != nil { + return nil, err + } + proof, err := pkgproof.NewShareInclusionProofFromEDS(m.blocks[height].eds, m.blocks[height].nss[i], blobShareRange) + if err != nil { + return nil, err + } + var nmtProofs []*nmt.Proof + for _, proof := range proof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), + int(proof.End), + proof.Nodes, + true) + nmtProofs = append( + nmtProofs, + &nmtProof, + ) + } + blobProof := blob.Proof(nmtProofs) + return &blobProof, nil + } + } + return nil, fmt.Errorf("coudln't find commitment") +} + +func (m mockBlobService) Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, 
error) { + //TODO implement me + panic("implement me") +} + +var _ shareServ.Module = &mockShareService{} + +type mockShareService struct { + blocks []testBlock +} + +func (m mockShareService) SharesAvailable(ctx context.Context, extendedHeader *header.ExtendedHeader) error { + //TODO implement me + panic("implement me") +} + +func (m mockShareService) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { + if header.Height() > uint64(len(m.blocks)) { + return nil, errors.New("height greater than the blockchain") + } + return m.blocks[header.Height()].eds.GetCell(uint(row), uint(col)), nil +} + +func (m mockShareService) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + if header.Height() >= uint64(len(m.blocks)) { + return nil, errors.New("height greater than the blockchain") + } + return m.blocks[header.Height()].eds, nil +} + +func (m mockShareService) GetSharesByNamespace(ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace) (share.NamespacedShares, error) { + //TODO implement me + panic("implement me") +} + +var _ headerServ.Module = &mockHeaderService{} + +type mockHeaderService struct { + blocks []testBlock +} + +func (m mockHeaderService) LocalHead(ctx context.Context) (*header.ExtendedHeader, error) { + return &header.ExtendedHeader{ + RawHeader: header.RawHeader{ + Height: int64(len(m.blocks) - 1), + DataHash: m.blocks[len(m.blocks)-1].dataRoot, + }, + DAH: m.blocks[len(m.blocks)-1].dah, + }, nil +} + +func (m mockHeaderService) GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { + //TODO implement me + panic("implement me") +} + +func (m mockHeaderService) GetRangeByHeight(ctx context.Context, from *header.ExtendedHeader, to uint64) ([]*header.ExtendedHeader, error) { + //TODO implement me + panic("implement me") +} + +func (m mockHeaderService) GetByHeight(ctx context.Context, height uint64) 
(*header.ExtendedHeader, error) { + if height >= uint64(len(m.blocks)) { + return nil, errors.New("height greater than the blockchain") + } + return &header.ExtendedHeader{ + RawHeader: header.RawHeader{ + Height: int64(height), + DataHash: m.blocks[height].dataRoot, + }, + DAH: m.blocks[height].dah, + }, nil +} + +func (m mockHeaderService) WaitForHeight(ctx context.Context, u uint64) (*header.ExtendedHeader, error) { + //TODO implement me + panic("implement me") +} + +func (m mockHeaderService) SyncState(ctx context.Context) (sync.State, error) { + //TODO implement me + panic("implement me") +} + +func (m mockHeaderService) SyncWait(ctx context.Context) error { + //TODO implement me + panic("implement me") +} + +func (m mockHeaderService) NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) { + return &header.ExtendedHeader{ + RawHeader: header.RawHeader{ + Height: int64(len(m.blocks) - 1), + DataHash: m.blocks[len(m.blocks)-1].dataRoot, + }, + Commit: nil, + ValidatorSet: nil, + DAH: m.blocks[len(m.blocks)-1].dah, + }, nil +} + +func (m mockHeaderService) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { + //TODO implement me + panic("implement me") +} diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go new file mode 100644 index 0000000000..1c45cd94db --- /dev/null +++ b/nodebuilder/blobstream/types.go @@ -0,0 +1,186 @@ +package blobstream + +import ( + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/shares" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" + "github.com/tendermint/tendermint/crypto/merkle" + + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/types" +) + +// ResultDataCommitment is the API response containing a data +// commitment, aka data root tuple root. 
+type ResultDataCommitment struct { + DataCommitment bytes.HexBytes `json:"data_commitment"` +} + +// ResultDataRootInclusionProof is the API response containing the binary merkle +// inclusion proof of a height to a data commitment. +type ResultDataRootInclusionProof struct { + Proof merkle.Proof `json:"proof"` +} + +// ResultShareProof is the API response that contains a ShareProof. +// A share proof is a proof of a set of shares to the data root. +type ResultShareProof struct { + ShareProof types.ShareProof `json:"share_proof"` +} + +// ResultCommitmentProof is an API response that contains a CommitmentProof. +// A commitment proof is a proof of a blob share commitment to the data root. +type ResultCommitmentProof struct { + CommitmentProof CommitmentProof `json:"commitment_proof"` +} + +// CommitmentProof is an inclusion proof of a commitment to the data root. +// TODO: Ask reviewers if we need protobuf definitions for this +type CommitmentProof struct { + // SubtreeRoots are the subtree roots of the blob's data that are + // used to create the commitment. + SubtreeRoots [][]byte `json:"subtree_roots"` + // SubtreeRootProofs are the NMT proofs for the subtree roots + // to the row roots. + SubtreeRootProofs []*nmt.Proof `json:"subtree_root_proofs"` + // NamespaceID is the namespace id of the commitment being proven. This + // namespace id is used when verifying the proof. If the namespace id doesn't + // match the namespace of the shares, the proof will fail verification. + NamespaceID namespace.ID `json:"namespace_id"` + // RowProof is the proof of the rows containing the blob's data to the + // data root. + RowProof types.RowProof `json:"row_proof"` + NamespaceVersion uint8 `json:"namespace_version"` +} + +// Validate performs basic validation to the commitment proof. +// Note: it doesn't verify if the proof is valid or not. +// Check Verify() for that. 
+func (commitmentProof CommitmentProof) Validate() error { + if len(commitmentProof.SubtreeRoots) < len(commitmentProof.SubtreeRootProofs) { + return fmt.Errorf( + "the number of subtree roots %d should be bigger than the number of subtree root proofs %d", + len(commitmentProof.SubtreeRoots), + len(commitmentProof.SubtreeRootProofs), + ) + } + if len(commitmentProof.SubtreeRootProofs) != len(commitmentProof.RowProof.Proofs) { + return fmt.Errorf( + "the number of subtree root proofs %d should be equal to the number of row root proofs %d", + len(commitmentProof.SubtreeRoots), + len(commitmentProof.RowProof.Proofs), + ) + } + if int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1) != len(commitmentProof.RowProof.RowRoots) { + return fmt.Errorf( + "the number of rows %d must equal the number of row roots %d", + int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1), + len(commitmentProof.RowProof.RowRoots), + ) + } + if len(commitmentProof.RowProof.Proofs) != len(commitmentProof.RowProof.RowRoots) { + return fmt.Errorf( + "the number of proofs %d must equal the number of row roots %d", + len(commitmentProof.RowProof.Proofs), + len(commitmentProof.RowProof.RowRoots), + ) + } + return nil +} + +// Verify verifies that a commitment proof is valid, i.e., the subtree roots commit +// to some data that was posted to a square. +// Expects the commitment proof to be properly formulated and validated +// using the Validate() function. +func (commitmentProof CommitmentProof) Verify(root []byte, subtreeRootThreshold int) (bool, error) { + nmtHasher := nmt.NewNmtHasher(appconsts.NewBaseHashFunc(), share.NamespaceSize, true) + + // computes the total number of shares proven. + numberOfShares := 0 + for _, proof := range commitmentProof.SubtreeRootProofs { + numberOfShares += proof.End() - proof.Start() + } + + // use the computed total number of shares to calculate the subtree roots + // width. 
+ // the subtree roots width is defined in ADR-013: + // https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md + subtreeRootsWidth := shares.SubTreeWidth(numberOfShares, subtreeRootThreshold) + + // verify the proof of the subtree roots + subtreeRootsCursor := 0 + for i, subtreeRootProof := range commitmentProof.SubtreeRootProofs { + // calculate the share range that each subtree root commits to. + ranges, err := nmt.ToLeafRanges(subtreeRootProof.Start(), subtreeRootProof.End(), subtreeRootsWidth) + if err != nil { + return false, err + } + valid, err := subtreeRootProof.VerifySubtreeRootInclusion( + nmtHasher, + commitmentProof.SubtreeRoots[subtreeRootsCursor:subtreeRootsCursor+len(ranges)], + subtreeRootsWidth, + commitmentProof.RowProof.RowRoots[i], + ) + if err != nil { + return false, err + } + if !valid { + return false, fmt.Errorf("subtree root proof for range [%d, %d) is invalid", subtreeRootProof.Start(), subtreeRootProof.End()) + } + subtreeRootsCursor += len(ranges) + } + + // verify row roots to data root proof + return commitmentProof.RowProof.VerifyProof(root), nil +} + +// GenerateCommitment generates the share commitment of the corresponding subtree roots. +func (commitmentProof CommitmentProof) GenerateCommitment() bytes.HexBytes { + return merkle.HashFromByteSlices(commitmentProof.SubtreeRoots) +} + +// ResultSubtreeRootToCommitmentProof is an API response that contains a SubtreeRootToCommitmentProof. +// A subtree root to commitment proof is a proof of a subtree root to a share commitment. +type ResultSubtreeRootToCommitmentProof struct { + SubtreeRootToCommitmentProof SubtreeRootToCommitmentProof `json:"subtree_root_to_commitment_proof"` +} + +// SubtreeRootToCommitmentProof a subtree root to commitment proof is a proof of a subtree root to a share commitment. 
+type SubtreeRootToCommitmentProof struct { + Proof merkle.Proof `json:"proof"` +} + +// Verify verifies that a share commitment commits to the provided subtree root. +func (subtreeRootProof SubtreeRootToCommitmentProof) Verify(shareCommitment bytes.HexBytes, subtreeRoot []byte) (bool, error) { + err := subtreeRootProof.Proof.Verify(shareCommitment.Bytes(), subtreeRoot) + if err != nil { + return false, err + } + return true, nil +} + +// ResultShareToSubtreeRootProof is an API response that contains a ShareToSubtreeRootProof. +// A share to subtree root proof is an inclusion proof of a share to a subtree root. +type ResultShareToSubtreeRootProof struct { + ShareToSubtreeRootProof ShareToSubtreeRootProof `json:"share_to_subtree_root_proof"` +} + +// ShareToSubtreeRootProof a share to subtree root proof is an inclusion proof of a share to a subtree root. +type ShareToSubtreeRootProof struct { + Proof merkle.Proof `json:"proof"` +} + +// Verify verifies that a share commitment commits to the provided subtree root. 
+func (shareToSubtreeRootProof ShareToSubtreeRootProof) Verify(subtreeRoot []byte, share []byte) (bool, error) { + err := shareToSubtreeRootProof.Proof.Verify(subtreeRoot, share) + if err != nil { + return false, err + } + return true, nil +} diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 8f196f3b1d..4ca37293ad 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -2,6 +2,7 @@ package nodebuilder import ( "context" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "go.uber.org/fx" @@ -56,6 +57,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store node.ConstructModule(tp), pruner.ConstructModule(tp, &cfg.Pruner), rpc.ConstructModule(tp, &cfg.RPC), + blobstream.ConstructModule(), ) return fx.Module( diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 9ec1c4d4e0..31d11732c0 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "strings" "github.com/cristalhq/jwt" @@ -69,14 +70,15 @@ type Node struct { // p2p protocols PubSub *pubsub.PubSub // services - ShareServ share.Module // not optional - HeaderServ header.Module // not optional - StateServ state.Module // not optional - FraudServ fraud.Module // not optional - BlobServ blob.Module // not optional - DASer das.Module // not optional - AdminServ node.Module // not optional - DAMod da.Module // not optional + ShareServ share.Module // not optional + HeaderServ header.Module // not optional + StateServ state.Module // not optional + FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional + DASer das.Module // not optional + AdminServ node.Module // not optional + DAMod da.Module // not optional + BlobstreamMod blobstream.Module // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc diff --git a/nodebuilder/rpc/constructors.go 
b/nodebuilder/rpc/constructors.go index 43a8055207..6f83330740 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -1,6 +1,7 @@ package rpc import ( + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/cristalhq/jwt" "github.com/celestiaorg/celestia-node/api/rpc" @@ -26,6 +27,7 @@ func registerEndpoints( nodeMod node.Module, blobMod blob.Module, daMod da.Module, + blobstreamMod blobstream.Module, serv *rpc.Server, ) { serv.RegisterService("fraud", fraudMod, &fraud.API{}) @@ -37,6 +39,7 @@ func registerEndpoints( serv.RegisterService("node", nodeMod, &node.API{}) serv.RegisterService("blob", blobMod, &blob.API{}) serv.RegisterService("da", daMod, &da.API{}) + serv.RegisterService("blobstream", blobstreamMod, &blobstream.API{}) } func server(cfg *Config, auth jwt.Signer) *rpc.Server { From 41bfd50e7a02851f37b1e515786f4d5709df9ee8 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Tue, 4 Jun 2024 20:31:10 +0400 Subject: [PATCH 02/52] chore: add permissions to methods --- nodebuilder/blobstream/blobstream.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 42855fd0e8..03c4b0bbc1 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -28,10 +28,10 @@ type Module interface { } type Internal struct { - DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) - DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) - ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) - ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) + DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) `perm:"read"` + 
DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) `perm:"read"` + ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) `perm:"read"` + ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) `perm:"read"` } // API is a wrapper around the Module for RPC. From a3a32f3a37e3e8702e440b428e0c8c7af39fd29e Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:13:37 +0400 Subject: [PATCH 03/52] fix: use the share proofs to generate the commitment proofs --- nodebuilder/blobstream/blobstream.go | 13 ++- nodebuilder/blobstream/service.go | 140 ++++++++++++--------------- 2 files changed, 68 insertions(+), 85 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 03c4b0bbc1..677a3fddd3 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -3,7 +3,6 @@ package blobstream import ( "context" "github.com/celestiaorg/celestia-node/share" - "github.com/tendermint/tendermint/libs/bytes" ) var _ Module = (*API)(nil) @@ -24,14 +23,14 @@ type Module interface { // ProveShares generates a share proof for a share range. ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) // ProveCommitment generates a commitment proof for a share commitment. 
- ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) + ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) } type Internal struct { - DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) `perm:"read"` - DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) `perm:"read"` - ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) `perm:"read"` - ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) `perm:"read"` + DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) `perm:"read"` + DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) `perm:"read"` + ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) `perm:"read"` + ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) `perm:"read"` } // API is a wrapper around the Module for RPC. 
@@ -51,6 +50,6 @@ func (api *API) ProveShares(ctx context.Context, height uint64, start, end uint6 return api.Internal.ProveShares(ctx, height, start, end) } -func (api *API) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment bytes.HexBytes) (*ResultCommitmentProof, error) { +func (api *API) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) { return api.Internal.ProveCommitment(ctx, height, namespace, shareCommitment) } diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index c3e072f3de..7b6f292d61 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-node/blob" @@ -16,8 +17,6 @@ import ( "github.com/celestiaorg/nmt" logging "github.com/ipfs/go-log/v2" "github.com/tendermint/tendermint/crypto/merkle" - tmbytes "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" "math" "strconv" ) @@ -365,36 +364,82 @@ func (s *Service) ProveShares(ctx context.Context, height uint64, start, end uin // ProveCommitment generates a commitment proof for a share commitment. // It takes as argument the height of the block containing the blob of data, its // namespace and its share commitment. 
-func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment tmbytes.HexBytes) (*ResultCommitmentProof, error) { - log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) +func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) { + log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) if height == 0 { return nil, fmt.Errorf("height cannot be equal to 0") } - // get the share to row root proofs. these proofs coincide with the subtree root to row root proofs. - log.Debugw("getting the blob proof", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) - shareToRowRootProofs, err := s.blobServ.GetProof(ctx, height, namespace, blob.Commitment(shareCommitment)) + // get the blob to compute the subtree roots + log.Debugw("getting the blob", "height", height, "commitment", shareCommitment, "namespace", namespace) + blb, err := s.blobServ.Get(ctx, height, namespace, shareCommitment) if err != nil { return nil, err } - // get the blob to compute the subtree roots - log.Debugw("getting the blob", "height", height, "commitment", shareCommitment.Bytes(), "namespace", namespace) - blb, err := s.blobServ.Get(ctx, height, namespace, shareCommitment.Bytes()) + log.Debugw("converting the blob to shares", "height", height, "commitment", shareCommitment, "namespace", namespace) + blobShares, err := blob.BlobsToShares(blb) if err != nil { return nil, err } - log.Debugw("converting the blob to shares", "height", height, "commitment", shareCommitment, "namespace", namespace) - blobShares, err := blob.BlobsToShares(blb) + if len(blobShares) == 0 { + // TODO we return the share commitment as hex or some other format? 
+ return nil, fmt.Errorf("the blob shares for commitment %s are empty", hex.EncodeToString(shareCommitment)) + } + + // get the extended header + log.Debugw("getting the extended header", "height", height) + extendedHeader, err := s.headerServ.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + + log.Debugw("getting eds", "height", height) + eds, err := s.shareServ.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, err + } + + // find the blob shares in the EDS + blobSharesStartIndex := -1 + for index, share := range eds.FlattenedODS() { + if bytes2.Equal(share, blobShares[0]) { + blobSharesStartIndex = index + } + } + if blobSharesStartIndex < 0 { + return nil, fmt.Errorf("couldn't find the blob shares in the ODS") + } + + nID, err := appns.From(namespace) if err != nil { return nil, err } + log.Debugw("generating the blob share proof for commitment", "commitment", shareCommitment, "start_share", blobSharesStartIndex, "end_share", blobSharesStartIndex+len(blobShares), "height", height) + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares))) + if err != nil { + return nil, err + } + + // convert the shares to row root proofs to nmt proofs + var nmtProofs []*nmt.Proof + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), + int(proof.End), + proof.Nodes, + true) + nmtProofs = append( + nmtProofs, + &nmtProof, + ) + } + // compute the subtree roots of the blob shares log.Debugw("computing the subtree roots", "height", height, "commitment", shareCommitment, "namespace", namespace) var subtreeRoots [][]byte var dataCursor int - for _, proof := range *shareToRowRootProofs { + for _, proof := range nmtProofs { // TODO: do we want directly use the default subtree root threshold or want to allow specifying which version to use? 
ranges, err := nmt.ToLeafRanges(proof.Start(), proof.End(), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) if err != nil { @@ -408,74 +453,13 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace dataCursor += proof.End() - proof.Start() } - // get the extended header to get the row/column roots - log.Debugw("getting the extended header", "height", height) - extendedHeader, err := s.headerServ.GetByHeight(ctx, height) - if err != nil { - return nil, err - } - - // rowWidth is the width of the square's rows. - rowWidth := len(extendedHeader.DAH.ColumnRoots) - - // finding the rows of the square that contain the blob - log.Debugw("getting the eds rows", "height", height) - startingRowIndex := -1 - for index, row := range extendedHeader.DAH.RowRoots { - if startingRowIndex >= 0 { - // we found the starting row of the share data - break - } - if !namespace.IsOutsideRange(row, row) { - // we found the first row where the namespace data starts - // we should go over the row shares to find the row where the data lives - for i := 0; i < rowWidth; i++ { - // an alternative to this would be querying the whole EDS. - // if that's faster given the number of the queries to the network, - // we can change that in here. - sh, err := s.shareServ.GetShare(ctx, extendedHeader, index, i) - if err != nil { - return nil, err - } - if bytes2.Equal(sh, blobShares[0]) { - // if the queried share is the same as the blob's data first share, - // then we found the first row of our data. 
- startingRowIndex = index - break - } - } - } - } - - if startingRowIndex < 0 { - return nil, fmt.Errorf("couldn't find the blob starting row") - } - - // the blob's data row roots start at the starting row index, and span over the number of row proofs that we have - dataRowRoots := func() []tmbytes.HexBytes { - var tmBytesRowRoots []tmbytes.HexBytes - for _, rowRoot := range extendedHeader.DAH.RowRoots[startingRowIndex : startingRowIndex+len(*shareToRowRootProofs)] { - tmBytesRowRoots = append(tmBytesRowRoots, tmbytes.FromBytes(rowRoot)...) - } - return tmBytesRowRoots - }() - - // generate all the row proofs - log.Debugw("generating the row roots proofs", "height", height) - _, allRowProofs := merkle.ProofsFromByteSlices(append(extendedHeader.DAH.RowRoots, extendedHeader.DAH.ColumnRoots...)) - - log.Debugw("successfuly proved the share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) + log.Debugw("successfully proved the share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) commitmentProof := CommitmentProof{ SubtreeRoots: subtreeRoots, - SubtreeRootProofs: *shareToRowRootProofs, + SubtreeRootProofs: nmtProofs, NamespaceID: namespace.ID(), - RowProof: types.RowProof{ - RowRoots: dataRowRoots, - Proofs: allRowProofs[startingRowIndex : startingRowIndex+len(*shareToRowRootProofs)], - StartRow: uint32(startingRowIndex), // these conversions are safe because we return if the startingRowIndex is strictly negative - EndRow: uint32(startingRowIndex + len(*shareToRowRootProofs) - 1), - }, - NamespaceVersion: namespace.Version(), + RowProof: sharesProof.RowProof, + NamespaceVersion: namespace.Version(), } return &ResultCommitmentProof{CommitmentProof: commitmentProof}, nil From e5d2e1deb4d22d8eb366faf61381a35a16971f6f Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:14:13 +0400 Subject: [PATCH 04/52] chore: gofumpt --- nodebuilder/blobstream/blobstream.go | 1 + 
nodebuilder/blobstream/service.go | 5 +++-- nodebuilder/blobstream/service_test.go | 27 +++++++++++++------------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 677a3fddd3..70655a56b2 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -2,6 +2,7 @@ package blobstream import ( "context" + "github.com/celestiaorg/celestia-node/share" ) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 7b6f292d61..3709eda1fc 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -5,6 +5,9 @@ import ( "context" "encoding/hex" "fmt" + "math" + "strconv" + "github.com/celestiaorg/celestia-app/pkg/appconsts" appns "github.com/celestiaorg/celestia-app/pkg/namespace" pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" @@ -17,8 +20,6 @@ import ( "github.com/celestiaorg/nmt" logging "github.com/ipfs/go-log/v2" "github.com/tendermint/tendermint/crypto/merkle" - "math" - "strconv" ) var _ Module = (*Service)(nil) diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 209ae101f7..597562f9df 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -7,13 +7,14 @@ import ( "encoding/hex" "errors" "fmt" + "math" + "testing" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/go-header/sync" - "math" - "testing" "github.com/celestiaorg/celestia-app/test/util/blobfactory" @@ -922,7 +923,7 @@ type mockBlobService struct { } func (m mockBlobService) Submit(_ context.Context, _ []*blob.Blob, _ blob.GasPrice) (height uint64, _ error) { - //TODO implement me + // TODO implement me 
panic("implement me") } @@ -943,7 +944,7 @@ func (m mockBlobService) Get(ctx context.Context, height uint64, ns share.Namesp } func (m mockBlobService) GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -980,7 +981,7 @@ func (m mockBlobService) GetProof(ctx context.Context, height uint64, ns share.N } func (m mockBlobService) Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -991,7 +992,7 @@ type mockShareService struct { } func (m mockShareService) SharesAvailable(ctx context.Context, extendedHeader *header.ExtendedHeader) error { - //TODO implement me + // TODO implement me panic("implement me") } @@ -1010,7 +1011,7 @@ func (m mockShareService) GetEDS(ctx context.Context, header *header.ExtendedHea } func (m mockShareService) GetSharesByNamespace(ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace) (share.NamespacedShares, error) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -1031,12 +1032,12 @@ func (m mockHeaderService) LocalHead(ctx context.Context) (*header.ExtendedHeade } func (m mockHeaderService) GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { - //TODO implement me + // TODO implement me panic("implement me") } func (m mockHeaderService) GetRangeByHeight(ctx context.Context, from *header.ExtendedHeader, to uint64) ([]*header.ExtendedHeader, error) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -1054,17 +1055,17 @@ func (m mockHeaderService) GetByHeight(ctx context.Context, height uint64) (*hea } func (m mockHeaderService) WaitForHeight(ctx context.Context, u uint64) (*header.ExtendedHeader, error) { - //TODO implement me + // TODO implement me panic("implement me") } func (m mockHeaderService) 
SyncState(ctx context.Context) (sync.State, error) { - //TODO implement me + // TODO implement me panic("implement me") } func (m mockHeaderService) SyncWait(ctx context.Context) error { - //TODO implement me + // TODO implement me panic("implement me") } @@ -1081,6 +1082,6 @@ func (m mockHeaderService) NetworkHead(ctx context.Context) (*header.ExtendedHea } func (m mockHeaderService) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - //TODO implement me + // TODO implement me panic("implement me") } From cb6f660e6e1192d662fe93c6e849145756456fff Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:23:50 +0400 Subject: [PATCH 05/52] chore: docs --- nodebuilder/blobstream/service.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 3709eda1fc..dcb834ed56 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -365,6 +365,9 @@ func (s *Service) ProveShares(ctx context.Context, height uint64, start, end uin // ProveCommitment generates a commitment proof for a share commitment. // It takes as argument the height of the block containing the blob of data, its // namespace and its share commitment. +// Note: queries the whole EDS to generate the proof. +// This can be improved once `GetProof` returns the proof only for the blob and not the whole +// namespace. 
func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) { log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) if height == 0 { From 8220364440ffd3329dddda41d2563ec0aecb3be6 Mon Sep 17 00:00:00 2001 From: CHAMI Rachid Date: Wed, 5 Jun 2024 13:25:20 +0200 Subject: [PATCH 06/52] Update nodebuilder/blobstream/service.go --- nodebuilder/blobstream/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index dcb834ed56..7d4889157b 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -163,7 +163,7 @@ type DataRootTuple struct { // // padding the hex representation of the height padded to 32 bytes concatenated to the data root. // For more information, refer to: -// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { paddedHeight, err := To32PaddedHexBytes(height) if err != nil { From 15b59ddb4d41ce12e3956dda5e9261acad30755b Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:26:45 +0400 Subject: [PATCH 07/52] chore: go mod tidy --- go.sum | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index 2a4d85e41b..7a3678d0fe 100644 --- a/go.sum +++ b/go.sum @@ -369,8 +369,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26 h1:P2RI1xJ49EZ8cuHMcH+ZSBonfRDtBS8OS9Jdt1BWX3k= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26/go.mod 
h1:2m8ukndOegwB0PU0AfJCwDUQHqd7QQRlSXvQL5VToVY= -github.com/celestiaorg/nmt v0.21.0 h1:81MBqxNn3orByoiCtdNVjwi5WsLgMkzHwP02ZMhTBHM= -github.com/celestiaorg/nmt v0.21.0/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= +github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51 h1:vOLlAiHwCtXA7LNsXokDysmPHl2UvorPTARyhHQPQQA= +github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= github.com/celestiaorg/rsmt2d v0.13.1 h1:eRhp79DKTkDojwInKVs1lRK6f6zJc1BVlmZfUfI19yQ= From d31ee83fcb6f4d62da01c50471ffeffb1e2fd090 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:45:06 +0400 Subject: [PATCH 08/52] chore: lint --- api/rpc_test.go | 3 +- nodebuilder/blobstream/service.go | 16 +++++------ nodebuilder/blobstream/service_test.go | 38 +++++++++++++------------- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/api/rpc_test.go b/api/rpc_test.go index 665a9baf56..c0994b191f 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -3,12 +3,13 @@ package api import ( "context" "encoding/json" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "reflect" "strconv" "testing" "time" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cristalhq/jwt" "github.com/golang/mock/gomock" diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 7d4889157b..91f03f74ad 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -196,7 +196,7 @@ func (s *Service) validateDataCommitmentRange(ctx context.Context, start uint64, return err } // the data commitment range is end exclusive - if end > uint64(currentHeader.Height())+1 { + if end > 
currentHeader.Height()+1 { return fmt.Errorf( "end block %d is higher than current chain height %d", end, @@ -209,7 +209,7 @@ func (s *Service) validateDataCommitmentRange(ctx context.Context, start uint64, return err } // the data commitment range is end exclusive - if end > uint64(currentLocalHeader.Height())+1 { + if end > currentLocalHeader.Height()+1 { return fmt.Errorf( "end block %d is higher than local chain height %d. Wait for the node until it syncs up to %d", end, @@ -272,7 +272,7 @@ func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, e if tuple.height != currentHeight+1 { return nil, fmt.Errorf("the provided tuples are not consecutive %d vs %d", currentHeight, tuple.height) } - currentHeight += 1 + currentHeight++ } dataRootEncodedTuples := make([][]byte, 0, len(tuples)) for _, tuple := range tuples { @@ -427,7 +427,7 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace } // convert the shares to row root proofs to nmt proofs - var nmtProofs []*nmt.Proof + nmtProofs := make([]*nmt.Proof, 0) for _, proof := range sharesProof.ShareProofs { nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), @@ -441,8 +441,8 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace // compute the subtree roots of the blob shares log.Debugw("computing the subtree roots", "height", height, "commitment", shareCommitment, "namespace", namespace) - var subtreeRoots [][]byte - var dataCursor int + subtreeRoots := make([][]byte, 0) + dataCursor := 0 for _, proof := range nmtProofs { // TODO: do we want directly use the default subtree root threshold or want to allow specifying which version to use? 
ranges, err := nmt.ToLeafRanges(proof.Start(), proof.End(), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) @@ -485,7 +485,7 @@ func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset in // create a tree containing the shares to generate their subtree roots tree := nmt.New(appconsts.NewBaseHashFunc(), nmt.IgnoreMaxNamespace(true), nmt.NamespaceIDSize(share.NamespaceSize)) for _, sh := range shares { - var leafData []byte + leafData := make([]byte, 0) leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) err := tree.Push(leafData) if err != nil { @@ -494,7 +494,7 @@ func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset in } // generate the subtree roots - var subtreeRoots [][]byte + subtreeRoots := make([][]byte, 0) for _, rg := range ranges { root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) if err != nil { diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 597562f9df..1d5558100b 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -678,10 +678,10 @@ func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { kr := testfactory.GenerateKeyring(acc) signer := types.NewKeyringSigner(kr, acc, "test") - var msgs []*types.MsgPayForBlobs - var blobs []*types.Blob - var nss []namespace.Namespace - var coreTxs coretypes.Txs + msgs := make([]*types.MsgPayForBlobs, 0) + blobs := make([]*types.Blob, 0) + nss := make([]namespace.Namespace, 0) + coreTxs := make(coretypes.Txs, 0) for i := 0; i < numberOfBlobs; i++ { ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, blobSize) @@ -691,7 +691,7 @@ func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { coreTxs = append(coreTxs, coreTx) } - var txs coretypes.Txs + txs := make(coretypes.Txs, 0) txs = append(txs, coreTxs...) 
dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) require.NoError(t, err) @@ -753,8 +753,8 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i require.NoError(t, sharesProof.Validate(block.dataRoot)) // calculate the subtree roots - var subtreeRoots [][]byte - var dataCursor int + subtreeRoots := make([][]byte, 0) + dataCursor := 0 for _, proof := range sharesProof.ShareProofs { ranges, err := nmt.ToLeafRanges(int(proof.Start), int(proof.End), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) require.NoError(t, err) @@ -765,7 +765,7 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i } // convert the nmt proof to be accepted by the commitment proof - var nmtProofs []*nmt.Proof + nmtProofs := make([]*nmt.Proof, 0) for _, proof := range sharesProof.ShareProofs { nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) nmtProofs = append(nmtProofs, &nmtProof) @@ -785,11 +785,11 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i // generateTestBlocks generates a set of test blocks with a specific blob size and number of transactions func generateTestBlocks(t *testing.T, numberOfBlocks int, blobSize int, numberOfTransactions int) []testBlock { require.Greater(t, numberOfBlocks, 1) - var blocks []testBlock + blocks := make([]testBlock, 0) for i := 1; i <= numberOfBlocks; i++ { nss, msgs, blobs, coreTxs := createTestBlobTransactions(t, numberOfTransactions, blobSize) - var txs coretypes.Txs + txs := make(coretypes.Txs, 0) txs = append(txs, coreTxs...) 
dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) require.NoError(t, err) @@ -824,10 +824,10 @@ func createTestBlobTransactions(t *testing.T, numberOfTransactions int, size int kr := testfactory.GenerateKeyring(acc) signer := types.NewKeyringSigner(kr, acc, "test") - var nss []namespace.Namespace - var msgs []*types.MsgPayForBlobs - var blobs []*types.Blob - var coreTxs []coretypes.Tx + nss := make([]namespace.Namespace, 0) + msgs := make([]*types.MsgPayForBlobs, 0) + blobs := make([]*types.Blob, 0) + coreTxs := make([]coretypes.Tx, 0) for i := 0; i < numberOfTransactions; i++ { ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, size+i*1000) nss = append(nss, ns) @@ -860,7 +860,7 @@ func createTestBlobTransaction(t *testing.T, signer *types.KeyringSigner, size i } func TestShareToSubtreeRootProof(t *testing.T) { - var shares [][]byte + shares := make([][]byte, 0) // generate some shares for i := 0; i < 10; i++ { shares = append(shares, bytes.Repeat([]byte{0x1}, appconsts.ShareSize)) @@ -869,7 +869,7 @@ func TestShareToSubtreeRootProof(t *testing.T) { subtreeRoot, expectedProofs := merkle.ProofsFromByteSlices(shares) // calculate the actual proofs - var actualProofs []*ResultShareToSubtreeRootProof + actualProofs := make([]*ResultShareToSubtreeRootProof, 0) for i := range shares { proof, err := ProveShareToSubtreeRoot(shares, uint64(i)) require.NoError(t, err) @@ -889,7 +889,7 @@ func TestShareToSubtreeRootProof(t *testing.T) { func TestSubtreeRootsToCommitmentProof(t *testing.T) { rowRootSize := sha256.Size + 2*appconsts.NamespaceSize - var subtreeRoots [][]byte + subtreeRoots := make([][]byte, 0) // generate some subtreeRoots for i := 0; i < 10; i++ { subtreeRoots = append(subtreeRoots, bytes.Repeat([]byte{0x1}, rowRootSize)) @@ -898,7 +898,7 @@ func TestSubtreeRootsToCommitmentProof(t *testing.T) { shareCommitment, expectedProofs := 
merkle.ProofsFromByteSlices(subtreeRoots) // calculate the actual proofs - var actualProofs []*ResultSubtreeRootToCommitmentProof + actualProofs := make([]*ResultSubtreeRootToCommitmentProof, 0) for i := range subtreeRoots { proof, err := ProveSubtreeRootToCommitment(subtreeRoots, uint64(i)) require.NoError(t, err) @@ -962,7 +962,7 @@ func (m mockBlobService) GetProof(ctx context.Context, height uint64, ns share.N if err != nil { return nil, err } - var nmtProofs []*nmt.Proof + nmtProofs := make([]*nmt.Proof, 0) for _, proof := range proof.ShareProofs { nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), From a784be9fd92df3fb24e4a9c589df39a2e0555485 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 15:56:56 +0400 Subject: [PATCH 09/52] chore: lint --- nodebuilder/blobstream/blobstream.go | 61 +++- nodebuilder/blobstream/service.go | 128 +++++++-- nodebuilder/blobstream/service_test.go | 380 ++++++++++++++++++++----- nodebuilder/blobstream/types.go | 17 +- 4 files changed, 481 insertions(+), 105 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 70655a56b2..408136d022 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -19,19 +19,43 @@ type Module interface { // DataRootInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range // is end exclusive. - DataRootInclusionProof(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) + DataRootInclusionProof( + ctx context.Context, + height int64, + start, end uint64, + ) (*ResultDataRootInclusionProof, error) // ProveShares generates a share proof for a share range. 
- ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) + ProveShares(ctx context.Context, height, start, end uint64) (*ResultShareProof, error) // ProveCommitment generates a commitment proof for a share commitment. - ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) + ProveCommitment( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*ResultCommitmentProof, error) } type Internal struct { - DataCommitment func(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) `perm:"read"` - DataRootInclusionProof func(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) `perm:"read"` - ProveShares func(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) `perm:"read"` - ProveCommitment func(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) `perm:"read"` + DataCommitment func( + ctx context.Context, + start, end uint64, + ) (*ResultDataCommitment, error) `perm:"read"` + DataRootInclusionProof func( + ctx context.Context, + height int64, + start, end uint64, + ) (*ResultDataRootInclusionProof, error) `perm:"read"` + ProveShares func( + ctx context.Context, + height, start, end uint64, + ) (*ResultShareProof, error) `perm:"read"` + ProveCommitment func( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*ResultCommitmentProof, error) `perm:"read"` } // API is a wrapper around the Module for RPC. 
@@ -39,18 +63,33 @@ type API struct { Internal Internal } -func (api *API) DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { +func (api *API) DataCommitment( + ctx context.Context, + start, end uint64, +) (*ResultDataCommitment, error) { return api.Internal.DataCommitment(ctx, start, end) } -func (api *API) DataRootInclusionProof(ctx context.Context, height int64, start, end uint64) (*ResultDataRootInclusionProof, error) { +func (api *API) DataRootInclusionProof( + ctx context.Context, + height int64, + start, end uint64, +) (*ResultDataRootInclusionProof, error) { return api.Internal.DataRootInclusionProof(ctx, height, start, end) } -func (api *API) ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) { +func (api *API) ProveShares( + ctx context.Context, + height, start, end uint64, +) (*ResultShareProof, error) { return api.Internal.ProveShares(ctx, height, start, end) } -func (api *API) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) { +func (api *API) ProveCommitment( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*ResultCommitmentProof, error) { return api.Internal.ProveCommitment(ctx, height, namespace, shareCommitment) } diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 91f03f74ad..95f31755bb 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -12,14 +12,15 @@ import ( appns "github.com/celestiaorg/celestia-app/pkg/namespace" pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/nmt" + logging "github.com/ipfs/go-log/v2" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/celestiaorg/celestia-node/blob" nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" headerServ 
"github.com/celestiaorg/celestia-node/nodebuilder/header" shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/nmt" - logging "github.com/ipfs/go-log/v2" - "github.com/tendermint/tendermint/crypto/merkle" ) var _ Module = (*Service)(nil) @@ -71,7 +72,15 @@ func (s *Service) DataRootInclusionProof( start, end uint64, ) (*ResultDataRootInclusionProof, error) { - log.Debugw("validating the data root inclusion proof request", "start", start, "end", end, "height", height) + log.Debugw( + "validating the data root inclusion proof request", + "start", + start, + "end", + end, + "height", + height, + ) err := s.validateDataRootInclusionProofRequest(ctx, uint64(height), start, end) if err != nil { return nil, err @@ -179,7 +188,7 @@ const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-seco // validateDataCommitmentRange runs basic checks on the asc sorted list of // heights that will be used subsequently in generating data commitments over // the defined set of heights. -func (s *Service) validateDataCommitmentRange(ctx context.Context, start uint64, end uint64) error { +func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end uint64) error { if start == 0 { return fmt.Errorf("the start block is 0") } @@ -243,7 +252,10 @@ func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { // validateDataRootInclusionProofRequest validates the request to generate a data root // inclusion proof. 
-func (s *Service) validateDataRootInclusionProofRequest(ctx context.Context, height uint64, start uint64, end uint64) error { +func (s *Service) validateDataRootInclusionProofRequest( + ctx context.Context, + height, start, end uint64, +) error { err := s.validateDataCommitmentRange(ctx, start, end) if err != nil { return err @@ -315,7 +327,7 @@ func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([ // the proof and only querying them. However, that would require re-implementing the logic // in Core. Also, core also queries the whole EDS to generate the proof. So, it's fine for // now. In the future, when blocks get way bigger, we should revisit this and improve it. -func (s *Service) ProveShares(ctx context.Context, height uint64, start, end uint64) (*ResultShareProof, error) { +func (s *Service) ProveShares(ctx context.Context, height, start, end uint64) (*ResultShareProof, error) { log.Debugw("proving share range", "start", start, "end", end, "height", height) if height == 0 { return nil, fmt.Errorf("height cannot be equal to 0") @@ -368,20 +380,41 @@ func (s *Service) ProveShares(ctx context.Context, height uint64, start, end uin // Note: queries the whole EDS to generate the proof. // This can be improved once `GetProof` returns the proof only for the blob and not the whole // namespace. 
-func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte) (*ResultCommitmentProof, error) { +func (s *Service) ProveCommitment( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*ResultCommitmentProof, error) { log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) if height == 0 { return nil, fmt.Errorf("height cannot be equal to 0") } // get the blob to compute the subtree roots - log.Debugw("getting the blob", "height", height, "commitment", shareCommitment, "namespace", namespace) + log.Debugw( + "getting the blob", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) blb, err := s.blobServ.Get(ctx, height, namespace, shareCommitment) if err != nil { return nil, err } - log.Debugw("converting the blob to shares", "height", height, "commitment", shareCommitment, "namespace", namespace) + log.Debugw( + "converting the blob to shares", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) blobShares, err := blob.BlobsToShares(blb) if err != nil { return nil, err @@ -392,7 +425,11 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace } // get the extended header - log.Debugw("getting the extended header", "height", height) + log.Debugw( + "getting the extended header", + "height", + height, + ) extendedHeader, err := s.headerServ.GetByHeight(ctx, height) if err != nil { return nil, err @@ -420,8 +457,22 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace return nil, err } - log.Debugw("generating the blob share proof for commitment", "commitment", shareCommitment, "start_share", blobSharesStartIndex, "end_share", blobSharesStartIndex+len(blobShares), "height", height) - sharesProof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, 
shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares))) + log.Debugw( + "generating the blob share proof for commitment", + "commitment", + shareCommitment, + "start_share", + blobSharesStartIndex, + "end_share", + blobSharesStartIndex+len(blobShares), + "height", + height, + ) + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + nID, + shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), + ) if err != nil { return nil, err } @@ -440,16 +491,33 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace } // compute the subtree roots of the blob shares - log.Debugw("computing the subtree roots", "height", height, "commitment", shareCommitment, "namespace", namespace) + log.Debugw( + "computing the subtree roots", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) subtreeRoots := make([][]byte, 0) dataCursor := 0 for _, proof := range nmtProofs { - // TODO: do we want directly use the default subtree root threshold or want to allow specifying which version to use? - ranges, err := nmt.ToLeafRanges(proof.Start(), proof.End(), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) + // TODO: do we want directly use the default subtree root threshold + // or want to allow specifying which version to use? 
+ ranges, err := nmt.ToLeafRanges( + proof.Start(), + proof.End(), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) if err != nil { return nil, err } - roots, err := computeSubtreeRoots(blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], ranges, proof.Start()) + roots, err := computeSubtreeRoots( + blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], + ranges, + proof.Start(), + ) if err != nil { return nil, err } @@ -457,7 +525,15 @@ func (s *Service) ProveCommitment(ctx context.Context, height uint64, namespace dataCursor += proof.End() - proof.Start() } - log.Debugw("successfully proved the share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) + log.Debugw( + "successfully proved the share commitment", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) commitmentProof := CommitmentProof{ SubtreeRoots: subtreeRoots, SubtreeRootProofs: nmtProofs, @@ -483,7 +559,11 @@ func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset in } // create a tree containing the shares to generate their subtree roots - tree := nmt.New(appconsts.NewBaseHashFunc(), nmt.IgnoreMaxNamespace(true), nmt.NamespaceIDSize(share.NamespaceSize)) + tree := nmt.New( + appconsts.NewBaseHashFunc(), + nmt.IgnoreMaxNamespace(true), + nmt.NamespaceIDSize(share.NamespaceSize), + ) for _, sh := range shares { leafData := make([]byte, 0) leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) @@ -515,7 +595,10 @@ func uint64ToInt(number uint64) (int, error) { // ProveSubtreeRootToCommitment generates a subtree root to share commitment inclusion proof. // Note: this method is not part of the API. It will not be served by any endpoint, however, // it can be called directly programmatically. 
-func ProveSubtreeRootToCommitment(subtreeRoots [][]byte, subtreeRootIndex uint64) (*ResultSubtreeRootToCommitmentProof, error) { +func ProveSubtreeRootToCommitment( + subtreeRoots [][]byte, + subtreeRootIndex uint64, +) (*ResultSubtreeRootToCommitmentProof, error) { _, proofs := merkle.ProofsFromByteSlices(subtreeRoots) return &ResultSubtreeRootToCommitmentProof{ SubtreeRootToCommitmentProof: SubtreeRootToCommitmentProof{ @@ -527,7 +610,10 @@ func ProveSubtreeRootToCommitment(subtreeRoots [][]byte, subtreeRootIndex uint64 // ProveShareToSubtreeRoot generates a share to subtree root inclusion proof // Note: this method is not part of the API. It will not be served by any endpoint, however, // it can be called directly programmatically. -func ProveShareToSubtreeRoot(shares [][]byte, shareIndex uint64) (*ResultShareToSubtreeRootProof, error) { +func ProveShareToSubtreeRoot( + shares [][]byte, + shareIndex uint64, +) (*ResultShareToSubtreeRootProof, error) { _, proofs := merkle.ProofsFromByteSlices(shares) return &ResultShareToSubtreeRootProof{ ShareToSubtreeRootProof: ShareToSubtreeRootProof{ diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 1d5558100b..90ec01c3db 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -10,11 +10,12 @@ import ( "math" "testing" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" - libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/go-header/sync" "github.com/celestiaorg/celestia-app/test/util/blobfactory" @@ -28,9 +29,6 @@ import ( "github.com/celestiaorg/celestia-app/pkg/square" "github.com/celestiaorg/celestia-app/test/util/testfactory" "github.com/celestiaorg/celestia-app/x/blob/types" - 
"github.com/celestiaorg/celestia-node/blob" - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/stretchr/testify/assert" @@ -38,6 +36,10 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" bytes2 "github.com/tendermint/tendermint/libs/bytes" coretypes "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" ) func TestPadBytes(t *testing.T) { @@ -384,11 +386,30 @@ func TestDataCommitment(t *testing.T) { expectedDataCommitment bytes2.HexBytes expectErr bool }{ - "start == 0": {start: 0, expectErr: true}, - "start block == end block": {start: 2, end: 2, expectErr: true}, - "start block > end block": {start: 3, end: 2, expectErr: true}, - "range exceeds data commitment blocks limit": {start: 3, end: dataCommitmentBlocksLimit + 10, expectErr: true}, - "end block is greater than the network block height": {start: 3, end: 15, expectErr: true}, + "start == 0": { + start: 0, + expectErr: true, + }, + "start block == end block": { + start: 2, + end: 2, + expectErr: true, + }, + "start block > end block": { + start: 3, + end: 2, + expectErr: true, + }, + "range exceeds data commitment blocks limit": { + start: 3, + end: dataCommitmentBlocksLimit + 10, + expectErr: true, + }, + "end block is greater than the network block height": { + start: 3, + end: 15, + expectErr: true, + }, "valid case": { start: 5, end: 9, @@ -439,28 +460,95 @@ func TestDataRootInclusionProof(t *testing.T) { expectedProof merkle.Proof expectErr bool }{ - "height < 0": {height: -1, expectErr: true}, - "height == 0": {height: 0, expectErr: true}, - "start == 0": {start: 0, expectErr: true}, - "start block == end block": {start: 2, end: 2, expectErr: true}, - "start block > end block": {start: 3, end: 2, expectErr: true}, - "height < start": {height: 2, 
start: 3, end: 2, expectErr: true}, - "height == end": {height: 4, start: 3, end: 4, expectErr: true}, - "height > end": {height: 5, start: 3, end: 4, expectErr: true}, - "range exceeds data commitment blocks limit": {start: 3, end: dataCommitmentBlocksLimit + 10, expectErr: true}, - "end block is greater than the network block height": {start: 3, end: 15, expectErr: true}, - "start block is greater than the network block height": {start: 12, end: 15, height: 14, expectErr: true}, - "height block is greater than the network block height": {start: 1, end: 15, height: 14, expectErr: true}, + "height < 0": { + height: -1, + expectErr: true, + }, + "height == 0": { + height: 0, + expectErr: true, + }, + "start == 0": { + start: 0, + expectErr: true, + }, + "start block == end block": { + start: 2, + end: 2, + expectErr: true, + }, + "start block > end block": { + start: 3, + end: 2, + expectErr: true, + }, + "height < start": { + height: 2, + start: 3, + end: 2, + expectErr: true, + }, + "height == end": { + height: 4, + start: 3, + end: 4, + expectErr: true, + }, + "height > end": { + height: 5, + start: 3, + end: 4, + expectErr: true, + }, + "range exceeds data commitment blocks limit": { + start: 3, + end: dataCommitmentBlocksLimit + 10, + expectErr: true, + }, + "end block is greater than the network block height": { + start: 3, + end: 15, + expectErr: true, + }, + "start block is greater than the network block height": { + start: 12, + end: 15, + height: 14, + expectErr: true, + }, + "height block is greater than the network block height": { + start: 1, + end: 15, + height: 14, + expectErr: true, + }, "valid case": { height: 6, start: 5, end: 9, expectedProof: func() merkle.Proof { - encodedTuple5, _ := EncodeDataRootTuple(5, [32]byte(api.blocks[5].dataRoot)) - encodedTuple6, _ := EncodeDataRootTuple(6, [32]byte(api.blocks[6].dataRoot)) - encodedTuple7, _ := EncodeDataRootTuple(7, [32]byte(api.blocks[7].dataRoot)) - encodedTuple8, _ := EncodeDataRootTuple(8, 
[32]byte(api.blocks[8].dataRoot)) - _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple5, encodedTuple6, encodedTuple7, encodedTuple8}) + encodedTuple5, _ := EncodeDataRootTuple( + 5, + [32]byte(api.blocks[5].dataRoot), + ) + encodedTuple6, _ := EncodeDataRootTuple( + 6, + [32]byte(api.blocks[6].dataRoot), + ) + encodedTuple7, _ := EncodeDataRootTuple( + 7, + [32]byte(api.blocks[7].dataRoot), + ) + encodedTuple8, _ := EncodeDataRootTuple( + 8, + [32]byte(api.blocks[8].dataRoot), + ) + _, proofs := merkle.ProofsFromByteSlices([][]byte{ + encodedTuple5, + encodedTuple6, + encodedTuple7, + encodedTuple8, + }) return *proofs[1] }(), }, @@ -487,18 +575,44 @@ func TestProveShares(t *testing.T) { expectedProof ResultShareProof expectErr bool }{ - "height == 0": {height: 0, expectErr: true}, - "height > blockchain tip": {height: 100, expectErr: true}, - "start share == end share": {start: 2, end: 2, expectErr: true}, - "start share > end share": {start: 3, end: 2, expectErr: true}, - "start share > number of shares in the block": {start: 200, end: 201, expectErr: true}, - "end share > number of shares in the block": {start: 1, end: 201, expectErr: true}, + "height == 0": { + height: 0, + expectErr: true, + }, + "height > blockchain tip": { + height: 100, + expectErr: true, + }, + "start share == end share": { + start: 2, + end: 2, + expectErr: true, + }, + "start share > end share": { + start: 3, + end: 2, + expectErr: true, + }, + "start share > number of shares in the block": { + start: 200, + end: 201, + expectErr: true, + }, + "end share > number of shares in the block": { + start: 1, + end: 201, + expectErr: true, + }, "valid case": { height: 6, start: 0, end: 2, expectedProof: func() ResultShareProof { - proof, err := pkgproof.NewShareInclusionProofFromEDS(api.blocks[6].eds, namespace.PayForBlobNamespace, shares.NewRange(0, 2)) + proof, err := pkgproof.NewShareInclusionProofFromEDS( + api.blocks[6].eds, + namespace.PayForBlobNamespace, + shares.NewRange(0, 
2), + ) require.NoError(t, err) require.NoError(t, proof.Validate(api.blocks[6].dataRoot)) return ResultShareProof{ShareProof: proof} @@ -603,19 +717,30 @@ func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { for msgIndex, msg := range block.msgs { t.Run(fmt.Sprintf("height=%d, blobIndex=%d", blockIndex, msgIndex), func(t *testing.T) { // compute the commitment - actualCommitmentProof, err := api.blobService.ProveCommitment(context.Background(), uint64(blockIndex), msg.Namespaces[0], msg.ShareCommitments[0]) + actualCommitmentProof, err := api.blobService.ProveCommitment( + context.Background(), + uint64(blockIndex), + msg.Namespaces[0], + msg.ShareCommitments[0], + ) require.NoError(t, err) // make sure the actual commitment attests to the data require.NoError(t, actualCommitmentProof.CommitmentProof.Validate()) - valid, err := actualCommitmentProof.CommitmentProof.Verify(block.dataRoot, appconsts.DefaultSubtreeRootThreshold) + valid, err := actualCommitmentProof.CommitmentProof.Verify( + block.dataRoot, + appconsts.DefaultSubtreeRootThreshold, + ) require.NoError(t, err) require.True(t, valid) // generate an expected proof and verify it's valid expectedCommitmentProof := generateCommitmentProofFromBlock(t, block, msgIndex) require.NoError(t, expectedCommitmentProof.CommitmentProof.Validate()) - valid, err = expectedCommitmentProof.CommitmentProof.Verify(block.dataRoot, appconsts.DefaultSubtreeRootThreshold) + valid, err = expectedCommitmentProof.CommitmentProof.Verify( + block.dataRoot, + appconsts.DefaultSubtreeRootThreshold, + ) require.NoError(t, err) require.True(t, valid) @@ -651,9 +776,12 @@ type testAPI struct { // newTestAPI creates a new test API that fetches data from a test blockchain that has // a specific number of blocks. Each block has a number of PFBs. Each PFB has a single blob with // size blobSize or bigger. 
-func newTestAPI(t *testing.T, numberOfBlocks int, blobSize int, numberOfPFBs int) *testAPI { +func newTestAPI(t *testing.T, numberOfBlocks, blobSize, numberOfPFBs int) *testAPI { blocks := []testBlock{{}} // so that the heights match the slice indexes - blocks = append(blocks, generateTestBlocks(t, numberOfBlocks, blobSize, numberOfPFBs)...) + blocks = append( + blocks, + generateTestBlocks(t, numberOfBlocks, blobSize, numberOfPFBs)..., + ) newTestService := NewService( mockBlobService{blocks}, @@ -693,7 +821,11 @@ func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { txs := make(coretypes.Txs, 0) txs = append(txs, coreTxs...) - dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) + dataSquare, err := square.Construct( + txs.ToSliceOfBytes(), + appconsts.LatestVersion, + appconsts.SquareSizeUpperBound(appconsts.LatestVersion), + ) require.NoError(t, err) // erasure the data square which we use to create the data root. @@ -720,17 +852,26 @@ func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { // generateCommitmentProofFromBlock takes a block and a PFB index and generates the commitment proof // using the traditional way of doing, instead of using the API. 
-func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex int) ResultCommitmentProof { +func generateCommitmentProofFromBlock( + t *testing.T, + block testBlock, + blobIndex int, +) ResultCommitmentProof { // parse the namespace ns, err := share.NamespaceFromBytes( append( - []byte{byte(block.blobs[blobIndex].NamespaceVersion)}, block.blobs[blobIndex].NamespaceId..., + []byte{byte(block.blobs[blobIndex].NamespaceVersion)}, + block.blobs[blobIndex].NamespaceId..., ), ) require.NoError(t, err) // create the blob from the data - blb, err := blob.NewBlob(uint8(block.blobs[blobIndex].ShareVersion), ns, block.blobs[blobIndex].Data) + blb, err := blob.NewBlob( + uint8(block.blobs[blobIndex].ShareVersion), + ns, + block.blobs[blobIndex].Data, + ) require.NoError(t, err) // convert the blob to a number of shares @@ -748,7 +889,11 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i require.Greater(t, startShareIndex, 0) // create an inclusion proof of the blob using the share range instead of the commitment - sharesProof, err := pkgproof.NewShareInclusionProofFromEDS(block.eds, ns.ToAppNamespace(), shares.NewRange(startShareIndex, startShareIndex+len(blobShares))) + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + block.eds, + ns.ToAppNamespace(), + shares.NewRange(startShareIndex, startShareIndex+len(blobShares)), + ) require.NoError(t, err) require.NoError(t, sharesProof.Validate(block.dataRoot)) @@ -756,9 +901,17 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i subtreeRoots := make([][]byte, 0) dataCursor := 0 for _, proof := range sharesProof.ShareProofs { - ranges, err := nmt.ToLeafRanges(int(proof.Start), int(proof.End), shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold)) + ranges, err := nmt.ToLeafRanges( + int(proof.Start), + int(proof.End), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) require.NoError(t, 
err) - roots, err := computeSubtreeRoots(blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], ranges, int(proof.Start)) + roots, err := computeSubtreeRoots( + blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], + ranges, + int(proof.Start), + ) require.NoError(t, err) subtreeRoots = append(subtreeRoots, roots...) dataCursor += int(proof.End - proof.Start) @@ -783,15 +936,26 @@ func generateCommitmentProofFromBlock(t *testing.T, block testBlock, blobIndex i } // generateTestBlocks generates a set of test blocks with a specific blob size and number of transactions -func generateTestBlocks(t *testing.T, numberOfBlocks int, blobSize int, numberOfTransactions int) []testBlock { +func generateTestBlocks( + t *testing.T, + numberOfBlocks, blobSize, numberOfTransactions int, +) []testBlock { require.Greater(t, numberOfBlocks, 1) blocks := make([]testBlock, 0) for i := 1; i <= numberOfBlocks; i++ { - nss, msgs, blobs, coreTxs := createTestBlobTransactions(t, numberOfTransactions, blobSize) + nss, msgs, blobs, coreTxs := createTestBlobTransactions( + t, + numberOfTransactions, + blobSize, + ) txs := make(coretypes.Txs, 0) txs = append(txs, coreTxs...) - dataSquare, err := square.Construct(txs.ToSliceOfBytes(), appconsts.LatestVersion, appconsts.SquareSizeUpperBound(appconsts.LatestVersion)) + dataSquare, err := square.Construct( + txs.ToSliceOfBytes(), + appconsts.LatestVersion, + appconsts.SquareSizeUpperBound(appconsts.LatestVersion), + ) require.NoError(t, err) // erasure the data square which we use to create the data root. @@ -819,7 +983,10 @@ func generateTestBlocks(t *testing.T, numberOfBlocks int, blobSize int, numberOf // createTestBlobTransactions generates a set of transactions that can be added to a blob. // The number of transactions dictates the number of PFBs that will be returned. // The size refers to the size of the data contained in the PFBs in bytes. 
-func createTestBlobTransactions(t *testing.T, numberOfTransactions int, size int) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { +func createTestBlobTransactions( + t *testing.T, + numberOfTransactions, size int, +) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { acc := "blobstream-api-tests" kr := testfactory.GenerateKeyring(acc) signer := types.NewKeyringSigner(kr, acc, "test") @@ -841,7 +1008,11 @@ func createTestBlobTransactions(t *testing.T, numberOfTransactions int, size int // createTestBlobTransaction creates a test blob transaction using a specific signer and a specific PFB size. // The size is in bytes. -func createTestBlobTransaction(t *testing.T, signer *types.KeyringSigner, size int) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { +func createTestBlobTransaction( + t *testing.T, + signer *types.KeyringSigner, + size int, +) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { addr, err := signer.GetSignerInfo().GetAddress() require.NoError(t, err) @@ -908,10 +1079,17 @@ func TestSubtreeRootsToCommitmentProof(t *testing.T) { // compare the proofs and validate for subtreeRootIndex, actualProof := range actualProofs { t.Run(fmt.Sprintf("subtreeRootIndex=%d", subtreeRootIndex), func(t *testing.T) { - valid, err := actualProof.SubtreeRootToCommitmentProof.Verify(shareCommitment, subtreeRoots[subtreeRootIndex]) + valid, err := actualProof.SubtreeRootToCommitmentProof.Verify( + shareCommitment, + subtreeRoots[subtreeRootIndex], + ) assert.NoError(t, err) assert.True(t, valid) - assert.Equal(t, *expectedProofs[subtreeRootIndex], actualProof.SubtreeRootToCommitmentProof.Proof) + assert.Equal( + t, + *expectedProofs[subtreeRootIndex], + actualProof.SubtreeRootToCommitmentProof.Proof, + ) }) } } @@ -922,18 +1100,31 @@ type mockBlobService struct { blocks []testBlock } -func (m mockBlobService) Submit(_ context.Context, _ []*blob.Blob, _ 
blob.GasPrice) (height uint64, _ error) { +func (m mockBlobService) Submit( + _ context.Context, + _ []*blob.Blob, + _ blob.GasPrice, +) (height uint64, _ error) { // TODO implement me panic("implement me") } -func (m mockBlobService) Get(ctx context.Context, height uint64, ns share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { +func (m mockBlobService) Get( + ctx context.Context, + height uint64, + ns share.Namespace, + commitment blob.Commitment, +) (*blob.Blob, error) { if height > uint64(len(m.blocks)) { return nil, errors.New("height greater than the blockchain") } for i, msg := range m.blocks[height].msgs { if bytes.Equal(msg.ShareCommitments[0], commitment) { - blb, err := blob.NewBlob(uint8(m.blocks[height].blobs[i].ShareVersion), ns, m.blocks[height].blobs[i].Data) + blb, err := blob.NewBlob( + uint8(m.blocks[height].blobs[i].ShareVersion), + ns, + m.blocks[height].blobs[i].Data, + ) if err != nil { return nil, err } @@ -943,22 +1134,40 @@ func (m mockBlobService) Get(ctx context.Context, height uint64, ns share.Namesp return nil, fmt.Errorf("coudln't find commitment") } -func (m mockBlobService) GetAll(_ context.Context, height uint64, _ []share.Namespace) ([]*blob.Blob, error) { +func (m mockBlobService) GetAll( + _ context.Context, + height uint64, + _ []share.Namespace, +) ([]*blob.Blob, error) { // TODO implement me panic("implement me") } -func (m mockBlobService) GetProof(ctx context.Context, height uint64, ns share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { +func (m mockBlobService) GetProof( + ctx context.Context, + height uint64, + ns share.Namespace, + commitment blob.Commitment, +) (*blob.Proof, error) { if height >= uint64(len(m.blocks)) { return nil, errors.New("height greater than the blockchain") } for i, msg := range m.blocks[height].msgs { if bytes.Equal(msg.ShareCommitments[0], commitment) { - blobShareRange, err := square.BlobShareRange(m.blocks[height].coreTxs.ToSliceOfBytes(), i, 0, 
appconsts.LatestVersion) + blobShareRange, err := square.BlobShareRange( + m.blocks[height].coreTxs.ToSliceOfBytes(), + i, + 0, + appconsts.LatestVersion, + ) if err != nil { return nil, err } - proof, err := pkgproof.NewShareInclusionProofFromEDS(m.blocks[height].eds, m.blocks[height].nss[i], blobShareRange) + proof, err := pkgproof.NewShareInclusionProofFromEDS( + m.blocks[height].eds, + m.blocks[height].nss[i], + blobShareRange, + ) if err != nil { return nil, err } @@ -980,7 +1189,13 @@ func (m mockBlobService) GetProof(ctx context.Context, height uint64, ns share.N return nil, fmt.Errorf("coudln't find commitment") } -func (m mockBlobService) Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) { +func (m mockBlobService) Included( + _ context.Context, + height uint64, + _ share.Namespace, + _ *blob.Proof, + _ blob.Commitment, +) (bool, error) { // TODO implement me panic("implement me") } @@ -991,26 +1206,40 @@ type mockShareService struct { blocks []testBlock } -func (m mockShareService) SharesAvailable(ctx context.Context, extendedHeader *header.ExtendedHeader) error { +func (m mockShareService) SharesAvailable( + ctx context.Context, + extendedHeader *header.ExtendedHeader, +) error { // TODO implement me panic("implement me") } -func (m mockShareService) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { +func (m mockShareService) GetShare( + ctx context.Context, + header *header.ExtendedHeader, + row, col int, +) (share.Share, error) { if header.Height() > uint64(len(m.blocks)) { return nil, errors.New("height greater than the blockchain") } return m.blocks[header.Height()].eds.GetCell(uint(row), uint(col)), nil } -func (m mockShareService) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { +func (m mockShareService) GetEDS( + ctx context.Context, + header *header.ExtendedHeader, +) 
(*rsmt2d.ExtendedDataSquare, error) { if header.Height() >= uint64(len(m.blocks)) { return nil, errors.New("height greater than the blockchain") } return m.blocks[header.Height()].eds, nil } -func (m mockShareService) GetSharesByNamespace(ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace) (share.NamespacedShares, error) { +func (m mockShareService) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (share.NamespacedShares, error) { // TODO implement me panic("implement me") } @@ -1031,17 +1260,27 @@ func (m mockHeaderService) LocalHead(ctx context.Context) (*header.ExtendedHeade }, nil } -func (m mockHeaderService) GetByHash(ctx context.Context, hash libhead.Hash) (*header.ExtendedHeader, error) { +func (m mockHeaderService) GetByHash( + ctx context.Context, + hash libhead.Hash, +) (*header.ExtendedHeader, error) { // TODO implement me panic("implement me") } -func (m mockHeaderService) GetRangeByHeight(ctx context.Context, from *header.ExtendedHeader, to uint64) ([]*header.ExtendedHeader, error) { +func (m mockHeaderService) GetRangeByHeight( + ctx context.Context, + from *header.ExtendedHeader, + to uint64, +) ([]*header.ExtendedHeader, error) { // TODO implement me panic("implement me") } -func (m mockHeaderService) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { +func (m mockHeaderService) GetByHeight( + ctx context.Context, + height uint64, +) (*header.ExtendedHeader, error) { if height >= uint64(len(m.blocks)) { return nil, errors.New("height greater than the blockchain") } @@ -1054,7 +1293,10 @@ func (m mockHeaderService) GetByHeight(ctx context.Context, height uint64) (*hea }, nil } -func (m mockHeaderService) WaitForHeight(ctx context.Context, u uint64) (*header.ExtendedHeader, error) { +func (m mockHeaderService) WaitForHeight( + ctx context.Context, + u uint64, +) (*header.ExtendedHeader, error) { // TODO implement me panic("implement 
me") } diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index 1c45cd94db..60c8ba34a7 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -7,11 +7,12 @@ import ( "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/namespace" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/celestiaorg/celestia-node/share" + "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/types" ) @@ -131,7 +132,12 @@ func (commitmentProof CommitmentProof) Verify(root []byte, subtreeRootThreshold return false, err } if !valid { - return false, fmt.Errorf("subtree root proof for range [%d, %d) is invalid", subtreeRootProof.Start(), subtreeRootProof.End()) + return false, + fmt.Errorf( + "subtree root proof for range [%d, %d) is invalid", + subtreeRootProof.Start(), + subtreeRootProof.End(), + ) } subtreeRootsCursor += len(ranges) } @@ -157,7 +163,10 @@ type SubtreeRootToCommitmentProof struct { } // Verify verifies that a share commitment commits to the provided subtree root. -func (subtreeRootProof SubtreeRootToCommitmentProof) Verify(shareCommitment bytes.HexBytes, subtreeRoot []byte) (bool, error) { +func (subtreeRootProof SubtreeRootToCommitmentProof) Verify( + shareCommitment bytes.HexBytes, + subtreeRoot []byte, +) (bool, error) { err := subtreeRootProof.Proof.Verify(shareCommitment.Bytes(), subtreeRoot) if err != nil { return false, err @@ -177,7 +186,7 @@ type ShareToSubtreeRootProof struct { } // Verify verifies that a share commitment commits to the provided subtree root. 
-func (shareToSubtreeRootProof ShareToSubtreeRootProof) Verify(subtreeRoot []byte, share []byte) (bool, error) { +func (shareToSubtreeRootProof ShareToSubtreeRootProof) Verify(subtreeRoot, share []byte) (bool, error) { err := shareToSubtreeRootProof.Proof.Verify(subtreeRoot, share) if err != nil { return false, err From 4463b4ac7c0a0424bded160af1ff57e81f73999c Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 16:01:46 +0400 Subject: [PATCH 10/52] chore: gofumpt --- api/rpc/client/client.go | 3 ++- nodebuilder/module.go | 1 + nodebuilder/node.go | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index f77b774dd1..4d9b0b01ad 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -3,9 +3,10 @@ package client import ( "context" "fmt" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "net/http" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + "github.com/filecoin-project/go-jsonrpc" "github.com/celestiaorg/celestia-node/api/rpc/perms" diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 4ca37293ad..314ae4fd90 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -2,6 +2,7 @@ package nodebuilder import ( "context" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "go.uber.org/fx" diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 31d11732c0..869ad0ef22 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -4,9 +4,10 @@ import ( "context" "errors" "fmt" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "strings" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + "github.com/cristalhq/jwt" "github.com/ipfs/boxo/blockservice" "github.com/ipfs/boxo/exchange" From 1953fc5e3bccf377c3158f72c3543990c7a3dc47 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 16:02:12 +0400 Subject: [PATCH 11/52] chore: gofumpt --- nodebuilder/rpc/constructors.go | 3 
++- share/eds/cache/noop.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 6f83330740..414e4a7a47 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -1,9 +1,10 @@ package rpc import ( - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/cristalhq/jwt" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/da" diff --git a/share/eds/cache/noop.go b/share/eds/cache/noop.go index 8e1c17924a..e07df982ba 100644 --- a/share/eds/cache/noop.go +++ b/share/eds/cache/noop.go @@ -38,6 +38,7 @@ var _ Accessor = (*NoopAccessor)(nil) type NoopAccessor struct{} func (n NoopAccessor) Blockstore() (dagstore.ReadBlockstore, error) { + //nolint:all return nil, nil } From eb0b29e5afb02d7b9d461d596bf373cff0b03dce Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 5 Jun 2024 16:05:35 +0400 Subject: [PATCH 12/52] chore: gofumpt --- share/eds/cache/noop.go | 1 - 1 file changed, 1 deletion(-) diff --git a/share/eds/cache/noop.go b/share/eds/cache/noop.go index e07df982ba..8e1c17924a 100644 --- a/share/eds/cache/noop.go +++ b/share/eds/cache/noop.go @@ -38,7 +38,6 @@ var _ Accessor = (*NoopAccessor)(nil) type NoopAccessor struct{} func (n NoopAccessor) Blockstore() (dagstore.ReadBlockstore, error) { - //nolint:all return nil, nil } From e4c59107d64cee30b7d07b33688917708f3e191c Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 7 Jun 2024 16:42:11 +0400 Subject: [PATCH 13/52] chore: lowercase import --- api/rpc_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/rpc_test.go b/api/rpc_test.go index c0994b191f..9772e3303f 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -24,7 +24,7 @@ import ( 
"github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" - BlobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" + blobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -304,7 +304,7 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), daMock.NewMockModule(ctrl), - BlobstreamMock.NewMockModule(ctrl), + blobstreamMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -345,5 +345,5 @@ type mockAPI struct { Node *nodeMock.MockModule Blob *blobMock.MockModule DA *daMock.MockModule - Blobstream *BlobstreamMock.MockModule + Blobstream *blobstreamMock.MockModule } From f8a30668432f6aae6254a04f9b552b7f3ef5a36a Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 7 Jun 2024 17:57:06 +0400 Subject: [PATCH 14/52] chore: internal struct definition inside API --- nodebuilder/blobstream/blobstream.go | 44 +++++++++++++--------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 408136d022..6441567f72 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -36,31 +36,29 @@ type Module interface { ) (*ResultCommitmentProof, error) } -type Internal struct { - DataCommitment func( - ctx context.Context, - start, end uint64, - ) (*ResultDataCommitment, error) `perm:"read"` - DataRootInclusionProof func( - ctx context.Context, - height int64, - start, end uint64, - ) (*ResultDataRootInclusionProof, error) `perm:"read"` - ProveShares 
func( - ctx context.Context, - height, start, end uint64, - ) (*ResultShareProof, error) `perm:"read"` - ProveCommitment func( - ctx context.Context, - height uint64, - namespace share.Namespace, - shareCommitment []byte, - ) (*ResultCommitmentProof, error) `perm:"read"` -} - // API is a wrapper around the Module for RPC. type API struct { - Internal Internal + Internal struct { + DataCommitment func( + ctx context.Context, + start, end uint64, + ) (*ResultDataCommitment, error) `perm:"read"` + DataRootInclusionProof func( + ctx context.Context, + height int64, + start, end uint64, + ) (*ResultDataRootInclusionProof, error) `perm:"read"` + ProveShares func( + ctx context.Context, + height, start, end uint64, + ) (*ResultShareProof, error) `perm:"read"` + ProveCommitment func( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*ResultCommitmentProof, error) `perm:"read"` + } } func (api *API) DataCommitment( From a409722021cddc86063090ababb0cb3e14570b42 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 12:27:51 +0100 Subject: [PATCH 15/52] chore: fmt --- api/rpc/client/client.go | 3 +-- api/rpc_test.go | 3 +-- blob/parser.go | 3 ++- blob/service_test.go | 4 ++-- nodebuilder/blobstream/service.go | 12 +++++----- nodebuilder/blobstream/service_test.go | 31 +++++++++++++------------- nodebuilder/blobstream/types.go | 24 +++++++++++--------- nodebuilder/module.go | 3 +-- nodebuilder/node.go | 3 +-- nodebuilder/rpc/constructors.go | 3 +-- share/eds/byzantine/share_proof.go | 4 ++-- 11 files changed, 46 insertions(+), 47 deletions(-) diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index 4d9b0b01ad..56b4a54d19 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -5,12 +5,11 @@ import ( "fmt" "net/http" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - "github.com/filecoin-project/go-jsonrpc" "github.com/celestiaorg/celestia-node/api/rpc/perms" 
"github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" diff --git a/api/rpc_test.go b/api/rpc_test.go index 9772e3303f..598acc45c8 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cristalhq/jwt" "github.com/golang/mock/gomock" @@ -24,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" blobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" diff --git a/blob/parser.go b/blob/parser.go index 6c103698b0..1b5ddc4a7a 100644 --- a/blob/parser.go +++ b/blob/parser.go @@ -70,7 +70,8 @@ func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComple return shares[index+1:], true } -// parse ensures that correct amount of shares was collected and create a blob from the existing shares. +// parse ensures that correct amount of shares was collected and create a blob from the existing +// shares. func (p *parser) parse() (*Blob, error) { if p.length != len(p.shares) { return nil, fmt.Errorf("invalid shares amount. 
want:%d, have:%d", p.length, len(p.shares)) diff --git a/blob/service_test.go b/blob/service_test.go index b1d9c78998..360a95d9fd 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -426,8 +426,8 @@ func TestService_Get(t *testing.T) { } // TestService_GetAllWithoutPadding it retrieves all blobs under the given namespace: -// the amount of the blobs is known and equal to 5. Then it ensures that each blob has a correct index inside the eds -// by requesting share and comparing them. +// the number of the blobs is known and equal to 5. Then it ensures that each blob has a correct +// index inside the eds by requesting share and comparing them. func TestService_GetAllWithoutPadding(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 95f31755bb..ae3a917d4d 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -8,13 +8,14 @@ import ( "math" "strconv" + logging "github.com/ipfs/go-log/v2" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/celestiaorg/celestia-app/pkg/appconsts" appns "github.com/celestiaorg/celestia-app/pkg/namespace" pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/nmt" - logging "github.com/ipfs/go-log/v2" - "github.com/tendermint/tendermint/crypto/merkle" "github.com/celestiaorg/celestia-node/blob" nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" @@ -140,7 +141,8 @@ func To32PaddedHexBytes(number uint64) ([]byte, error) { // DataRootTuple contains the data that will be used to create the QGB commitments. // The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. 
-// For more information: https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +// For more information: +// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol type DataRootTuple struct { height uint64 dataRoot [32]byte @@ -229,8 +231,8 @@ func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end ui return nil } -// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square size, -// then returns their merkle root. +// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square +// size, then returns their merkle root. func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { if len(tuples) == 0 { return nil, fmt.Errorf("cannot hash an empty list of data root tuples") diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 90ec01c3db..393bb6e832 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -10,14 +10,11 @@ import ( "math" "testing" - libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/go-header/sync" - - nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" - headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" - shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" - - "github.com/celestiaorg/celestia-app/test/util/blobfactory" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" + bytes2 "github.com/tendermint/tendermint/libs/bytes" + coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/app" "github.com/celestiaorg/celestia-app/app/encoding" @@ -27,18 +24,19 @@ import ( pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-app/pkg/square" + 
"github.com/celestiaorg/celestia-app/test/util/blobfactory" "github.com/celestiaorg/celestia-app/test/util/testfactory" "github.com/celestiaorg/celestia-app/x/blob/types" + libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/go-header/sync" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/merkle" - bytes2 "github.com/tendermint/tendermint/libs/bytes" - coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/header" + nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" + shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/share" ) @@ -935,7 +933,8 @@ func generateCommitmentProofFromBlock( return ResultCommitmentProof{CommitmentProof: commitmentProof} } -// generateTestBlocks generates a set of test blocks with a specific blob size and number of transactions +// generateTestBlocks generates a set of test blocks with a specific blob size and number of +// transactions func generateTestBlocks( t *testing.T, numberOfBlocks, blobSize, numberOfTransactions int, @@ -1006,8 +1005,8 @@ func createTestBlobTransactions( return nss, msgs, blobs, coreTxs } -// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific PFB size. -// The size is in bytes. +// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific +// PFB size. The size is in bytes. 
func createTestBlobTransaction( t *testing.T, signer *types.KeyringSigner, diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index 60c8ba34a7..bce23e636d 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -3,18 +3,16 @@ package blobstream import ( "fmt" - "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" - + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/namespace" - "github.com/tendermint/tendermint/crypto/merkle" "github.com/celestiaorg/celestia-node/share" - - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" ) // ResultDataCommitment is the API response containing a data @@ -111,7 +109,8 @@ func (commitmentProof CommitmentProof) Verify(root []byte, subtreeRootThreshold // use the computed total number of shares to calculate the subtree roots // width. // the subtree roots width is defined in ADR-013: - // https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md + // + //https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md subtreeRootsWidth := shares.SubTreeWidth(numberOfShares, subtreeRootThreshold) // verify the proof of the subtree roots @@ -151,13 +150,15 @@ func (commitmentProof CommitmentProof) GenerateCommitment() bytes.HexBytes { return merkle.HashFromByteSlices(commitmentProof.SubtreeRoots) } -// ResultSubtreeRootToCommitmentProof is an API response that contains a SubtreeRootToCommitmentProof. -// A subtree root to commitment proof is a proof of a subtree root to a share commitment. 
+// ResultSubtreeRootToCommitmentProof is an API response that contains a +// SubtreeRootToCommitmentProof. A subtree root to commitment proof is a proof of a subtree root to +// a share commitment. type ResultSubtreeRootToCommitmentProof struct { SubtreeRootToCommitmentProof SubtreeRootToCommitmentProof `json:"subtree_root_to_commitment_proof"` } -// SubtreeRootToCommitmentProof a subtree root to commitment proof is a proof of a subtree root to a share commitment. +// SubtreeRootToCommitmentProof a subtree root to commitment proof is a proof of a subtree root to +// a share commitment. type SubtreeRootToCommitmentProof struct { Proof merkle.Proof `json:"proof"` } @@ -180,7 +181,8 @@ type ResultShareToSubtreeRootProof struct { ShareToSubtreeRootProof ShareToSubtreeRootProof `json:"share_to_subtree_root_proof"` } -// ShareToSubtreeRootProof a share to subtree root proof is an inclusion proof of a share to a subtree root. +// ShareToSubtreeRootProof a share to subtree root proof is an inclusion proof of a share to a +// subtree root. 
type ShareToSubtreeRootProof struct { Proof merkle.Proof `json:"proof"` } diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 314ae4fd90..5a774b8b9b 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -3,13 +3,12 @@ package nodebuilder import ( "context" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - "go.uber.org/fx" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 869ad0ef22..bf8aec668b 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -6,8 +6,6 @@ import ( "fmt" "strings" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - "github.com/cristalhq/jwt" "github.com/ipfs/boxo/blockservice" "github.com/ipfs/boxo/exchange" @@ -24,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 414e4a7a47..5db26e52f5 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -3,10 +3,9 @@ package rpc import ( "github.com/cristalhq/jwt" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" 
"github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go index dbc687e54b..d064656830 100644 --- a/share/eds/byzantine/share_proof.go +++ b/share/eds/byzantine/share_proof.go @@ -72,8 +72,8 @@ func (s *ShareWithProof) ShareWithProofToProto() *pb.Share { } } -// GetShareWithProof attempts to get a share with proof for the given share. It first tries to get a row proof -// and if that fails or proof is invalid, it tries to get a column proof. +// GetShareWithProof attempts to get a share with proof for the given share. It first tries to get +// a row proof and if that fails or proof is invalid, it tries to get a column proof. func GetShareWithProof( ctx context.Context, bGetter blockservice.BlockGetter, From 44f17a1dfc90532ef7805b3a4006541665735d76 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 12:31:58 +0100 Subject: [PATCH 16/52] chore: DataCommitment to GetDataCommitment as suggested by @vgonkivs --- nodebuilder/blobstream/blobstream.go | 10 ++++----- nodebuilder/blobstream/mocks/api.go | 31 +++++++++++++------------- nodebuilder/blobstream/service.go | 4 ++-- nodebuilder/blobstream/service_test.go | 2 +- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 6441567f72..d4a0f019bf 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -12,9 +12,9 @@ var _ Module = (*API)(nil) // //go:generate mockgen -destination=mocks/api.go -package=mocks . Module type Module interface { - // DataCommitment collects the data roots over a provided ordered range of blocks, + // GetDataCommitment collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. 
The range is end exclusive. - DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) + GetDataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) // DataRootInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range @@ -39,7 +39,7 @@ type Module interface { // API is a wrapper around the Module for RPC. type API struct { Internal struct { - DataCommitment func( + GetDataCommitment func( ctx context.Context, start, end uint64, ) (*ResultDataCommitment, error) `perm:"read"` @@ -61,11 +61,11 @@ type API struct { } } -func (api *API) DataCommitment( +func (api *API) GetDataCommitment( ctx context.Context, start, end uint64, ) (*ResultDataCommitment, error) { - return api.Internal.DataCommitment(ctx, start, end) + return api.Internal.GetDataCommitment(ctx, start, end) } func (api *API) DataRootInclusionProof( diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go index a6f5928f34..b27f10fd32 100644 --- a/nodebuilder/blobstream/mocks/api.go +++ b/nodebuilder/blobstream/mocks/api.go @@ -11,7 +11,6 @@ import ( blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" share "github.com/celestiaorg/celestia-node/share" gomock "github.com/golang/mock/gomock" - bytes "github.com/tendermint/tendermint/libs/bytes" ) // MockModule is a mock of Module interface. @@ -37,38 +36,38 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { return m.recorder } -// DataCommitment mocks base method. -func (m *MockModule) DataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { +// DataRootInclusionProof mocks base method. 
+func (m *MockModule) DataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DataCommitment", arg0, arg1, arg2) - ret0, _ := ret[0].(*blobstream.ResultDataCommitment) + ret := m.ctrl.Call(m, "DataRootInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) ret1, _ := ret[1].(error) return ret0, ret1 } -// DataCommitment indicates an expected call of DataCommitment. -func (mr *MockModuleMockRecorder) DataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { +// DataRootInclusionProof indicates an expected call of DataRootInclusionProof. +func (mr *MockModuleMockRecorder) DataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataCommitment", reflect.TypeOf((*MockModule)(nil).DataCommitment), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).DataRootInclusionProof), arg0, arg1, arg2, arg3) } -// DataRootInclusionProof mocks base method. -func (m *MockModule) DataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { +// GetDataCommitment mocks base method. +func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DataRootInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) + ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) + ret0, _ := ret[0].(*blobstream.ResultDataCommitment) ret1, _ := ret[1].(error) return ret0, ret1 } -// DataRootInclusionProof indicates an expected call of DataRootInclusionProof. 
-func (mr *MockModuleMockRecorder) DataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// GetDataCommitment indicates an expected call of GetDataCommitment. +func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).DataRootInclusionProof), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) } // ProveCommitment mocks base method. -func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 bytes.HexBytes) (*blobstream.ResultCommitmentProof, error) { +func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blobstream.ResultCommitmentProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ProveCommitment", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*blobstream.ResultCommitmentProof) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index ae3a917d4d..57c652553e 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -42,9 +42,9 @@ func NewService(blobMod nodeblob.Module, headerMod headerServ.Module, shareMod s } } -// DataCommitment collects the data roots over a provided ordered range of blocks, +// GetDataCommitment collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. 
-func (s *Service) DataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { +func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { log.Debugw("validating the data commitment range", "start", start, "end", end) err := s.validateDataCommitmentRange(ctx, start, end) if err != nil { diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 393bb6e832..3d63c3c3fb 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -439,7 +439,7 @@ func TestDataCommitment(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - result, err := api.blobService.DataCommitment(context.Background(), tc.start, tc.end) + result, err := api.blobService.GetDataCommitment(context.Background(), tc.start, tc.end) if tc.expectErr { assert.Error(t, err) } else { From 080101a51a2c318b8879918b4f65c6ec0712e60d Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 13:42:10 +0100 Subject: [PATCH 17/52] chore: DataCommitment to GetDataCommitment as suggested by @vgonkivs --- nodebuilder/blobstream/service_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 3d63c3c3fb..57629472e8 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -377,7 +377,7 @@ func TestUint64ToInt(t *testing.T) { } } -func TestDataCommitment(t *testing.T) { +func TestGetDataCommitment(t *testing.T) { api := newTestAPI(t, 10, 1000, 10) tests := map[string]struct { start, end uint64 From 5fc0203f80f8b243766303864a639192820dc5c0 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 13:43:57 +0100 Subject: [PATCH 18/52] chore: DataRootInclusionProof to GetDataRootInclusionProof as suggested by @vgonkivs --- nodebuilder/blobstream/blobstream.go | 10 ++++----- 
nodebuilder/blobstream/mocks/api.go | 28 +++++++++++++------------- nodebuilder/blobstream/service.go | 4 ++-- nodebuilder/blobstream/service_test.go | 4 ++-- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index d4a0f019bf..85bd04a9e2 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -16,10 +16,10 @@ type Module interface { // and then creates a new Merkle root of those data roots. The range is end exclusive. GetDataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) - // DataRootInclusionProof creates an inclusion proof for the data root of block + // GetDataRootInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range // is end exclusive. - DataRootInclusionProof( + GetDataRootInclusionProof( ctx context.Context, height int64, start, end uint64, @@ -43,7 +43,7 @@ type API struct { ctx context.Context, start, end uint64, ) (*ResultDataCommitment, error) `perm:"read"` - DataRootInclusionProof func( + GetDataRootInclusionProof func( ctx context.Context, height int64, start, end uint64, @@ -68,12 +68,12 @@ func (api *API) GetDataCommitment( return api.Internal.GetDataCommitment(ctx, start, end) } -func (api *API) DataRootInclusionProof( +func (api *API) GetDataRootInclusionProof( ctx context.Context, height int64, start, end uint64, ) (*ResultDataRootInclusionProof, error) { - return api.Internal.DataRootInclusionProof(ctx, height, start, end) + return api.Internal.GetDataRootInclusionProof(ctx, height, start, end) } func (api *API) ProveShares( diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go index b27f10fd32..f096f7e1fd 100644 --- a/nodebuilder/blobstream/mocks/api.go +++ b/nodebuilder/blobstream/mocks/api.go @@ -36,34 +36,34 @@ func (m *MockModule) EXPECT() 
*MockModuleMockRecorder { return m.recorder } -// DataRootInclusionProof mocks base method. -func (m *MockModule) DataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { +// GetDataCommitment mocks base method. +func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DataRootInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) + ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) + ret0, _ := ret[0].(*blobstream.ResultDataCommitment) ret1, _ := ret[1].(error) return ret0, ret1 } -// DataRootInclusionProof indicates an expected call of DataRootInclusionProof. -func (mr *MockModuleMockRecorder) DataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// GetDataCommitment indicates an expected call of GetDataCommitment. +func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).DataRootInclusionProof), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) } -// GetDataCommitment mocks base method. -func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { +// GetDataRootInclusionProof mocks base method. 
+func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) - ret0, _ := ret[0].(*blobstream.ResultDataCommitment) + ret := m.ctrl.Call(m, "GetDataRootInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDataCommitment indicates an expected call of GetDataCommitment. -func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { +// GetDataRootInclusionProof indicates an expected call of GetDataRootInclusionProof. +func (mr *MockModuleMockRecorder) GetDataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootInclusionProof), arg0, arg1, arg2, arg3) } // ProveCommitment mocks base method. diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 57c652553e..cfac827585 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -64,10 +64,10 @@ func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*Re return &ResultDataCommitment{DataCommitment: root}, nil } -// DataRootInclusionProof creates an inclusion proof for the data root of block +// GetDataRootInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range // is end exclusive. 
-func (s *Service) DataRootInclusionProof( +func (s *Service) GetDataRootInclusionProof( ctx context.Context, height int64, start, diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 57629472e8..6ffa75bf9d 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -450,7 +450,7 @@ func TestGetDataCommitment(t *testing.T) { } } -func TestDataRootInclusionProof(t *testing.T) { +func TestGetDataRootInclusionProof(t *testing.T) { api := newTestAPI(t, 10, 1000, 10) tests := map[string]struct { height int64 @@ -554,7 +554,7 @@ func TestDataRootInclusionProof(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - result, err := api.blobService.DataRootInclusionProof(context.Background(), tc.height, tc.start, tc.end) + result, err := api.blobService.GetDataRootInclusionProof(context.Background(), tc.height, tc.start, tc.end) if tc.expectErr { assert.Error(t, err) } else { From 350d9503b3289cd9f939eab2551414686d5e115a Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 14:45:58 +0100 Subject: [PATCH 19/52] chore: remove todo --- nodebuilder/blobstream/types.go | 1 - 1 file changed, 1 deletion(-) diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index bce23e636d..174c12b378 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -40,7 +40,6 @@ type ResultCommitmentProof struct { } // CommitmentProof is an inclusion proof of a commitment to the data root. -// TODO: Ask reviewers if we need protobuf definitions for this type CommitmentProof struct { // SubtreeRoots are the subtree roots of the blob's data that are // used to create the commitment. 
From 411fc209817aab54d2b0e53e40babd33547e5d7a Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 14:51:17 +0100 Subject: [PATCH 20/52] chore: commitment proof pointer receiver --- nodebuilder/blobstream/types.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index 174c12b378..439a6e4f25 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -60,7 +60,7 @@ type CommitmentProof struct { // Validate performs basic validation to the commitment proof. // Note: it doesn't verify if the proof is valid or not. // Check Verify() for that. -func (commitmentProof CommitmentProof) Validate() error { +func (commitmentProof *CommitmentProof) Validate() error { if len(commitmentProof.SubtreeRoots) < len(commitmentProof.SubtreeRootProofs) { return fmt.Errorf( "the number of subtree roots %d should be bigger than the number of subtree root proofs %d", @@ -96,7 +96,7 @@ func (commitmentProof CommitmentProof) Validate() error { // to some data that was posted to a square. // Expects the commitment proof to be properly formulated and validated // using the Validate() function. -func (commitmentProof CommitmentProof) Verify(root []byte, subtreeRootThreshold int) (bool, error) { +func (commitmentProof *CommitmentProof) Verify(root []byte, subtreeRootThreshold int) (bool, error) { nmtHasher := nmt.NewNmtHasher(appconsts.NewBaseHashFunc(), share.NamespaceSize, true) // computes the total number of shares proven. @@ -145,7 +145,7 @@ func (commitmentProof CommitmentProof) Verify(root []byte, subtreeRootThreshold } // GenerateCommitment generates the share commitment of the corresponding subtree roots. 
-func (commitmentProof CommitmentProof) GenerateCommitment() bytes.HexBytes { +func (commitmentProof *CommitmentProof) GenerateCommitment() bytes.HexBytes { return merkle.HashFromByteSlices(commitmentProof.SubtreeRoots) } From 9e48e55ca81feb2b713fa5b6460e59ff86aee770 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 14:52:22 +0100 Subject: [PATCH 21/52] fix: subtreeRootProofs instead of SubtreeRoots in error message --- nodebuilder/blobstream/types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index 439a6e4f25..e3254c6abf 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -71,7 +71,7 @@ func (commitmentProof *CommitmentProof) Validate() error { if len(commitmentProof.SubtreeRootProofs) != len(commitmentProof.RowProof.Proofs) { return fmt.Errorf( "the number of subtree root proofs %d should be equal to the number of row root proofs %d", - len(commitmentProof.SubtreeRoots), + len(commitmentProof.SubtreeRootProofs), len(commitmentProof.RowProof.Proofs), ) } From f24171ecbd191595bdd6f085c7942835d621a47d Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 14:53:41 +0100 Subject: [PATCH 22/52] chore: new line --- nodebuilder/blobstream/blobstream.go | 1 + 1 file changed, 1 insertion(+) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index 85bd04a9e2..bfcadcce9e 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -27,6 +27,7 @@ type Module interface { // ProveShares generates a share proof for a share range. ProveShares(ctx context.Context, height, start, end uint64) (*ResultShareProof, error) + // ProveCommitment generates a commitment proof for a share commitment. 
ProveCommitment( ctx context.Context, From 9b79dba8e5ece0d7e245728febd66d2bc533b7a1 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Thu, 20 Jun 2024 15:23:39 +0100 Subject: [PATCH 23/52] chore: revert unnecessary changes --- blob/parser.go | 3 +-- blob/service_test.go | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/blob/parser.go b/blob/parser.go index 1b5ddc4a7a..6c103698b0 100644 --- a/blob/parser.go +++ b/blob/parser.go @@ -70,8 +70,7 @@ func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComple return shares[index+1:], true } -// parse ensures that correct amount of shares was collected and create a blob from the existing -// shares. +// parse ensures that correct amount of shares was collected and create a blob from the existing shares. func (p *parser) parse() (*Blob, error) { if p.length != len(p.shares) { return nil, fmt.Errorf("invalid shares amount. want:%d, have:%d", p.length, len(p.shares)) diff --git a/blob/service_test.go b/blob/service_test.go index 360a95d9fd..b1d9c78998 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -426,8 +426,8 @@ func TestService_Get(t *testing.T) { } // TestService_GetAllWithoutPadding it retrieves all blobs under the given namespace: -// the number of the blobs is known and equal to 5. Then it ensures that each blob has a correct -// index inside the eds by requesting share and comparing them. +// the amount of the blobs is known and equal to 5. Then it ensures that each blob has a correct index inside the eds +// by requesting share and comparing them. 
func TestService_GetAllWithoutPadding(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) From 7db0ab92e27af0182fe252c199dc039aaa049d26 Mon Sep 17 00:00:00 2001 From: CHAMI Rachid Date: Thu, 20 Jun 2024 16:25:09 +0200 Subject: [PATCH 24/52] Update nodebuilder/blobstream/service.go --- nodebuilder/blobstream/service.go | 1 - 1 file changed, 1 deletion(-) diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index cfac827585..5ff589e761 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -422,7 +422,6 @@ func (s *Service) ProveCommitment( return nil, err } if len(blobShares) == 0 { - // TODO we return the share commitment as hex or some other format? return nil, fmt.Errorf("the blob shares for commitment %s are empty", hex.EncodeToString(shareCommitment)) } From 02be8ac38d1bab9f8951ef730e578eac445291b8 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Wed, 3 Jul 2024 14:09:04 +0100 Subject: [PATCH 25/52] chore: unwrap API types --- blob/parser.go | 3 +- blob/service_test.go | 4 +-- nodebuilder/blobstream/blobstream.go | 26 +++++++++-------- nodebuilder/blobstream/mocks/api.go | 17 +++++------ nodebuilder/blobstream/service.go | 19 ++++++------ nodebuilder/blobstream/service_test.go | 40 +++++++++++++------------- nodebuilder/blobstream/types.go | 25 +++------------- 7 files changed, 62 insertions(+), 72 deletions(-) diff --git a/blob/parser.go b/blob/parser.go index 6c103698b0..1b5ddc4a7a 100644 --- a/blob/parser.go +++ b/blob/parser.go @@ -70,7 +70,8 @@ func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComple return shares[index+1:], true } -// parse ensures that correct amount of shares was collected and create a blob from the existing shares. +// parse ensures that correct amount of shares was collected and create a blob from the existing +// shares. 
func (p *parser) parse() (*Blob, error) { if p.length != len(p.shares) { return nil, fmt.Errorf("invalid shares amount. want:%d, have:%d", p.length, len(p.shares)) diff --git a/blob/service_test.go b/blob/service_test.go index b1d9c78998..7a99e92e06 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -426,8 +426,8 @@ func TestService_Get(t *testing.T) { } // TestService_GetAllWithoutPadding it retrieves all blobs under the given namespace: -// the amount of the blobs is known and equal to 5. Then it ensures that each blob has a correct index inside the eds -// by requesting share and comparing them. +// the amount of the blobs is known and equal to 5. Then it ensures that each blob has a correct +// index inside the eds by requesting share and comparing them. func TestService_GetAllWithoutPadding(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go index bfcadcce9e..3dfcc44a22 100644 --- a/nodebuilder/blobstream/blobstream.go +++ b/nodebuilder/blobstream/blobstream.go @@ -3,6 +3,8 @@ package blobstream import ( "context" + "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-node/share" ) @@ -14,7 +16,7 @@ var _ Module = (*API)(nil) type Module interface { // GetDataCommitment collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. - GetDataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) + GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) // GetDataRootInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. 
The range @@ -23,10 +25,10 @@ type Module interface { ctx context.Context, height int64, start, end uint64, - ) (*ResultDataRootInclusionProof, error) + ) (*DataRootTupleInclusionProof, error) // ProveShares generates a share proof for a share range. - ProveShares(ctx context.Context, height, start, end uint64) (*ResultShareProof, error) + ProveShares(ctx context.Context, height, start, end uint64) (*types.ShareProof, error) // ProveCommitment generates a commitment proof for a share commitment. ProveCommitment( @@ -34,7 +36,7 @@ type Module interface { height uint64, namespace share.Namespace, shareCommitment []byte, - ) (*ResultCommitmentProof, error) + ) (*CommitmentProof, error) } // API is a wrapper around the Module for RPC. @@ -43,29 +45,29 @@ type API struct { GetDataCommitment func( ctx context.Context, start, end uint64, - ) (*ResultDataCommitment, error) `perm:"read"` + ) (*DataCommitment, error) `perm:"read"` GetDataRootInclusionProof func( ctx context.Context, height int64, start, end uint64, - ) (*ResultDataRootInclusionProof, error) `perm:"read"` + ) (*DataRootTupleInclusionProof, error) `perm:"read"` ProveShares func( ctx context.Context, height, start, end uint64, - ) (*ResultShareProof, error) `perm:"read"` + ) (*types.ShareProof, error) `perm:"read"` ProveCommitment func( ctx context.Context, height uint64, namespace share.Namespace, shareCommitment []byte, - ) (*ResultCommitmentProof, error) `perm:"read"` + ) (*CommitmentProof, error) `perm:"read"` } } func (api *API) GetDataCommitment( ctx context.Context, start, end uint64, -) (*ResultDataCommitment, error) { +) (*DataCommitment, error) { return api.Internal.GetDataCommitment(ctx, start, end) } @@ -73,14 +75,14 @@ func (api *API) GetDataRootInclusionProof( ctx context.Context, height int64, start, end uint64, -) (*ResultDataRootInclusionProof, error) { +) (*DataRootTupleInclusionProof, error) { return api.Internal.GetDataRootInclusionProof(ctx, height, start, end) } func (api *API) 
ProveShares( ctx context.Context, height, start, end uint64, -) (*ResultShareProof, error) { +) (*types.ShareProof, error) { return api.Internal.ProveShares(ctx, height, start, end) } @@ -89,6 +91,6 @@ func (api *API) ProveCommitment( height uint64, namespace share.Namespace, shareCommitment []byte, -) (*ResultCommitmentProof, error) { +) (*CommitmentProof, error) { return api.Internal.ProveCommitment(ctx, height, namespace, shareCommitment) } diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go index f096f7e1fd..be3dae60ad 100644 --- a/nodebuilder/blobstream/mocks/api.go +++ b/nodebuilder/blobstream/mocks/api.go @@ -11,6 +11,7 @@ import ( blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" share "github.com/celestiaorg/celestia-node/share" gomock "github.com/golang/mock/gomock" + types "github.com/tendermint/tendermint/types" ) // MockModule is a mock of Module interface. @@ -37,10 +38,10 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // GetDataCommitment mocks base method. -func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.ResultDataCommitment, error) { +func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.DataCommitment, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) - ret0, _ := ret[0].(*blobstream.ResultDataCommitment) + ret0, _ := ret[0].(*blobstream.DataCommitment) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -52,10 +53,10 @@ func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{} } // GetDataRootInclusionProof mocks base method. 
-func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.ResultDataRootInclusionProof, error) { +func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.DataRootTupleInclusionProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDataRootInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.ResultDataRootInclusionProof) + ret0, _ := ret[0].(*blobstream.DataRootTupleInclusionProof) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -67,10 +68,10 @@ func (mr *MockModuleMockRecorder) GetDataRootInclusionProof(arg0, arg1, arg2, ar } // ProveCommitment mocks base method. -func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blobstream.ResultCommitmentProof, error) { +func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blobstream.CommitmentProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ProveCommitment", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.ResultCommitmentProof) + ret0, _ := ret[0].(*blobstream.CommitmentProof) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -82,10 +83,10 @@ func (mr *MockModuleMockRecorder) ProveCommitment(arg0, arg1, arg2, arg3 interfa } // ProveShares mocks base method. 
-func (m *MockModule) ProveShares(arg0 context.Context, arg1, arg2, arg3 uint64) (*blobstream.ResultShareProof, error) { +func (m *MockModule) ProveShares(arg0 context.Context, arg1, arg2, arg3 uint64) (*types.ShareProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ProveShares", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.ResultShareProof) + ret0, _ := ret[0].(*types.ShareProof) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go index 5ff589e761..7ba6f5824f 100644 --- a/nodebuilder/blobstream/service.go +++ b/nodebuilder/blobstream/service.go @@ -10,6 +10,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" appns "github.com/celestiaorg/celestia-app/pkg/namespace" @@ -44,7 +45,7 @@ func NewService(blobMod nodeblob.Module, headerMod headerServ.Module, shareMod s // GetDataCommitment collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. 
-func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*ResultDataCommitment, error) { +func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { log.Debugw("validating the data commitment range", "start", start, "end", end) err := s.validateDataCommitmentRange(ctx, start, end) if err != nil { @@ -61,7 +62,8 @@ func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*Re return nil, err } // Create data commitment - return &ResultDataCommitment{DataCommitment: root}, nil + dataCommitment := DataCommitment(root) + return &dataCommitment, nil } // GetDataRootInclusionProof creates an inclusion proof for the data root of block @@ -72,7 +74,7 @@ func (s *Service) GetDataRootInclusionProof( height int64, start, end uint64, -) (*ResultDataRootInclusionProof, error) { +) (*DataRootTupleInclusionProof, error) { log.Debugw( "validating the data root inclusion proof request", "start", @@ -96,7 +98,8 @@ func (s *Service) GetDataRootInclusionProof( if err != nil { return nil, err } - return &ResultDataRootInclusionProof{Proof: *proof}, nil + dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) + return &dataRootTupleInclusionProof, nil } // padBytes Pad bytes to given length @@ -329,7 +332,7 @@ func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([ // the proof and only querying them. However, that would require re-implementing the logic // in Core. Also, core also queries the whole EDS to generate the proof. So, it's fine for // now. In the future, when blocks get way bigger, we should revisit this and improve it. 
-func (s *Service) ProveShares(ctx context.Context, height, start, end uint64) (*ResultShareProof, error) { +func (s *Service) ProveShares(ctx context.Context, height, start, end uint64) (*types.ShareProof, error) { log.Debugw("proving share range", "start", start, "end", end, "height", height) if height == 0 { return nil, fmt.Errorf("height cannot be equal to 0") @@ -373,7 +376,7 @@ func (s *Service) ProveShares(ctx context.Context, height, start, end uint64) (* if err != nil { return nil, err } - return &ResultShareProof{ShareProof: proof}, nil + return &proof, nil } // ProveCommitment generates a commitment proof for a share commitment. @@ -387,7 +390,7 @@ func (s *Service) ProveCommitment( height uint64, namespace share.Namespace, shareCommitment []byte, -) (*ResultCommitmentProof, error) { +) (*CommitmentProof, error) { log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) if height == 0 { return nil, fmt.Errorf("height cannot be equal to 0") @@ -543,7 +546,7 @@ func (s *Service) ProveCommitment( NamespaceVersion: namespace.Version(), } - return &ResultCommitmentProof{CommitmentProof: commitmentProof}, nil + return &commitmentProof, nil } // computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. 
diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go index 6ffa75bf9d..958d39b0f9 100644 --- a/nodebuilder/blobstream/service_test.go +++ b/nodebuilder/blobstream/service_test.go @@ -444,7 +444,7 @@ func TestGetDataCommitment(t *testing.T) { assert.Error(t, err) } else { assert.NoError(t, err) - assert.Equal(t, tc.expectedDataCommitment, result.DataCommitment) + assert.Equal(t, tc.expectedDataCommitment, result) } }) } @@ -559,7 +559,7 @@ func TestGetDataRootInclusionProof(t *testing.T) { assert.Error(t, err) } else { assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, result.Proof) + assert.Equal(t, tc.expectedProof, result) } }) } @@ -570,7 +570,7 @@ func TestProveShares(t *testing.T) { tests := map[string]struct { height uint64 start, end uint64 - expectedProof ResultShareProof + expectedProof coretypes.ShareProof expectErr bool }{ "height == 0": { @@ -605,7 +605,7 @@ func TestProveShares(t *testing.T) { height: 6, start: 0, end: 2, - expectedProof: func() ResultShareProof { + expectedProof: func() coretypes.ShareProof { proof, err := pkgproof.NewShareInclusionProofFromEDS( api.blocks[6].eds, namespace.PayForBlobNamespace, @@ -613,7 +613,7 @@ func TestProveShares(t *testing.T) { ) require.NoError(t, err) require.NoError(t, proof.Validate(api.blocks[6].dataRoot)) - return ResultShareProof{ShareProof: proof} + return proof }(), }, } @@ -626,7 +626,7 @@ func TestProveShares(t *testing.T) { } else { assert.NoError(t, err) assert.Equal(t, tc.expectedProof, *result) - assert.NoError(t, result.ShareProof.Validate(api.blocks[6].dataRoot)) + assert.NoError(t, result.Validate(api.blocks[6].dataRoot)) } }) } @@ -639,7 +639,7 @@ func TestProveCommitment(t *testing.T) { height uint64 commitment bytes2.HexBytes ns share.Namespace - expectedProof ResultCommitmentProof + expectedProof CommitmentProof expectErr bool }{ "height == 0": {height: 0, expectErr: true}, @@ -647,12 +647,12 @@ func TestProveCommitment(t *testing.T) { height: 
6, ns: api.blocks[6].msgs[0].Namespaces[0], commitment: api.blocks[6].msgs[0].ShareCommitments[0], - expectedProof: func() ResultCommitmentProof { + expectedProof: func() CommitmentProof { commitmentProof := generateCommitmentProofFromBlock(t, api.blocks[6], 0) // make sure we're creating a valid proof for the test - require.NoError(t, commitmentProof.CommitmentProof.Validate()) - valid, err := commitmentProof.CommitmentProof.Verify(api.blocks[6].dataRoot, appconsts.DefaultSubtreeRootThreshold) + require.NoError(t, commitmentProof.Validate()) + valid, err := commitmentProof.Verify(api.blocks[6].dataRoot, appconsts.DefaultSubtreeRootThreshold) require.NoError(t, err) require.True(t, valid) @@ -669,8 +669,8 @@ func TestProveCommitment(t *testing.T) { } else { assert.NoError(t, err) // make sure the actual proof can be validated and verified - assert.NoError(t, result.CommitmentProof.Validate()) - valid, err := result.CommitmentProof.Verify(api.blocks[tc.height].dataRoot, appconsts.DefaultSubtreeRootThreshold) + assert.NoError(t, result.Validate()) + valid, err := result.Verify(api.blocks[tc.height].dataRoot, appconsts.DefaultSubtreeRootThreshold) assert.NoError(t, err) assert.True(t, valid) @@ -678,7 +678,7 @@ func TestProveCommitment(t *testing.T) { assert.Equal(t, tc.expectedProof, *result) // make sure the expected commitment commits to the subtree roots in the actual proof - actualCommitment, _ := merkle.ProofsFromByteSlices(result.CommitmentProof.SubtreeRoots) + actualCommitment, _ := merkle.ProofsFromByteSlices(result.SubtreeRoots) assert.Equal(t, tc.commitment.Bytes(), actualCommitment) } }) @@ -724,8 +724,8 @@ func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { require.NoError(t, err) // make sure the actual commitment attests to the data - require.NoError(t, actualCommitmentProof.CommitmentProof.Validate()) - valid, err := actualCommitmentProof.CommitmentProof.Verify( + require.NoError(t, actualCommitmentProof.Validate()) + valid, err := 
actualCommitmentProof.Verify( block.dataRoot, appconsts.DefaultSubtreeRootThreshold, ) @@ -734,8 +734,8 @@ func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { // generate an expected proof and verify it's valid expectedCommitmentProof := generateCommitmentProofFromBlock(t, block, msgIndex) - require.NoError(t, expectedCommitmentProof.CommitmentProof.Validate()) - valid, err = expectedCommitmentProof.CommitmentProof.Verify( + require.NoError(t, expectedCommitmentProof.Validate()) + valid, err = expectedCommitmentProof.Verify( block.dataRoot, appconsts.DefaultSubtreeRootThreshold, ) @@ -746,7 +746,7 @@ func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { assert.Equal(t, expectedCommitmentProof, *actualCommitmentProof) // make sure the expected commitment commits to the subtree roots in the result proof - actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.CommitmentProof.SubtreeRoots) + actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.SubtreeRoots) assert.Equal(t, msg.ShareCommitments[0], actualCommitment) }) } @@ -854,7 +854,7 @@ func generateCommitmentProofFromBlock( t *testing.T, block testBlock, blobIndex int, -) ResultCommitmentProof { +) CommitmentProof { // parse the namespace ns, err := share.NamespaceFromBytes( append( @@ -930,7 +930,7 @@ func generateCommitmentProofFromBlock( NamespaceVersion: uint8(sharesProof.NamespaceVersion), } - return ResultCommitmentProof{CommitmentProof: commitmentProof} + return commitmentProof } // generateTestBlocks generates a set of test blocks with a specific blob size and number of diff --git a/nodebuilder/blobstream/types.go b/nodebuilder/blobstream/types.go index e3254c6abf..fa382db487 100644 --- a/nodebuilder/blobstream/types.go +++ b/nodebuilder/blobstream/types.go @@ -15,29 +15,12 @@ import ( "github.com/celestiaorg/celestia-node/share" ) -// ResultDataCommitment is the API response containing a data -// commitment, aka data root tuple 
root. -type ResultDataCommitment struct { - DataCommitment bytes.HexBytes `json:"data_commitment"` -} +// DataCommitment is the data root tuple root. +type DataCommitment bytes.HexBytes -// ResultDataRootInclusionProof is the API response containing the binary merkle +// DataRootTupleInclusionProof is the binary merkle // inclusion proof of a height to a data commitment. -type ResultDataRootInclusionProof struct { - Proof merkle.Proof `json:"proof"` -} - -// ResultShareProof is the API response that contains a ShareProof. -// A share proof is a proof of a set of shares to the data root. -type ResultShareProof struct { - ShareProof types.ShareProof `json:"share_proof"` -} - -// ResultCommitmentProof is an API response that contains a CommitmentProof. -// A commitment proof is a proof of a blob share commitment to the data root. -type ResultCommitmentProof struct { - CommitmentProof CommitmentProof `json:"commitment_proof"` -} +type DataRootTupleInclusionProof *merkle.Proof // CommitmentProof is an inclusion proof of a commitment to the data root. 
type CommitmentProof struct { From 68936afe1d08b7445cb76ac6ffa3b6825bdfdb04 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Sat, 13 Jul 2024 12:22:40 +0200 Subject: [PATCH 26/52] chore: refactor the blobstream API --- api/rpc/client/client.go | 39 +- api/rpc_test.go | 42 +- blob/blob.go | 14 - .../types.go => blob/commitment_proof.go | 81 +- blob/service.go | 197 +++ blob/service_test.go | 148 ++ nodebuilder/blob/blob.go | 32 +- nodebuilder/blob/mocks/api.go | 15 + nodebuilder/blobstream/blobstream.go | 96 -- nodebuilder/blobstream/mocks/api.go | 98 -- nodebuilder/blobstream/module.go | 12 - nodebuilder/blobstream/service.go | 627 -------- nodebuilder/blobstream/service_test.go | 1328 ----------------- nodebuilder/header/data_commitment.go | 242 +++ nodebuilder/header/header.go | 43 +- nodebuilder/header/mocks/api.go | 35 +- nodebuilder/header/service.go | 59 + nodebuilder/header/service_test.go | 315 ++++ nodebuilder/module.go | 2 - nodebuilder/node.go | 18 +- nodebuilder/rpc/constructors.go | 3 - nodebuilder/share/constructors.go | 5 +- nodebuilder/share/mocks/api.go | 17 + nodebuilder/share/share.go | 33 + share/eds/eds.go | 30 + share/eds/eds_test.go | 67 + share/eds/edstest/testing.go | 103 ++ 27 files changed, 1388 insertions(+), 2313 deletions(-) rename nodebuilder/blobstream/types.go => blob/commitment_proof.go (63%) delete mode 100644 nodebuilder/blobstream/blobstream.go delete mode 100644 nodebuilder/blobstream/mocks/api.go delete mode 100644 nodebuilder/blobstream/module.go delete mode 100644 nodebuilder/blobstream/service.go delete mode 100644 nodebuilder/blobstream/service_test.go create mode 100644 nodebuilder/header/data_commitment.go diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index 56b4a54d19..ff206d723e 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -9,7 +9,6 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/perms" "github.com/celestiaorg/celestia-node/nodebuilder/blob" - 
"github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -27,16 +26,15 @@ var ( ) type Client struct { - Fraud fraud.API - Header header.API - State state.API - Share share.API - DAS das.API - P2P p2p.API - Node node.API - Blob blob.API - DA da.API - Blobstream blobstream.API + Fraud fraud.API + Header header.API + State state.API + Share share.API + DAS das.API + P2P p2p.API + Node node.API + Blob blob.API + DA da.API closer multiClientCloser } @@ -87,15 +85,14 @@ func newClient(ctx context.Context, addr string, authHeader http.Header) (*Clien func moduleMap(client *Client) map[string]interface{} { // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 return map[string]interface{}{ - "share": &client.Share.Internal, - "state": &client.State.Internal, - "header": &client.Header.Internal, - "fraud": &client.Fraud.Internal, - "das": &client.DAS.Internal, - "p2p": &client.P2P.Internal, - "node": &client.Node.Internal, - "blob": &client.Blob.Internal, - "da": &client.DA.Internal, - "blobstream": &client.Blobstream.Internal, + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + "da": &client.DA.Internal, } } diff --git a/api/rpc_test.go b/api/rpc_test.go index 598acc45c8..e019412f4d 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -22,8 +22,6 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - blobstreamMock 
"github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -87,16 +85,15 @@ func TestRPCCallsUnderlyingNode(t *testing.T) { // api contains all modules that are made available as the node's // public API surface type api struct { - Fraud fraud.Module - Header header.Module - State statemod.Module - Share share.Module - DAS das.Module - Node node.Module - P2P p2p.Module - Blob blob.Module - DA da.Module - Blobstream blobstream.Module + Fraud fraud.Module + Header header.Module + State statemod.Module + Share share.Module + DAS das.Module + Node node.Module + P2P p2p.Module + Blob blob.Module + DA da.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -303,7 +300,6 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), daMock.NewMockModule(ctrl), - blobstreamMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -318,7 +314,6 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * srv.RegisterService("node", mockAPI.Node, &node.API{}) srv.RegisterService("blob", mockAPI.Blob, &blob.API{}) srv.RegisterService("da", mockAPI.DA, &da.API{}) - srv.RegisterService("blobstream", mockAPI.Blobstream, &blobstream.API{}) }) // fx.Replace does not work here, but fx.Decorate does nd := nodebuilder.TestNode(t, node.Full, invokeRPC, fx.Decorate(func() (jwt.Signer, error) { @@ -335,14 +330,13 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * } type mockAPI struct { - State *stateMock.MockModule - Share *shareMock.MockModule - Fraud *fraudMock.MockModule - Header *headerMock.MockModule - Das *dasMock.MockModule - P2P *p2pMock.MockModule - Node *nodeMock.MockModule - Blob 
*blobMock.MockModule - DA *daMock.MockModule - Blobstream *blobstreamMock.MockModule + State *stateMock.MockModule + Share *shareMock.MockModule + Fraud *fraudMock.MockModule + Header *headerMock.MockModule + Das *dasMock.MockModule + P2P *p2pMock.MockModule + Node *nodeMock.MockModule + Blob *blobMock.MockModule + DA *daMock.MockModule } diff --git a/blob/blob.go b/blob/blob.go index 89177b713e..610b0cba96 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -17,20 +17,6 @@ import ( var errEmptyShares = errors.New("empty shares") -// Commitment is a Merkle Root of the subtree built from shares of the Blob. -// It is computed by splitting the blob into shares and building the Merkle subtree to be included -// after Submit. -type Commitment []byte - -func (com Commitment) String() string { - return string(com) -} - -// Equal ensures that commitments are the same -func (com Commitment) Equal(c Commitment) bool { - return bytes.Equal(com, c) -} - // The Proof is a set of nmt proofs that can be verified only through // the included method (due to limitation of the nmt https://github.com/celestiaorg/nmt/issues/218). // Proof proves the WHOLE namespaced data to the row roots. diff --git a/nodebuilder/blobstream/types.go b/blob/commitment_proof.go similarity index 63% rename from nodebuilder/blobstream/types.go rename to blob/commitment_proof.go index fa382db487..5cf419a659 100644 --- a/nodebuilder/blobstream/types.go +++ b/blob/commitment_proof.go @@ -1,11 +1,10 @@ -package blobstream +package blob import ( + "bytes" "fmt" - "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" + coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/shares" @@ -15,12 +14,10 @@ import ( "github.com/celestiaorg/celestia-node/share" ) -// DataCommitment is the data root tuple root. 
-type DataCommitment bytes.HexBytes - -// DataRootTupleInclusionProof is the binary merkle -// inclusion proof of a height to a data commitment. -type DataRootTupleInclusionProof *merkle.Proof +// Commitment is a Merkle Root of the subtree built from shares of the Blob. +// It is computed by splitting the blob into shares and building the Merkle subtree to be included +// after Submit. +type Commitment []byte // CommitmentProof is an inclusion proof of a commitment to the data root. type CommitmentProof struct { @@ -36,8 +33,17 @@ type CommitmentProof struct { NamespaceID namespace.ID `json:"namespace_id"` // RowProof is the proof of the rows containing the blob's data to the // data root. - RowProof types.RowProof `json:"row_proof"` - NamespaceVersion uint8 `json:"namespace_version"` + RowProof coretypes.RowProof `json:"row_proof"` + NamespaceVersion uint8 `json:"namespace_version"` +} + +func (com Commitment) String() string { + return string(com) +} + +// Equal ensures that commitments are the same +func (com Commitment) Equal(c Commitment) bool { + return bytes.Equal(com, c) } // Validate performs basic validation to the commitment proof. @@ -126,54 +132,3 @@ func (commitmentProof *CommitmentProof) Verify(root []byte, subtreeRootThreshold // verify row roots to data root proof return commitmentProof.RowProof.VerifyProof(root), nil } - -// GenerateCommitment generates the share commitment of the corresponding subtree roots. -func (commitmentProof *CommitmentProof) GenerateCommitment() bytes.HexBytes { - return merkle.HashFromByteSlices(commitmentProof.SubtreeRoots) -} - -// ResultSubtreeRootToCommitmentProof is an API response that contains a -// SubtreeRootToCommitmentProof. A subtree root to commitment proof is a proof of a subtree root to -// a share commitment. 
-type ResultSubtreeRootToCommitmentProof struct { - SubtreeRootToCommitmentProof SubtreeRootToCommitmentProof `json:"subtree_root_to_commitment_proof"` -} - -// SubtreeRootToCommitmentProof a subtree root to commitment proof is a proof of a subtree root to -// a share commitment. -type SubtreeRootToCommitmentProof struct { - Proof merkle.Proof `json:"proof"` -} - -// Verify verifies that a share commitment commits to the provided subtree root. -func (subtreeRootProof SubtreeRootToCommitmentProof) Verify( - shareCommitment bytes.HexBytes, - subtreeRoot []byte, -) (bool, error) { - err := subtreeRootProof.Proof.Verify(shareCommitment.Bytes(), subtreeRoot) - if err != nil { - return false, err - } - return true, nil -} - -// ResultShareToSubtreeRootProof is an API response that contains a ShareToSubtreeRootProof. -// A share to subtree root proof is an inclusion proof of a share to a subtree root. -type ResultShareToSubtreeRootProof struct { - ShareToSubtreeRootProof ShareToSubtreeRootProof `json:"share_to_subtree_root_proof"` -} - -// ShareToSubtreeRootProof a share to subtree root proof is an inclusion proof of a share to a -// subtree root. -type ShareToSubtreeRootProof struct { - Proof merkle.Proof `json:"proof"` -} - -// Verify verifies that a share commitment commits to the provided subtree root. 
-func (shareToSubtreeRootProof ShareToSubtreeRootProof) Verify(subtreeRoot, share []byte) (bool, error) { - err := shareToSubtreeRootProof.Proof.Verify(subtreeRoot, share) - if err != nil { - return false, err - } - return true, nil -} diff --git a/blob/service.go b/blob/service.go index 25bf48fd1f..5c62433fc7 100644 --- a/blob/service.go +++ b/blob/service.go @@ -1,7 +1,9 @@ package blob import ( + bytes2 "bytes" "context" + "encoding/hex" "errors" "fmt" "math" @@ -17,8 +19,12 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" @@ -418,3 +424,194 @@ func (s *Service) getBlobs( } return blobs, nil } + +func (s *Service) GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*CommitmentProof, error) { + log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) + if height == 0 { + return nil, fmt.Errorf("height cannot be equal to 0") + } + + // get the blob to compute the subtree roots + log.Debugw( + "getting the blob", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) + blb, err := s.Get(ctx, height, namespace, shareCommitment) + if err != nil { + return nil, err + } + + log.Debugw( + "converting the blob to shares", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) + blobShares, err := BlobsToShares(blb) + if err != nil { + return nil, err + } + if len(blobShares) == 0 { + return nil, fmt.Errorf("the blob shares for commitment %s are empty", 
hex.EncodeToString(shareCommitment)) + } + + // get the extended header + log.Debugw( + "getting the extended header", + "height", + height, + ) + extendedHeader, err := s.headerGetter(ctx, height) + if err != nil { + return nil, err + } + + log.Debugw("getting eds", "height", height) + eds, err := s.shareGetter.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, err + } + + return ProveCommitment(eds, namespace, blobShares) +} + +func ProveCommitment( + eds *rsmt2d.ExtendedDataSquare, + namespace share.Namespace, + blobShares []share.Share, +) (*CommitmentProof, error) { + // find the blob shares in the EDS + blobSharesStartIndex := -1 + for index, share := range eds.FlattenedODS() { + if bytes2.Equal(share, blobShares[0]) { + blobSharesStartIndex = index + } + } + if blobSharesStartIndex < 0 { + return nil, fmt.Errorf("couldn't find the blob shares in the ODS") + } + + nID, err := appns.From(namespace) + if err != nil { + return nil, err + } + + log.Debugw( + "generating the blob share proof for commitment", + "start_share", + blobSharesStartIndex, + "end_share", + blobSharesStartIndex+len(blobShares), + ) + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + nID, + shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), + ) + if err != nil { + return nil, err + } + + // convert the shares to row root proofs to nmt proofs + nmtProofs := make([]*nmt.Proof, 0) + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), + int(proof.End), + proof.Nodes, + true) + nmtProofs = append( + nmtProofs, + &nmtProof, + ) + } + + // compute the subtree roots of the blob shares + log.Debugw("computing the subtree roots") + subtreeRoots := make([][]byte, 0) + dataCursor := 0 + for _, proof := range nmtProofs { + // TODO: do we want directly use the default subtree root threshold + // or want to allow specifying which version to use? 
+ ranges, err := nmt.ToLeafRanges( + proof.Start(), + proof.End(), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) + if err != nil { + return nil, err + } + roots, err := computeSubtreeRoots( + blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], + ranges, + proof.Start(), + ) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, roots...) + dataCursor += proof.End() - proof.Start() + } + + log.Debugw("successfully proved the share commitment") + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + NamespaceID: namespace.ID(), + RowProof: sharesProof.RowProof, + NamespaceVersion: namespace.Version(), + } + return &commitmentProof, nil +} + +// computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. +// the offset is the number of shares that are before the subtree roots we're calculating. +func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset int) ([][]byte, error) { + if len(shares) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty shares list") + } + if len(ranges) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty ranges list") + } + if offset < 0 { + return nil, fmt.Errorf("the offset %d cannot be stricly negative", offset) + } + + // create a tree containing the shares to generate their subtree roots + tree := nmt.New( + appconsts.NewBaseHashFunc(), + nmt.IgnoreMaxNamespace(true), + nmt.NamespaceIDSize(share.NamespaceSize), + ) + for _, sh := range shares { + leafData := make([]byte, 0) + leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) 
+ err := tree.Push(leafData) + if err != nil { + return nil, err + } + } + + // generate the subtree roots + subtreeRoots := make([][]byte, 0) + for _, rg := range ranges { + root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, root) + } + return subtreeRoots, nil +} diff --git a/blob/service_test.go b/blob/service_test.go index 7a99e92e06..b92c3659de 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -13,16 +13,22 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/celestiaorg/celestia-app/pkg/appconsts" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" + blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/go-header/store" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/blob/blobtest" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -616,3 +622,145 @@ func createService(ctx context.Context, t testing.TB, blobs []*Blob) *Service { } return NewService(nil, getters.NewIPLDGetter(bs), fn) } + +// TestProveCommitmentAllCombinations tests proving all the commitments in a block. +// The number of shares per blob increases with each blob to cover proving a large number +// of possibilities. 
+func TestProveCommitmentAllCombinations(t *testing.T) { + tests := map[string]struct { + blobSize int + }{ + "very small blobs that take less than a share": {blobSize: 350}, + "small blobs that take 2 shares": {blobSize: 1000}, + "small blobs that take ~10 shares": {blobSize: 5000}, + "large blobs ~100 shares": {blobSize: 50000}, + "large blobs ~150 shares": {blobSize: 75000}, + "large blobs ~300 shares": {blobSize: 150000}, + "very large blobs ~1500 shares": {blobSize: 750000}, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + proveAndVerifyShareCommitments(t, tc.blobSize) + }) + } +} + +func proveAndVerifyShareCommitments(t *testing.T, blobSize int) { + msgs, blobs, nss, eds, _, _, dataRoot := edstest.GenerateTestBlock(t, blobSize, 10) + for msgIndex, msg := range msgs { + t.Run(fmt.Sprintf("msgIndex=%d", msgIndex), func(t *testing.T) { + blb, err := NewBlob(uint8(blobs[msgIndex].ShareVersion), nss[msgIndex].Bytes(), blobs[msgIndex].Data) + require.NoError(t, err) + blobShares, err := BlobsToShares(blb) + require.NoError(t, err) + // compute the commitment + actualCommitmentProof, err := ProveCommitment(eds, nss[msgIndex].Bytes(), blobShares) + require.NoError(t, err) + + // make sure the actual commitment attests to the data + require.NoError(t, actualCommitmentProof.Validate()) + valid, err := actualCommitmentProof.Verify( + dataRoot, + appconsts.DefaultSubtreeRootThreshold, + ) + require.NoError(t, err) + require.True(t, valid) + + // generate an expected proof and verify it's valid + expectedCommitmentProof := generateCommitmentProofFromBlock(t, eds, nss[msgIndex].Bytes(), blobs[msgIndex], dataRoot) + require.NoError(t, expectedCommitmentProof.Validate()) + valid, err = expectedCommitmentProof.Verify( + dataRoot, + appconsts.DefaultSubtreeRootThreshold, + ) + require.NoError(t, err) + require.True(t, valid) + + // make sure the expected proof is the same as the actual on + assert.Equal(t, expectedCommitmentProof, 
*actualCommitmentProof) + + // make sure the expected commitment commits to the subtree roots in the result proof + actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.SubtreeRoots) + assert.Equal(t, msg.ShareCommitments[0], actualCommitment) + }) + } +} + +// generateCommitmentProofFromBlock takes a block and a PFB index and generates the commitment proof +// using the traditional way of doing, instead of using the API. +func generateCommitmentProofFromBlock( + t *testing.T, + eds *rsmt2d.ExtendedDataSquare, + ns share.Namespace, + blob *blobtypes.Blob, + dataRoot []byte, +) CommitmentProof { + // create the blob from the data + blb, err := NewBlob( + uint8(blob.ShareVersion), + ns, + blob.Data, + ) + require.NoError(t, err) + + // convert the blob to a number of shares + blobShares, err := BlobsToShares(blb) + require.NoError(t, err) + + // find the first share of the blob in the ODS + startShareIndex := -1 + for i, sh := range eds.FlattenedODS() { + if bytes.Equal(sh, blobShares[0]) { + startShareIndex = i + break + } + } + require.Greater(t, startShareIndex, 0) + + // create an inclusion proof of the blob using the share range instead of the commitment + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + ns.ToAppNamespace(), + shares.NewRange(startShareIndex, startShareIndex+len(blobShares)), + ) + require.NoError(t, err) + require.NoError(t, sharesProof.Validate(dataRoot)) + + // calculate the subtree roots + subtreeRoots := make([][]byte, 0) + dataCursor := 0 + for _, proof := range sharesProof.ShareProofs { + ranges, err := nmt.ToLeafRanges( + int(proof.Start), + int(proof.End), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) + require.NoError(t, err) + roots, err := computeSubtreeRoots( + blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], + ranges, + int(proof.Start), + ) + require.NoError(t, err) + subtreeRoots = append(subtreeRoots, roots...) 
+ dataCursor += int(proof.End - proof.Start) + } + + // convert the nmt proof to be accepted by the commitment proof + nmtProofs := make([]*nmt.Proof, 0) + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) + nmtProofs = append(nmtProofs, &nmtProof) + } + + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + NamespaceID: sharesProof.NamespaceID, + RowProof: sharesProof.RowProof, + NamespaceVersion: uint8(sharesProof.NamespaceVersion), + } + + return commitmentProof +} diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index f87105541a..7e4d22a94f 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -26,15 +26,28 @@ type Module interface { // Included checks whether a blob's given commitment(Merkle subtree root) is included at // given height and under the namespace. Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) + // GetCommitmentProof generates a commitment proof for a share commitment. 
+ GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*blob.CommitmentProof, error) } type API struct { Internal struct { - Submit func(context.Context, []*blob.Blob, blob.GasPrice) (uint64, error) `perm:"write"` - Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` - GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` - GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` - Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + Submit func(context.Context, []*blob.Blob, blob.GasPrice) (uint64, error) `perm:"write"` + Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` + GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` + GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` + Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + GetCommitmentProof func( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*blob.CommitmentProof, error) `perm:"read"` } } @@ -64,6 +77,15 @@ func (api *API) GetProof( return api.Internal.GetProof(ctx, height, namespace, commitment) } +func (api *API) GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*blob.CommitmentProof, error) { + return api.Internal.GetCommitmentProof(ctx, height, namespace, shareCommitment) +} + func (api *API) Included( ctx context.Context, height uint64, diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go index 0898e70459..797d962528 100644 --- a/nodebuilder/blob/mocks/api.go +++ 
b/nodebuilder/blob/mocks/api.go @@ -66,6 +66,21 @@ func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockModule)(nil).GetAll), arg0, arg1, arg2) } +// GetCommitmentProof mocks base method. +func (m *MockModule) GetCommitmentProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blob.CommitmentProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCommitmentProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.CommitmentProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCommitmentProof indicates an expected call of GetCommitmentProof. +func (mr *MockModuleMockRecorder) GetCommitmentProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommitmentProof", reflect.TypeOf((*MockModule)(nil).GetCommitmentProof), arg0, arg1, arg2, arg3) +} + // GetProof mocks base method. func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Proof, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go deleted file mode 100644 index 3dfcc44a22..0000000000 --- a/nodebuilder/blobstream/blobstream.go +++ /dev/null @@ -1,96 +0,0 @@ -package blobstream - -import ( - "context" - - "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-node/share" -) - -var _ Module = (*API)(nil) - -// Module defines the API related to interacting with the proofs -// -//go:generate mockgen -destination=mocks/api.go -package=mocks . Module -type Module interface { - // GetDataCommitment collects the data roots over a provided ordered range of blocks, - // and then creates a new Merkle root of those data roots. The range is end exclusive. 
- GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) - - // GetDataRootInclusionProof creates an inclusion proof for the data root of block - // height `height` in the set of blocks defined by `start` and `end`. The range - // is end exclusive. - GetDataRootInclusionProof( - ctx context.Context, - height int64, - start, end uint64, - ) (*DataRootTupleInclusionProof, error) - - // ProveShares generates a share proof for a share range. - ProveShares(ctx context.Context, height, start, end uint64) (*types.ShareProof, error) - - // ProveCommitment generates a commitment proof for a share commitment. - ProveCommitment( - ctx context.Context, - height uint64, - namespace share.Namespace, - shareCommitment []byte, - ) (*CommitmentProof, error) -} - -// API is a wrapper around the Module for RPC. -type API struct { - Internal struct { - GetDataCommitment func( - ctx context.Context, - start, end uint64, - ) (*DataCommitment, error) `perm:"read"` - GetDataRootInclusionProof func( - ctx context.Context, - height int64, - start, end uint64, - ) (*DataRootTupleInclusionProof, error) `perm:"read"` - ProveShares func( - ctx context.Context, - height, start, end uint64, - ) (*types.ShareProof, error) `perm:"read"` - ProveCommitment func( - ctx context.Context, - height uint64, - namespace share.Namespace, - shareCommitment []byte, - ) (*CommitmentProof, error) `perm:"read"` - } -} - -func (api *API) GetDataCommitment( - ctx context.Context, - start, end uint64, -) (*DataCommitment, error) { - return api.Internal.GetDataCommitment(ctx, start, end) -} - -func (api *API) GetDataRootInclusionProof( - ctx context.Context, - height int64, - start, end uint64, -) (*DataRootTupleInclusionProof, error) { - return api.Internal.GetDataRootInclusionProof(ctx, height, start, end) -} - -func (api *API) ProveShares( - ctx context.Context, - height, start, end uint64, -) (*types.ShareProof, error) { - return api.Internal.ProveShares(ctx, height, start, end) -} - 
-func (api *API) ProveCommitment( - ctx context.Context, - height uint64, - namespace share.Namespace, - shareCommitment []byte, -) (*CommitmentProof, error) { - return api.Internal.ProveCommitment(ctx, height, namespace, shareCommitment) -} diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go deleted file mode 100644 index be3dae60ad..0000000000 --- a/nodebuilder/blobstream/mocks/api.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/celestiaorg/celestia-node/nodebuilder/blobstream (interfaces: Module) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" - share "github.com/celestiaorg/celestia-node/share" - gomock "github.com/golang/mock/gomock" - types "github.com/tendermint/tendermint/types" -) - -// MockModule is a mock of Module interface. -type MockModule struct { - ctrl *gomock.Controller - recorder *MockModuleMockRecorder -} - -// MockModuleMockRecorder is the mock recorder for MockModule. -type MockModuleMockRecorder struct { - mock *MockModule -} - -// NewMockModule creates a new mock instance. -func NewMockModule(ctrl *gomock.Controller) *MockModule { - mock := &MockModule{ctrl: ctrl} - mock.recorder = &MockModuleMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockModule) EXPECT() *MockModuleMockRecorder { - return m.recorder -} - -// GetDataCommitment mocks base method. -func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*blobstream.DataCommitment, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) - ret0, _ := ret[0].(*blobstream.DataCommitment) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDataCommitment indicates an expected call of GetDataCommitment. 
-func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) -} - -// GetDataRootInclusionProof mocks base method. -func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*blobstream.DataRootTupleInclusionProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataRootInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.DataRootTupleInclusionProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDataRootInclusionProof indicates an expected call of GetDataRootInclusionProof. -func (mr *MockModuleMockRecorder) GetDataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootInclusionProof), arg0, arg1, arg2, arg3) -} - -// ProveCommitment mocks base method. -func (m *MockModule) ProveCommitment(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blobstream.CommitmentProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProveCommitment", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*blobstream.CommitmentProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ProveCommitment indicates an expected call of ProveCommitment. -func (mr *MockModuleMockRecorder) ProveCommitment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProveCommitment", reflect.TypeOf((*MockModule)(nil).ProveCommitment), arg0, arg1, arg2, arg3) -} - -// ProveShares mocks base method. 
-func (m *MockModule) ProveShares(arg0 context.Context, arg1, arg2, arg3 uint64) (*types.ShareProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProveShares", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*types.ShareProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ProveShares indicates an expected call of ProveShares. -func (mr *MockModuleMockRecorder) ProveShares(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProveShares", reflect.TypeOf((*MockModule)(nil).ProveShares), arg0, arg1, arg2, arg3) -} diff --git a/nodebuilder/blobstream/module.go b/nodebuilder/blobstream/module.go deleted file mode 100644 index c8deb1db10..0000000000 --- a/nodebuilder/blobstream/module.go +++ /dev/null @@ -1,12 +0,0 @@ -package blobstream - -import "go.uber.org/fx" - -func ConstructModule() fx.Option { - return fx.Module("blobstream", - fx.Provide(NewService), - fx.Provide(func(serv *Service) Module { - return serv - }), - ) -} diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go deleted file mode 100644 index 7ba6f5824f..0000000000 --- a/nodebuilder/blobstream/service.go +++ /dev/null @@ -1,627 +0,0 @@ -package blobstream - -import ( - bytes2 "bytes" - "context" - "encoding/hex" - "fmt" - "math" - "strconv" - - logging "github.com/ipfs/go-log/v2" - "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" - "github.com/celestiaorg/celestia-app/pkg/shares" - "github.com/celestiaorg/nmt" - - "github.com/celestiaorg/celestia-node/blob" - nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" - headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" - shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" - 
"github.com/celestiaorg/celestia-node/share" -) - -var _ Module = (*Service)(nil) - -var log = logging.Logger("go-blobstream") - -type Service struct { - blobServ nodeblob.Module - headerServ headerServ.Module - shareServ shareServ.Module -} - -func NewService(blobMod nodeblob.Module, headerMod headerServ.Module, shareMod shareServ.Module) *Service { - return &Service{ - blobServ: blobMod, - headerServ: headerMod, - shareServ: shareMod, - } -} - -// GetDataCommitment collects the data roots over a provided ordered range of blocks, -// and then creates a new Merkle root of those data roots. The range is end exclusive. -func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { - log.Debugw("validating the data commitment range", "start", start, "end", end) - err := s.validateDataCommitmentRange(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("fetching the data root tuples", "start", start, "end", end) - tuples, err := s.fetchDataRootTuples(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("hashing the data root tuples", "start", start, "end", end) - root, err := hashDataRootTuples(tuples) - if err != nil { - return nil, err - } - // Create data commitment - dataCommitment := DataCommitment(root) - return &dataCommitment, nil -} - -// GetDataRootInclusionProof creates an inclusion proof for the data root of block -// height `height` in the set of blocks defined by `start` and `end`. The range -// is end exclusive. 
-func (s *Service) GetDataRootInclusionProof( - ctx context.Context, - height int64, - start, - end uint64, -) (*DataRootTupleInclusionProof, error) { - log.Debugw( - "validating the data root inclusion proof request", - "start", - start, - "end", - end, - "height", - height, - ) - err := s.validateDataRootInclusionProofRequest(ctx, uint64(height), start, end) - if err != nil { - return nil, err - } - log.Debugw("fetching the data root tuples", "start", start, "end", end) - tuples, err := s.fetchDataRootTuples(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("proving the data root tuples", "start", start, "end", end) - proof, err := proveDataRootTuples(tuples, height) - if err != nil { - return nil, err - } - dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) - return &dataRootTupleInclusionProof, nil -} - -// padBytes Pad bytes to given length -func padBytes(byt []byte, length int) ([]byte, error) { - l := len(byt) - if l > length { - return nil, fmt.Errorf( - "cannot pad bytes because length of bytes array: %d is greater than given length: %d", - l, - length, - ) - } - if l == length { - return byt, nil - } - tmp := make([]byte, length) - copy(tmp[length-l:], byt) - return tmp, nil -} - -// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. -// Used to mimic the result of `abi.encode(number)` in Ethereum. -func To32PaddedHexBytes(number uint64) ([]byte, error) { - hexRepresentation := strconv.FormatUint(number, 16) - // Make sure hex representation has even length. - // The `strconv.FormatUint` can return odd length hex encodings. - // For example, `strconv.FormatUint(10, 16)` returns `a`. - // Thus, we need to pad it. 
- if len(hexRepresentation)%2 == 1 { - hexRepresentation = "0" + hexRepresentation - } - hexBytes, hexErr := hex.DecodeString(hexRepresentation) - if hexErr != nil { - return nil, hexErr - } - paddedBytes, padErr := padBytes(hexBytes, 32) - if padErr != nil { - return nil, padErr - } - return paddedBytes, nil -} - -// DataRootTuple contains the data that will be used to create the QGB commitments. -// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. -// For more information: -// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol -type DataRootTuple struct { - height uint64 - dataRoot [32]byte -} - -// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of -// `abi.encode(...)` in Ethereum. -// The encoded type is a DataRootTuple, which has the following ABI: -// -// { -// "components":[ -// { -// "internalType":"uint256", -// "name":"height", -// "type":"uint256" -// }, -// { -// "internalType":"bytes32", -// "name":"dataRoot", -// "type":"bytes32" -// }, -// { -// "internalType":"structDataRootTuple", -// "name":"_tuple", -// "type":"tuple" -// } -// ] -// } -// -// padding the hex representation of the height padded to 32 bytes concatenated to the data root. -// For more information, refer to: -// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol -func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { - paddedHeight, err := To32PaddedHexBytes(height) - if err != nil { - return nil, err - } - return append(paddedHeight, dataRoot[:]...), nil -} - -// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. -// It's a local parameter to protect the API from creating unnecessarily large commitments. -const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. 
- -// validateDataCommitmentRange runs basic checks on the asc sorted list of -// heights that will be used subsequently in generating data commitments over -// the defined set of heights. -func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end uint64) error { - if start == 0 { - return fmt.Errorf("the start block is 0") - } - if start >= end { - return fmt.Errorf("end block is smaller or equal to the start block") - } - heightsRange := end - start - if heightsRange > uint64(dataCommitmentBlocksLimit) { - return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) - } - - currentHeader, err := s.headerServ.NetworkHead(ctx) - if err != nil { - return err - } - // the data commitment range is end exclusive - if end > currentHeader.Height()+1 { - return fmt.Errorf( - "end block %d is higher than current chain height %d", - end, - currentHeader.Height(), - ) - } - - currentLocalHeader, err := s.headerServ.LocalHead(ctx) - if err != nil { - return err - } - // the data commitment range is end exclusive - if end > currentLocalHeader.Height()+1 { - return fmt.Errorf( - "end block %d is higher than local chain height %d. Wait for the node until it syncs up to %d", - end, - currentLocalHeader.Height(), - end, - ) - } - return nil -} - -// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square -// size, then returns their merkle root. 
-func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { - if len(tuples) == 0 { - return nil, fmt.Errorf("cannot hash an empty list of data root tuples") - } - dataRootEncodedTuples := make([][]byte, 0, len(tuples)) - for _, tuple := range tuples { - encodedTuple, err := EncodeDataRootTuple( - tuple.height, - tuple.dataRoot, - ) - if err != nil { - return nil, err - } - dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) - } - root := merkle.HashFromByteSlices(dataRootEncodedTuples) - return root, nil -} - -// validateDataRootInclusionProofRequest validates the request to generate a data root -// inclusion proof. -func (s *Service) validateDataRootInclusionProofRequest( - ctx context.Context, - height, start, end uint64, -) error { - err := s.validateDataCommitmentRange(ctx, start, end) - if err != nil { - return err - } - if height < start || height >= end { - return fmt.Errorf( - "height %d should be in the end exclusive interval first_block %d last_block %d", - height, - start, - end, - ) - } - return nil -} - -// proveDataRootTuples returns the merkle inclusion proof for a height. 
-func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { - if len(tuples) == 0 { - return nil, fmt.Errorf("cannot prove an empty list of tuples") - } - if height < 0 { - return nil, fmt.Errorf("cannot prove a strictly negative height %d", height) - } - currentHeight := tuples[0].height - 1 - for _, tuple := range tuples { - if tuple.height != currentHeight+1 { - return nil, fmt.Errorf("the provided tuples are not consecutive %d vs %d", currentHeight, tuple.height) - } - currentHeight++ - } - dataRootEncodedTuples := make([][]byte, 0, len(tuples)) - for _, tuple := range tuples { - encodedTuple, err := EncodeDataRootTuple( - tuple.height, - tuple.dataRoot, - ) - if err != nil { - return nil, err - } - dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) - } - _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) - return proofs[height-int64(tuples[0].height)], nil -} - -// fetchDataRootTuples takes an end exclusive range of heights and fetches its -// corresponding data root tuples. -func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]DataRootTuple, error) { - tuples := make([]DataRootTuple, 0, end-start) - for height := start; height < end; height++ { - block, err := s.headerServ.GetByHeight(ctx, height) - if err != nil { - return nil, err - } - if block == nil { - return nil, fmt.Errorf("couldn't load block %d", height) - } - tuples = append(tuples, DataRootTuple{ - height: block.Height(), - dataRoot: *(*[32]byte)(block.DataHash), - }) - } - return tuples, nil -} - -// ProveShares generates a share proof for a share range. -// Note: queries the whole EDS to generate the proof. -// This can be improved by selecting the set of shares that will need to be used to create -// the proof and only querying them. However, that would require re-implementing the logic -// in Core. Also, core also queries the whole EDS to generate the proof. So, it's fine for -// now. 
In the future, when blocks get way bigger, we should revisit this and improve it. -func (s *Service) ProveShares(ctx context.Context, height, start, end uint64) (*types.ShareProof, error) { - log.Debugw("proving share range", "start", start, "end", end, "height", height) - if height == 0 { - return nil, fmt.Errorf("height cannot be equal to 0") - } - if start == end { - return nil, fmt.Errorf("start share cannot be equal to end share") - } - if start > end { - return nil, fmt.Errorf("start share %d cannot be greater than end share %d", start, end) - } - - log.Debugw("getting extended header", "height", height) - extendedHeader, err := s.headerServ.GetByHeight(ctx, height) - if err != nil { - return nil, err - } - log.Debugw("getting eds", "height", height) - eds, err := s.shareServ.GetEDS(ctx, extendedHeader) - if err != nil { - return nil, err - } - - startInt, err := uint64ToInt(start) - if err != nil { - return nil, err - } - endInt, err := uint64ToInt(end) - if err != nil { - return nil, err - } - odsShares, err := shares.FromBytes(eds.FlattenedODS()) - if err != nil { - return nil, err - } - nID, err := pkgproof.ParseNamespace(odsShares, startInt, endInt) - if err != nil { - return nil, err - } - log.Debugw("generating the share proof", "start", start, "end", end, "height", height) - proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(startInt, endInt)) - if err != nil { - return nil, err - } - return &proof, nil -} - -// ProveCommitment generates a commitment proof for a share commitment. -// It takes as argument the height of the block containing the blob of data, its -// namespace and its share commitment. -// Note: queries the whole EDS to generate the proof. -// This can be improved once `GetProof` returns the proof only for the blob and not the whole -// namespace. 
-func (s *Service) ProveCommitment( - ctx context.Context, - height uint64, - namespace share.Namespace, - shareCommitment []byte, -) (*CommitmentProof, error) { - log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) - if height == 0 { - return nil, fmt.Errorf("height cannot be equal to 0") - } - - // get the blob to compute the subtree roots - log.Debugw( - "getting the blob", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - blb, err := s.blobServ.Get(ctx, height, namespace, shareCommitment) - if err != nil { - return nil, err - } - - log.Debugw( - "converting the blob to shares", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - blobShares, err := blob.BlobsToShares(blb) - if err != nil { - return nil, err - } - if len(blobShares) == 0 { - return nil, fmt.Errorf("the blob shares for commitment %s are empty", hex.EncodeToString(shareCommitment)) - } - - // get the extended header - log.Debugw( - "getting the extended header", - "height", - height, - ) - extendedHeader, err := s.headerServ.GetByHeight(ctx, height) - if err != nil { - return nil, err - } - - log.Debugw("getting eds", "height", height) - eds, err := s.shareServ.GetEDS(ctx, extendedHeader) - if err != nil { - return nil, err - } - - // find the blob shares in the EDS - blobSharesStartIndex := -1 - for index, share := range eds.FlattenedODS() { - if bytes2.Equal(share, blobShares[0]) { - blobSharesStartIndex = index - } - } - if blobSharesStartIndex < 0 { - return nil, fmt.Errorf("couldn't find the blob shares in the ODS") - } - - nID, err := appns.From(namespace) - if err != nil { - return nil, err - } - - log.Debugw( - "generating the blob share proof for commitment", - "commitment", - shareCommitment, - "start_share", - blobSharesStartIndex, - "end_share", - blobSharesStartIndex+len(blobShares), - "height", - height, - ) - sharesProof, err := 
pkgproof.NewShareInclusionProofFromEDS( - eds, - nID, - shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), - ) - if err != nil { - return nil, err - } - - // convert the shares to row root proofs to nmt proofs - nmtProofs := make([]*nmt.Proof, 0) - for _, proof := range sharesProof.ShareProofs { - nmtProof := nmt.NewInclusionProof(int(proof.Start), - int(proof.End), - proof.Nodes, - true) - nmtProofs = append( - nmtProofs, - &nmtProof, - ) - } - - // compute the subtree roots of the blob shares - log.Debugw( - "computing the subtree roots", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - subtreeRoots := make([][]byte, 0) - dataCursor := 0 - for _, proof := range nmtProofs { - // TODO: do we want directly use the default subtree root threshold - // or want to allow specifying which version to use? - ranges, err := nmt.ToLeafRanges( - proof.Start(), - proof.End(), - shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), - ) - if err != nil { - return nil, err - } - roots, err := computeSubtreeRoots( - blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], - ranges, - proof.Start(), - ) - if err != nil { - return nil, err - } - subtreeRoots = append(subtreeRoots, roots...) - dataCursor += proof.End() - proof.Start() - } - - log.Debugw( - "successfully proved the share commitment", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - commitmentProof := CommitmentProof{ - SubtreeRoots: subtreeRoots, - SubtreeRootProofs: nmtProofs, - NamespaceID: namespace.ID(), - RowProof: sharesProof.RowProof, - NamespaceVersion: namespace.Version(), - } - - return &commitmentProof, nil -} - -// computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. -// the offset is the number of shares that are before the subtree roots we're calculating. 
-func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset int) ([][]byte, error) { - if len(shares) == 0 { - return nil, fmt.Errorf("cannot compute subtree roots for an empty shares list") - } - if len(ranges) == 0 { - return nil, fmt.Errorf("cannot compute subtree roots for an empty ranges list") - } - if offset < 0 { - return nil, fmt.Errorf("the offset %d cannot be stricly negative", offset) - } - - // create a tree containing the shares to generate their subtree roots - tree := nmt.New( - appconsts.NewBaseHashFunc(), - nmt.IgnoreMaxNamespace(true), - nmt.NamespaceIDSize(share.NamespaceSize), - ) - for _, sh := range shares { - leafData := make([]byte, 0) - leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) - err := tree.Push(leafData) - if err != nil { - return nil, err - } - } - - // generate the subtree roots - subtreeRoots := make([][]byte, 0) - for _, rg := range ranges { - root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) - if err != nil { - return nil, err - } - subtreeRoots = append(subtreeRoots, root) - } - return subtreeRoots, nil -} - -func uint64ToInt(number uint64) (int, error) { - if number >= math.MaxInt { - return 0, fmt.Errorf("number %d is higher than max int %d", number, math.MaxInt) - } - return int(number), nil -} - -// ProveSubtreeRootToCommitment generates a subtree root to share commitment inclusion proof. -// Note: this method is not part of the API. It will not be served by any endpoint, however, -// it can be called directly programmatically. 
-func ProveSubtreeRootToCommitment( - subtreeRoots [][]byte, - subtreeRootIndex uint64, -) (*ResultSubtreeRootToCommitmentProof, error) { - _, proofs := merkle.ProofsFromByteSlices(subtreeRoots) - return &ResultSubtreeRootToCommitmentProof{ - SubtreeRootToCommitmentProof: SubtreeRootToCommitmentProof{ - Proof: *proofs[subtreeRootIndex], - }, - }, nil -} - -// ProveShareToSubtreeRoot generates a share to subtree root inclusion proof -// Note: this method is not part of the API. It will not be served by any endpoint, however, -// it can be called directly programmatically. -func ProveShareToSubtreeRoot( - shares [][]byte, - shareIndex uint64, -) (*ResultShareToSubtreeRootProof, error) { - _, proofs := merkle.ProofsFromByteSlices(shares) - return &ResultShareToSubtreeRootProof{ - ShareToSubtreeRootProof: ShareToSubtreeRootProof{ - Proof: *proofs[shareIndex], - }, - }, nil -} diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go deleted file mode 100644 index 958d39b0f9..0000000000 --- a/nodebuilder/blobstream/service_test.go +++ /dev/null @@ -1,1328 +0,0 @@ -package blobstream - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "math" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/merkle" - bytes2 "github.com/tendermint/tendermint/libs/bytes" - coretypes "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-app/app/encoding" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/pkg/da" - "github.com/celestiaorg/celestia-app/pkg/namespace" - pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" - "github.com/celestiaorg/celestia-app/pkg/shares" - "github.com/celestiaorg/celestia-app/pkg/square" - "github.com/celestiaorg/celestia-app/test/util/blobfactory" - 
"github.com/celestiaorg/celestia-app/test/util/testfactory" - "github.com/celestiaorg/celestia-app/x/blob/types" - libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/go-header/sync" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/blob" - "github.com/celestiaorg/celestia-node/header" - nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob" - headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" - shareServ "github.com/celestiaorg/celestia-node/nodebuilder/share" - "github.com/celestiaorg/celestia-node/share" -) - -func TestPadBytes(t *testing.T) { - tests := []struct { - input []byte - length int - expected []byte - expectErr bool - }{ - {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, - {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, - {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, - {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, - } - - for _, test := range tests { - result, err := padBytes(test.input, test.length) - if test.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expected, result) - } - } -} - -func TestTo32PaddedHexBytes(t *testing.T) { - tests := []struct { - number uint64 - expected []byte - expectError bool - }{ - { - number: 10, - expected: func() []byte { - res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") - return res - }(), - }, - { - number: 255, - expected: func() []byte { - res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - return res - }(), - }, - { - number: 255, - expected: func() []byte { - res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - return res - }(), - }, - { - number: 4294967295, - expected: func() []byte { - res, _ := 
hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") - return res - }(), - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { - result, err := To32PaddedHexBytes(test.number) - if test.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expected, result) - } - }) - } -} - -func TestEncodeDataRootTuple(t *testing.T) { - height := uint64(2) - dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") - require.NoError(t, err) - - expectedEncoding, err := hex.DecodeString( - // hex representation of height padded to 32 bytes - "0000000000000000000000000000000000000000000000000000000000000002" + - // data root - "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", - ) - require.NoError(t, err) - require.NotNil(t, expectedEncoding) - - actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) - require.NoError(t, err) - require.NotNil(t, actualEncoding) - - // Check that the length of packed data is correct - assert.Equal(t, len(actualEncoding), 64) - assert.Equal(t, expectedEncoding, actualEncoding) -} - -func TestHashDataRootTuples(t *testing.T) { - tests := map[string]struct { - tuples []DataRootTuple - expectedHash []byte - expectErr bool - }{ - "empty tuples list": {tuples: nil, expectErr: true}, - "valid list of data root tuples": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - }, - expectedHash: func() []byte { - tuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) - tuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) - - return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := hashDataRootTuples(tc.tuples) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, 
err) - assert.Equal(t, tc.expectedHash, result) - } - }) - } -} - -func TestProveDataRootTuples(t *testing.T) { - tests := map[string]struct { - tuples []DataRootTuple - height int64 - expectedProof merkle.Proof - expectErr bool - }{ - "empty tuples list": {tuples: nil, expectErr: true}, - "strictly negative height": { - height: -1, - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - }, - expectErr: true, - }, - "non consecutive list of tuples at the beginning": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 3, - dataRoot: [32]byte{0x2}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectErr: true, - }, - "non consecutive list of tuples in the middle": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x2}, - }, - { - height: 5, - dataRoot: [32]byte{0x4}, - }, - { - height: 6, - dataRoot: [32]byte{0x5}, - }, - }, - expectErr: true, - }, - "non consecutive list of tuples at the end": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectErr: true, - }, - "duplicate height at the beginning": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectErr: true, - }, - "duplicate height in the middle": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x3}, - }, - }, - expectErr: true, - }, - "duplicate height at the end": { - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - 
{ - height: 2, - dataRoot: [32]byte{0x2}, - }, - }, - expectErr: true, - }, - "valid proof": { - height: 3, - tuples: []DataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x3}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectedProof: func() merkle.Proof { - encodedTuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) - encodedTuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) - encodedTuple3, _ := EncodeDataRootTuple(3, [32]byte{0x3}) - encodedTuple4, _ := EncodeDataRootTuple(4, [32]byte{0x4}) - _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) - return *proofs[2] - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := proveDataRootTuples(tc.tuples, tc.height) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, *result) - } - }) - } -} - -func TestUint64ToInt(t *testing.T) { - tests := []struct { - number uint64 - expected int - expectErr bool - }{ - {number: 0, expected: 0}, - {number: 10, expected: 10}, - {number: math.MaxInt - 1, expected: math.MaxInt - 1}, - {number: math.MaxInt, expected: 0, expectErr: true}, - {number: math.MaxInt + 1, expected: 0, expectErr: true}, - {number: math.MaxUint64, expected: 0, expectErr: true}, - } - - for _, test := range tests { - result, err := uint64ToInt(test.number) - if test.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expected, result) - } - } -} - -func TestGetDataCommitment(t *testing.T) { - api := newTestAPI(t, 10, 1000, 10) - tests := map[string]struct { - start, end uint64 - expectedDataCommitment bytes2.HexBytes - expectErr bool - }{ - "start == 0": { - start: 0, - expectErr: true, - }, - "start block == end block": { - start: 2, - end: 2, - expectErr: true, - }, - "start block > end 
block": { - start: 3, - end: 2, - expectErr: true, - }, - "range exceeds data commitment blocks limit": { - start: 3, - end: dataCommitmentBlocksLimit + 10, - expectErr: true, - }, - "end block is greater than the network block height": { - start: 3, - end: 15, - expectErr: true, - }, - "valid case": { - start: 5, - end: 9, - expectedDataCommitment: func() bytes2.HexBytes { - tuples := []DataRootTuple{ - { - height: 5, - dataRoot: [32]byte(api.blocks[5].dataRoot), - }, - { - height: 6, - dataRoot: [32]byte(api.blocks[6].dataRoot), - }, - { - height: 7, - dataRoot: [32]byte(api.blocks[7].dataRoot), - }, - { - height: 8, - dataRoot: [32]byte(api.blocks[8].dataRoot), - }, - } - hash, err := hashDataRootTuples(tuples) - require.NoError(t, err) - return hash - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := api.blobService.GetDataCommitment(context.Background(), tc.start, tc.end) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedDataCommitment, result) - } - }) - } -} - -func TestGetDataRootInclusionProof(t *testing.T) { - api := newTestAPI(t, 10, 1000, 10) - tests := map[string]struct { - height int64 - start, end uint64 - expectedProof merkle.Proof - expectErr bool - }{ - "height < 0": { - height: -1, - expectErr: true, - }, - "height == 0": { - height: 0, - expectErr: true, - }, - "start == 0": { - start: 0, - expectErr: true, - }, - "start block == end block": { - start: 2, - end: 2, - expectErr: true, - }, - "start block > end block": { - start: 3, - end: 2, - expectErr: true, - }, - "height < start": { - height: 2, - start: 3, - end: 2, - expectErr: true, - }, - "height == end": { - height: 4, - start: 3, - end: 4, - expectErr: true, - }, - "height > end": { - height: 5, - start: 3, - end: 4, - expectErr: true, - }, - "range exceeds data commitment blocks limit": { - start: 3, - end: dataCommitmentBlocksLimit + 10, - expectErr: true, - }, - "end block is 
greater than the network block height": { - start: 3, - end: 15, - expectErr: true, - }, - "start block is greater than the network block height": { - start: 12, - end: 15, - height: 14, - expectErr: true, - }, - "height block is greater than the network block height": { - start: 1, - end: 15, - height: 14, - expectErr: true, - }, - "valid case": { - height: 6, - start: 5, - end: 9, - expectedProof: func() merkle.Proof { - encodedTuple5, _ := EncodeDataRootTuple( - 5, - [32]byte(api.blocks[5].dataRoot), - ) - encodedTuple6, _ := EncodeDataRootTuple( - 6, - [32]byte(api.blocks[6].dataRoot), - ) - encodedTuple7, _ := EncodeDataRootTuple( - 7, - [32]byte(api.blocks[7].dataRoot), - ) - encodedTuple8, _ := EncodeDataRootTuple( - 8, - [32]byte(api.blocks[8].dataRoot), - ) - _, proofs := merkle.ProofsFromByteSlices([][]byte{ - encodedTuple5, - encodedTuple6, - encodedTuple7, - encodedTuple8, - }) - return *proofs[1] - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := api.blobService.GetDataRootInclusionProof(context.Background(), tc.height, tc.start, tc.end) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, result) - } - }) - } -} - -func TestProveShares(t *testing.T) { - api := newTestAPI(t, 10, 1000, 10) - tests := map[string]struct { - height uint64 - start, end uint64 - expectedProof coretypes.ShareProof - expectErr bool - }{ - "height == 0": { - height: 0, - expectErr: true, - }, - "height > blockchain tip": { - height: 100, - expectErr: true, - }, - "start share == end share": { - start: 2, - end: 2, - expectErr: true, - }, - "start share > end share": { - start: 3, - end: 2, - expectErr: true, - }, - "start share > number of shares in the block": { - start: 200, - end: 201, - expectErr: true, - }, - "end share > number of shares in the block": { - start: 1, - end: 201, - expectErr: true, - }, - "valid case": { - height: 6, - start: 0, - end: 2, - 
expectedProof: func() coretypes.ShareProof { - proof, err := pkgproof.NewShareInclusionProofFromEDS( - api.blocks[6].eds, - namespace.PayForBlobNamespace, - shares.NewRange(0, 2), - ) - require.NoError(t, err) - require.NoError(t, proof.Validate(api.blocks[6].dataRoot)) - return proof - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := api.blobService.ProveShares(context.Background(), tc.height, tc.start, tc.end) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, *result) - assert.NoError(t, result.Validate(api.blocks[6].dataRoot)) - } - }) - } -} - -func TestProveCommitment(t *testing.T) { - api := newTestAPI(t, 10, 300, 10) - - tests := map[string]struct { - height uint64 - commitment bytes2.HexBytes - ns share.Namespace - expectedProof CommitmentProof - expectErr bool - }{ - "height == 0": {height: 0, expectErr: true}, - "valid case": { - height: 6, - ns: api.blocks[6].msgs[0].Namespaces[0], - commitment: api.blocks[6].msgs[0].ShareCommitments[0], - expectedProof: func() CommitmentProof { - commitmentProof := generateCommitmentProofFromBlock(t, api.blocks[6], 0) - - // make sure we're creating a valid proof for the test - require.NoError(t, commitmentProof.Validate()) - valid, err := commitmentProof.Verify(api.blocks[6].dataRoot, appconsts.DefaultSubtreeRootThreshold) - require.NoError(t, err) - require.True(t, valid) - - return commitmentProof - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := api.blobService.ProveCommitment(context.Background(), tc.height, tc.ns, tc.commitment) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - // make sure the actual proof can be validated and verified - assert.NoError(t, result.Validate()) - valid, err := result.Verify(api.blocks[tc.height].dataRoot, appconsts.DefaultSubtreeRootThreshold) - assert.NoError(t, err) - assert.True(t, 
valid) - - // make sure the expected proof is the same as the actual proof - assert.Equal(t, tc.expectedProof, *result) - - // make sure the expected commitment commits to the subtree roots in the actual proof - actualCommitment, _ := merkle.ProofsFromByteSlices(result.SubtreeRoots) - assert.Equal(t, tc.commitment.Bytes(), actualCommitment) - } - }) - } -} - -// TestProveCommitmentAllCombinations tests proving all the commitments in a block. -// The number of shares per blob increases with each blob to cover proving a large number -// of possibilities. -func TestProveCommitmentAllCombinations(t *testing.T) { - tests := map[string]struct { - numberOfBlocks int - blobSize int - }{ - "very small blobs that take less than a share": {numberOfBlocks: 20, blobSize: 350}, - "small blobs that take 2 shares": {numberOfBlocks: 20, blobSize: 1000}, - "small blobs that take ~10 shares": {numberOfBlocks: 10, blobSize: 5000}, - "large blobs ~100 shares": {numberOfBlocks: 5, blobSize: 50000}, - "large blobs ~150 shares": {numberOfBlocks: 5, blobSize: 75000}, - "large blobs ~300 shares": {numberOfBlocks: 5, blobSize: 150000}, - "very large blobs ~1500 shares": {numberOfBlocks: 3, blobSize: 750000}, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - proveAllCommitments(t, tc.numberOfBlocks, tc.blobSize) - }) - } -} - -func proveAllCommitments(t *testing.T, numberOfBlocks, blobSize int) { - api := newTestAPI(t, numberOfBlocks, blobSize, 10) - for blockIndex, block := range api.blocks { - for msgIndex, msg := range block.msgs { - t.Run(fmt.Sprintf("height=%d, blobIndex=%d", blockIndex, msgIndex), func(t *testing.T) { - // compute the commitment - actualCommitmentProof, err := api.blobService.ProveCommitment( - context.Background(), - uint64(blockIndex), - msg.Namespaces[0], - msg.ShareCommitments[0], - ) - require.NoError(t, err) - - // make sure the actual commitment attests to the data - require.NoError(t, actualCommitmentProof.Validate()) - valid, err := 
actualCommitmentProof.Verify( - block.dataRoot, - appconsts.DefaultSubtreeRootThreshold, - ) - require.NoError(t, err) - require.True(t, valid) - - // generate an expected proof and verify it's valid - expectedCommitmentProof := generateCommitmentProofFromBlock(t, block, msgIndex) - require.NoError(t, expectedCommitmentProof.Validate()) - valid, err = expectedCommitmentProof.Verify( - block.dataRoot, - appconsts.DefaultSubtreeRootThreshold, - ) - require.NoError(t, err) - require.True(t, valid) - - // make sure the expected proof is the same as the actual on - assert.Equal(t, expectedCommitmentProof, *actualCommitmentProof) - - // make sure the expected commitment commits to the subtree roots in the result proof - actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.SubtreeRoots) - assert.Equal(t, msg.ShareCommitments[0], actualCommitment) - }) - } - } -} - -// testBlock is a block struct used to keep track of all the information -// needed to mock the API. -type testBlock struct { - msgs []*types.MsgPayForBlobs - blobs []*types.Blob - nss []namespace.Namespace - eds *rsmt2d.ExtendedDataSquare - coreTxs coretypes.Txs - dah *da.DataAvailabilityHeader - dataRoot []byte -} - -// testAPI an API that allows mocking all the methods and thoroughly testing them -type testAPI struct { - blobService *Service - blocks []testBlock -} - -// newTestAPI creates a new test API that fetches data from a test blockchain that has -// a specific number of blocks. Each block has a number of PFBs. Each PFB has a single blob with -// size blobSize or bigger. 
-func newTestAPI(t *testing.T, numberOfBlocks, blobSize, numberOfPFBs int) *testAPI { - blocks := []testBlock{{}} // so that the heights match the slice indexes - blocks = append( - blocks, - generateTestBlocks(t, numberOfBlocks, blobSize, numberOfPFBs)..., - ) - - newTestService := NewService( - mockBlobService{blocks}, - mockHeaderService{blocks}, - mockShareService{blocks}, - ) - api := &testAPI{ - blobService: newTestService, - blocks: blocks, - } - - return api -} - -// addBlock adds a new block the testAPI. -// The added block can be created in the tests and added to the chain -// to test specific cases. -// -//nolint:unused -func (api *testAPI) addBlock(t *testing.T, numberOfBlobs, blobSize int) int { - acc := "blobstream-api-tests" - kr := testfactory.GenerateKeyring(acc) - signer := types.NewKeyringSigner(kr, acc, "test") - - msgs := make([]*types.MsgPayForBlobs, 0) - blobs := make([]*types.Blob, 0) - nss := make([]namespace.Namespace, 0) - coreTxs := make(coretypes.Txs, 0) - - for i := 0; i < numberOfBlobs; i++ { - ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, blobSize) - msgs = append(msgs, msg) - blobs = append(blobs, blob) - nss = append(nss, ns) - coreTxs = append(coreTxs, coreTx) - } - - txs := make(coretypes.Txs, 0) - txs = append(txs, coreTxs...) - dataSquare, err := square.Construct( - txs.ToSliceOfBytes(), - appconsts.LatestVersion, - appconsts.SquareSizeUpperBound(appconsts.LatestVersion), - ) - require.NoError(t, err) - - // erasure the data square which we use to create the data root. - eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) - require.NoError(t, err) - - // create the new data root by creating the data availability header (merkle - // roots of each row and col of the erasure data). 
- dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(t, err) - dataRoot := dah.Hash() - api.blocks = append(api.blocks, testBlock{ - msgs: msgs, - blobs: blobs, - nss: nss, - coreTxs: coreTxs, - eds: eds, - dah: &dah, - dataRoot: dataRoot, - }) - - return len(api.blocks) - 1 -} - -// generateCommitmentProofFromBlock takes a block and a PFB index and generates the commitment proof -// using the traditional way of doing, instead of using the API. -func generateCommitmentProofFromBlock( - t *testing.T, - block testBlock, - blobIndex int, -) CommitmentProof { - // parse the namespace - ns, err := share.NamespaceFromBytes( - append( - []byte{byte(block.blobs[blobIndex].NamespaceVersion)}, - block.blobs[blobIndex].NamespaceId..., - ), - ) - require.NoError(t, err) - - // create the blob from the data - blb, err := blob.NewBlob( - uint8(block.blobs[blobIndex].ShareVersion), - ns, - block.blobs[blobIndex].Data, - ) - require.NoError(t, err) - - // convert the blob to a number of shares - blobShares, err := blob.BlobsToShares(blb) - require.NoError(t, err) - - // find the first share of the blob in the ODS - startShareIndex := -1 - for i, sh := range block.eds.FlattenedODS() { - if bytes.Equal(sh, blobShares[0]) { - startShareIndex = i - break - } - } - require.Greater(t, startShareIndex, 0) - - // create an inclusion proof of the blob using the share range instead of the commitment - sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( - block.eds, - ns.ToAppNamespace(), - shares.NewRange(startShareIndex, startShareIndex+len(blobShares)), - ) - require.NoError(t, err) - require.NoError(t, sharesProof.Validate(block.dataRoot)) - - // calculate the subtree roots - subtreeRoots := make([][]byte, 0) - dataCursor := 0 - for _, proof := range sharesProof.ShareProofs { - ranges, err := nmt.ToLeafRanges( - int(proof.Start), - int(proof.End), - shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), - ) - require.NoError(t, err) - roots, err 
:= computeSubtreeRoots( - blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], - ranges, - int(proof.Start), - ) - require.NoError(t, err) - subtreeRoots = append(subtreeRoots, roots...) - dataCursor += int(proof.End - proof.Start) - } - - // convert the nmt proof to be accepted by the commitment proof - nmtProofs := make([]*nmt.Proof, 0) - for _, proof := range sharesProof.ShareProofs { - nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) - nmtProofs = append(nmtProofs, &nmtProof) - } - - commitmentProof := CommitmentProof{ - SubtreeRoots: subtreeRoots, - SubtreeRootProofs: nmtProofs, - NamespaceID: sharesProof.NamespaceID, - RowProof: sharesProof.RowProof, - NamespaceVersion: uint8(sharesProof.NamespaceVersion), - } - - return commitmentProof -} - -// generateTestBlocks generates a set of test blocks with a specific blob size and number of -// transactions -func generateTestBlocks( - t *testing.T, - numberOfBlocks, blobSize, numberOfTransactions int, -) []testBlock { - require.Greater(t, numberOfBlocks, 1) - blocks := make([]testBlock, 0) - for i := 1; i <= numberOfBlocks; i++ { - nss, msgs, blobs, coreTxs := createTestBlobTransactions( - t, - numberOfTransactions, - blobSize, - ) - - txs := make(coretypes.Txs, 0) - txs = append(txs, coreTxs...) - dataSquare, err := square.Construct( - txs.ToSliceOfBytes(), - appconsts.LatestVersion, - appconsts.SquareSizeUpperBound(appconsts.LatestVersion), - ) - require.NoError(t, err) - - // erasure the data square which we use to create the data root. - eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) - require.NoError(t, err) - - // create the new data root by creating the data availability header (merkle - // roots of each row and col of the erasure data). 
- dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(t, err) - dataRoot := dah.Hash() - blocks = append(blocks, testBlock{ - msgs: msgs, - blobs: blobs, - nss: nss, - eds: eds, - dah: &dah, - dataRoot: dataRoot, - coreTxs: coreTxs, - }) - } - return blocks -} - -// createTestBlobTransactions generates a set of transactions that can be added to a blob. -// The number of transactions dictates the number of PFBs that will be returned. -// The size refers to the size of the data contained in the PFBs in bytes. -func createTestBlobTransactions( - t *testing.T, - numberOfTransactions, size int, -) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { - acc := "blobstream-api-tests" - kr := testfactory.GenerateKeyring(acc) - signer := types.NewKeyringSigner(kr, acc, "test") - - nss := make([]namespace.Namespace, 0) - msgs := make([]*types.MsgPayForBlobs, 0) - blobs := make([]*types.Blob, 0) - coreTxs := make([]coretypes.Tx, 0) - for i := 0; i < numberOfTransactions; i++ { - ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, size+i*1000) - nss = append(nss, ns) - msgs = append(msgs, msg) - blobs = append(blobs, blob) - coreTxs = append(coreTxs, coreTx) - } - - return nss, msgs, blobs, coreTxs -} - -// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific -// PFB size. The size is in bytes. 
-func createTestBlobTransaction( - t *testing.T, - signer *types.KeyringSigner, - size int, -) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { - addr, err := signer.GetSignerInfo().GetAddress() - require.NoError(t, err) - - ns := namespace.RandomBlobNamespace() - msg, blob := blobfactory.RandMsgPayForBlobsWithNamespaceAndSigner(addr.String(), ns, size) - require.NoError(t, err) - - builder := signer.NewTxBuilder() - stx, err := signer.BuildSignedTx(builder, msg) - require.NoError(t, err) - rawTx, err := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxEncoder()(stx) - require.NoError(t, err) - cTx, err := coretypes.MarshalBlobTx(rawTx, blob) - require.NoError(t, err) - return ns, msg, blob, cTx -} - -func TestShareToSubtreeRootProof(t *testing.T) { - shares := make([][]byte, 0) - // generate some shares - for i := 0; i < 10; i++ { - shares = append(shares, bytes.Repeat([]byte{0x1}, appconsts.ShareSize)) - } - // calculate the expected proof - subtreeRoot, expectedProofs := merkle.ProofsFromByteSlices(shares) - - // calculate the actual proofs - actualProofs := make([]*ResultShareToSubtreeRootProof, 0) - for i := range shares { - proof, err := ProveShareToSubtreeRoot(shares, uint64(i)) - require.NoError(t, err) - actualProofs = append(actualProofs, proof) - } - - // compare the proofs and validate - for shareIndex, actualProof := range actualProofs { - t.Run(fmt.Sprintf("shareIndex=%d", shareIndex), func(t *testing.T) { - valid, err := actualProof.ShareToSubtreeRootProof.Verify(subtreeRoot, shares[shareIndex]) - assert.NoError(t, err) - assert.True(t, valid) - assert.Equal(t, *expectedProofs[shareIndex], actualProof.ShareToSubtreeRootProof.Proof) - }) - } -} - -func TestSubtreeRootsToCommitmentProof(t *testing.T) { - rowRootSize := sha256.Size + 2*appconsts.NamespaceSize - subtreeRoots := make([][]byte, 0) - // generate some subtreeRoots - for i := 0; i < 10; i++ { - subtreeRoots = append(subtreeRoots, 
bytes.Repeat([]byte{0x1}, rowRootSize)) - } - // calculate the expected proof - shareCommitment, expectedProofs := merkle.ProofsFromByteSlices(subtreeRoots) - - // calculate the actual proofs - actualProofs := make([]*ResultSubtreeRootToCommitmentProof, 0) - for i := range subtreeRoots { - proof, err := ProveSubtreeRootToCommitment(subtreeRoots, uint64(i)) - require.NoError(t, err) - actualProofs = append(actualProofs, proof) - } - - // compare the proofs and validate - for subtreeRootIndex, actualProof := range actualProofs { - t.Run(fmt.Sprintf("subtreeRootIndex=%d", subtreeRootIndex), func(t *testing.T) { - valid, err := actualProof.SubtreeRootToCommitmentProof.Verify( - shareCommitment, - subtreeRoots[subtreeRootIndex], - ) - assert.NoError(t, err) - assert.True(t, valid) - assert.Equal( - t, - *expectedProofs[subtreeRootIndex], - actualProof.SubtreeRootToCommitmentProof.Proof, - ) - }) - } -} - -var _ nodeblob.Module = &mockBlobService{} - -type mockBlobService struct { - blocks []testBlock -} - -func (m mockBlobService) Submit( - _ context.Context, - _ []*blob.Blob, - _ blob.GasPrice, -) (height uint64, _ error) { - // TODO implement me - panic("implement me") -} - -func (m mockBlobService) Get( - ctx context.Context, - height uint64, - ns share.Namespace, - commitment blob.Commitment, -) (*blob.Blob, error) { - if height > uint64(len(m.blocks)) { - return nil, errors.New("height greater than the blockchain") - } - for i, msg := range m.blocks[height].msgs { - if bytes.Equal(msg.ShareCommitments[0], commitment) { - blb, err := blob.NewBlob( - uint8(m.blocks[height].blobs[i].ShareVersion), - ns, - m.blocks[height].blobs[i].Data, - ) - if err != nil { - return nil, err - } - return blb, nil - } - } - return nil, fmt.Errorf("coudln't find commitment") -} - -func (m mockBlobService) GetAll( - _ context.Context, - height uint64, - _ []share.Namespace, -) ([]*blob.Blob, error) { - // TODO implement me - panic("implement me") -} - -func (m mockBlobService) GetProof( 
- ctx context.Context, - height uint64, - ns share.Namespace, - commitment blob.Commitment, -) (*blob.Proof, error) { - if height >= uint64(len(m.blocks)) { - return nil, errors.New("height greater than the blockchain") - } - for i, msg := range m.blocks[height].msgs { - if bytes.Equal(msg.ShareCommitments[0], commitment) { - blobShareRange, err := square.BlobShareRange( - m.blocks[height].coreTxs.ToSliceOfBytes(), - i, - 0, - appconsts.LatestVersion, - ) - if err != nil { - return nil, err - } - proof, err := pkgproof.NewShareInclusionProofFromEDS( - m.blocks[height].eds, - m.blocks[height].nss[i], - blobShareRange, - ) - if err != nil { - return nil, err - } - nmtProofs := make([]*nmt.Proof, 0) - for _, proof := range proof.ShareProofs { - nmtProof := nmt.NewInclusionProof(int(proof.Start), - int(proof.End), - proof.Nodes, - true) - nmtProofs = append( - nmtProofs, - &nmtProof, - ) - } - blobProof := blob.Proof(nmtProofs) - return &blobProof, nil - } - } - return nil, fmt.Errorf("coudln't find commitment") -} - -func (m mockBlobService) Included( - _ context.Context, - height uint64, - _ share.Namespace, - _ *blob.Proof, - _ blob.Commitment, -) (bool, error) { - // TODO implement me - panic("implement me") -} - -var _ shareServ.Module = &mockShareService{} - -type mockShareService struct { - blocks []testBlock -} - -func (m mockShareService) SharesAvailable( - ctx context.Context, - extendedHeader *header.ExtendedHeader, -) error { - // TODO implement me - panic("implement me") -} - -func (m mockShareService) GetShare( - ctx context.Context, - header *header.ExtendedHeader, - row, col int, -) (share.Share, error) { - if header.Height() > uint64(len(m.blocks)) { - return nil, errors.New("height greater than the blockchain") - } - return m.blocks[header.Height()].eds.GetCell(uint(row), uint(col)), nil -} - -func (m mockShareService) GetEDS( - ctx context.Context, - header *header.ExtendedHeader, -) (*rsmt2d.ExtendedDataSquare, error) { - if header.Height() >= 
uint64(len(m.blocks)) { - return nil, errors.New("height greater than the blockchain") - } - return m.blocks[header.Height()].eds, nil -} - -func (m mockShareService) GetSharesByNamespace( - ctx context.Context, - header *header.ExtendedHeader, - namespace share.Namespace, -) (share.NamespacedShares, error) { - // TODO implement me - panic("implement me") -} - -var _ headerServ.Module = &mockHeaderService{} - -type mockHeaderService struct { - blocks []testBlock -} - -func (m mockHeaderService) LocalHead(ctx context.Context) (*header.ExtendedHeader, error) { - return &header.ExtendedHeader{ - RawHeader: header.RawHeader{ - Height: int64(len(m.blocks) - 1), - DataHash: m.blocks[len(m.blocks)-1].dataRoot, - }, - DAH: m.blocks[len(m.blocks)-1].dah, - }, nil -} - -func (m mockHeaderService) GetByHash( - ctx context.Context, - hash libhead.Hash, -) (*header.ExtendedHeader, error) { - // TODO implement me - panic("implement me") -} - -func (m mockHeaderService) GetRangeByHeight( - ctx context.Context, - from *header.ExtendedHeader, - to uint64, -) ([]*header.ExtendedHeader, error) { - // TODO implement me - panic("implement me") -} - -func (m mockHeaderService) GetByHeight( - ctx context.Context, - height uint64, -) (*header.ExtendedHeader, error) { - if height >= uint64(len(m.blocks)) { - return nil, errors.New("height greater than the blockchain") - } - return &header.ExtendedHeader{ - RawHeader: header.RawHeader{ - Height: int64(height), - DataHash: m.blocks[height].dataRoot, - }, - DAH: m.blocks[height].dah, - }, nil -} - -func (m mockHeaderService) WaitForHeight( - ctx context.Context, - u uint64, -) (*header.ExtendedHeader, error) { - // TODO implement me - panic("implement me") -} - -func (m mockHeaderService) SyncState(ctx context.Context) (sync.State, error) { - // TODO implement me - panic("implement me") -} - -func (m mockHeaderService) SyncWait(ctx context.Context) error { - // TODO implement me - panic("implement me") -} - -func (m mockHeaderService) 
NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) { - return &header.ExtendedHeader{ - RawHeader: header.RawHeader{ - Height: int64(len(m.blocks) - 1), - DataHash: m.blocks[len(m.blocks)-1].dataRoot, - }, - Commit: nil, - ValidatorSet: nil, - DAH: m.blocks[len(m.blocks)-1].dah, - }, nil -} - -func (m mockHeaderService) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - // TODO implement me - panic("implement me") -} diff --git a/nodebuilder/header/data_commitment.go b/nodebuilder/header/data_commitment.go new file mode 100644 index 0000000000..ec060b8b97 --- /dev/null +++ b/nodebuilder/header/data_commitment.go @@ -0,0 +1,242 @@ +package header + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/bytes" +) + +// DataCommitment is the data root tuple root. +type DataCommitment bytes.HexBytes + +// DataRootTupleInclusionProof is the binary merkle +// inclusion proof of a height to a data commitment. +type DataRootTupleInclusionProof *merkle.Proof + +// padBytes Pad bytes to given length +func padBytes(byt []byte, length int) ([]byte, error) { + l := len(byt) + if l > length { + return nil, fmt.Errorf( + "cannot pad bytes because length of bytes array: %d is greater than given length: %d", + l, + length, + ) + } + if l == length { + return byt, nil + } + tmp := make([]byte, length) + copy(tmp[length-l:], byt) + return tmp, nil +} + +// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. +// Used to mimic the result of `abi.encode(number)` in Ethereum. +func To32PaddedHexBytes(number uint64) ([]byte, error) { + hexRepresentation := strconv.FormatUint(number, 16) + // Make sure hex representation has even length. + // The `strconv.FormatUint` can return odd length hex encodings. + // For example, `strconv.FormatUint(10, 16)` returns `a`. + // Thus, we need to pad it. 
+ if len(hexRepresentation)%2 == 1 { + hexRepresentation = "0" + hexRepresentation + } + hexBytes, hexErr := hex.DecodeString(hexRepresentation) + if hexErr != nil { + return nil, hexErr + } + paddedBytes, padErr := padBytes(hexBytes, 32) + if padErr != nil { + return nil, padErr + } + return paddedBytes, nil +} + +// DataRootTuple contains the data that will be used to create the QGB commitments. +// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. +// For more information: +// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +type DataRootTuple struct { + height uint64 + dataRoot [32]byte +} + +// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of +// `abi.encode(...)` in Ethereum. +// The encoded type is a DataRootTuple, which has the following ABI: +// +// { +// "components":[ +// { +// "internalType":"uint256", +// "name":"height", +// "type":"uint256" +// }, +// { +// "internalType":"bytes32", +// "name":"dataRoot", +// "type":"bytes32" +// }, +// { +// "internalType":"structDataRootTuple", +// "name":"_tuple", +// "type":"tuple" +// } +// ] +// } +// +// padding the hex representation of the height padded to 32 bytes concatenated to the data root. +// For more information, refer to: +// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol +func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { + paddedHeight, err := To32PaddedHexBytes(height) + if err != nil { + return nil, err + } + return append(paddedHeight, dataRoot[:]...), nil +} + +// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. +// It's a local parameter to protect the API from creating unnecessarily large commitments. +const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. 
+ +// validateDataCommitmentRange runs basic checks on the asc sorted list of +// heights that will be used subsequently in generating data commitments over +// the defined set of heights. +func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end uint64) error { + if start == 0 { + return fmt.Errorf("the start block is 0") + } + if start >= end { + return fmt.Errorf("end block is smaller or equal to the start block") + } + heightsRange := end - start + if heightsRange > uint64(dataCommitmentBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) + } + + currentHeader, err := s.NetworkHead(ctx) + if err != nil { + return err + } + // the data commitment range is end exclusive + if end > currentHeader.Height()+1 { + return fmt.Errorf( + "end block %d is higher than current chain height %d", + end, + currentHeader.Height(), + ) + } + + currentLocalHeader, err := s.LocalHead(ctx) + if err != nil { + return err + } + // the data commitment range is end exclusive + if end > currentLocalHeader.Height()+1 { + return fmt.Errorf( + "end block %d is higher than local chain height %d. Wait for the node until it syncs up to %d", + end, + currentLocalHeader.Height(), + end, + ) + } + return nil +} + +// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square +// size, then returns their merkle root. 
+func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { + if len(tuples) == 0 { + return nil, fmt.Errorf("cannot hash an empty list of data root tuples") + } + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + root := merkle.HashFromByteSlices(dataRootEncodedTuples) + return root, nil +} + +// validateDataRootInclusionProofRequest validates the request to generate a data root +// inclusion proof. +func (s *Service) validateDataRootInclusionProofRequest( + ctx context.Context, + height, start, end uint64, +) error { + err := s.validateDataCommitmentRange(ctx, start, end) + if err != nil { + return err + } + if height < start || height >= end { + return fmt.Errorf( + "height %d should be in the end exclusive interval first_block %d last_block %d", + height, + start, + end, + ) + } + return nil +} + +// proveDataRootTuples returns the merkle inclusion proof for a height. 
+func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { + if len(tuples) == 0 { + return nil, fmt.Errorf("cannot prove an empty list of tuples") + } + if height < 0 { + return nil, fmt.Errorf("cannot prove a strictly negative height %d", height) + } + currentHeight := tuples[0].height - 1 + for _, tuple := range tuples { + if tuple.height != currentHeight+1 { + return nil, fmt.Errorf("the provided tuples are not consecutive %d vs %d", currentHeight, tuple.height) + } + currentHeight++ + } + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) + return proofs[height-int64(tuples[0].height)], nil +} + +// fetchDataRootTuples takes an end exclusive range of heights and fetches its +// corresponding data root tuples. +func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]DataRootTuple, error) { + tuples := make([]DataRootTuple, 0, end-start) + for height := start; height < end; height++ { + block, err := s.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("couldn't load block %d", height) + } + tuples = append(tuples, DataRootTuple{ + height: block.Height(), + dataRoot: *(*[32]byte)(block.DataHash), + }) + } + return tuples, nil +} diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index f807796eb6..128d684c16 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -43,6 +43,19 @@ type Module interface { // Subscribe to recent ExtendedHeaders from the network. 
Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) + + // GetDataCommitment collects the data roots over a provided ordered range of blocks, + // and then creates a new Merkle root of those data roots. The range is end exclusive. + GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) + + // GetDataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + GetDataRootInclusionProof( + ctx context.Context, + height int64, + start, end uint64, + ) (*DataRootTupleInclusionProof, error) } // API is a wrapper around Module for the RPC. @@ -59,12 +72,18 @@ type API struct { *header.ExtendedHeader, uint64, ) ([]*header.ExtendedHeader, error) `perm:"read"` - GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` - SyncWait func(ctx context.Context) error `perm:"read"` - NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` - Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` + GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` + SyncWait func(ctx context.Context) error `perm:"read"` + NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` + Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` + GetDataCommitment func(ctx context.Context, start, end uint64) (*DataCommitment, error) `perm:"read"` + GetDataRootInclusionProof func( + ctx context.Context, + height int64, + start, end uint64, 
+ ) (*DataRootTupleInclusionProof, error) `perm:"read"` } } @@ -107,3 +126,15 @@ func (api *API) NetworkHead(ctx context.Context) (*header.ExtendedHeader, error) func (api *API) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { return api.Internal.Subscribe(ctx) } + +func (api *API) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { + return api.Internal.GetDataCommitment(ctx, start, end) +} + +func (api *API) GetDataRootInclusionProof( + ctx context.Context, + height int64, + start, end uint64, +) (*DataRootTupleInclusionProof, error) { + return api.Internal.GetDataRootInclusionProof(ctx, height, start, end) +} diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index b0d2b961d9..03bc31f4e7 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -9,7 +9,8 @@ import ( reflect "reflect" header "github.com/celestiaorg/celestia-node/header" - header0 "github.com/celestiaorg/go-header" + header0 "github.com/celestiaorg/celestia-node/nodebuilder/header" + header1 "github.com/celestiaorg/go-header" sync "github.com/celestiaorg/go-header/sync" gomock "github.com/golang/mock/gomock" ) @@ -38,7 +39,7 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // GetByHash mocks base method. -func (m *MockModule) GetByHash(arg0 context.Context, arg1 header0.Hash) (*header.ExtendedHeader, error) { +func (m *MockModule) GetByHash(arg0 context.Context, arg1 header1.Hash) (*header.ExtendedHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetByHash", arg0, arg1) ret0, _ := ret[0].(*header.ExtendedHeader) @@ -67,6 +68,36 @@ func (mr *MockModuleMockRecorder) GetByHeight(arg0, arg1 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByHeight", reflect.TypeOf((*MockModule)(nil).GetByHeight), arg0, arg1) } +// GetDataCommitment mocks base method. 
+func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*header0.DataCommitment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) + ret0, _ := ret[0].(*header0.DataCommitment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataCommitment indicates an expected call of GetDataCommitment. +func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) +} + +// GetDataRootInclusionProof mocks base method. +func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataRootInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*header0.DataRootTupleInclusionProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataRootInclusionProof indicates an expected call of GetDataRootInclusionProof. +func (mr *MockModuleMockRecorder) GetDataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootInclusionProof), arg0, arg1, arg2, arg3) +} + // GetRangeByHeight mocks base method. 
func (m *MockModule) GetRangeByHeight(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 uint64) ([]*header.ExtendedHeader, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index e769cd5299..63c0dc3cb1 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -141,3 +141,62 @@ func (s *Service) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, }() return headerCh, nil } + +// GetDataCommitment collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. The range is end exclusive. +func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { + log.Debugw("validating the data commitment range", "start", start, "end", end) + err := s.validateDataCommitmentRange(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + tuples, err := s.fetchDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("hashing the data root tuples", "start", start, "end", end) + root, err := hashDataRootTuples(tuples) + if err != nil { + return nil, err + } + // Create data commitment + dataCommitment := DataCommitment(root) + return &dataCommitment, nil +} + +// GetDataRootInclusionProof creates an inclusion proof for the data root of block +// height `height` in the set of blocks defined by `start` and `end`. The range +// is end exclusive. 
+func (s *Service) GetDataRootInclusionProof( + ctx context.Context, + height int64, + start, + end uint64, +) (*DataRootTupleInclusionProof, error) { + log.Debugw( + "validating the data root inclusion proof request", + "start", + start, + "end", + end, + "height", + height, + ) + err := s.validateDataRootInclusionProofRequest(ctx, uint64(height), start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + tuples, err := s.fetchDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("proving the data root tuples", "start", start, "end", end) + proof, err := proveDataRootTuples(tuples, height) + if err != nil { + return nil, err + } + dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) + return &dataRootTupleInclusionProof, nil +} diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 14d5ada87d..22c2ccc39a 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -2,10 +2,13 @@ package header import ( "context" + "encoding/hex" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/go-header/sync" @@ -39,3 +42,315 @@ func (d *errorSyncer[H]) State() sync.State { func (d *errorSyncer[H]) SyncWait(context.Context) error { return fmt.Errorf("dummy error") } + +func TestPadBytes(t *testing.T) { + tests := []struct { + input []byte + length int + expected []byte + expectErr bool + }{ + {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, + {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, + } + + for _, test := range tests { + result, err := padBytes(test.input, test.length) + if 
test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + } +} + +func TestTo32PaddedHexBytes(t *testing.T) { + tests := []struct { + number uint64 + expected []byte + expectError bool + }{ + { + number: 10, + expected: func() []byte { + res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 4294967295, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") + return res + }(), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { + result, err := To32PaddedHexBytes(test.number) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + }) + } +} + +func TestEncodeDataRootTuple(t *testing.T) { + height := uint64(2) + dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") + require.NoError(t, err) + + expectedEncoding, err := hex.DecodeString( + // hex representation of height padded to 32 bytes + "0000000000000000000000000000000000000000000000000000000000000002" + + // data root + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", + ) + require.NoError(t, err) + require.NotNil(t, expectedEncoding) + + actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + require.NoError(t, err) + require.NotNil(t, actualEncoding) + + // Check that the length of packed data is correct + assert.Equal(t, len(actualEncoding), 64) + 
assert.Equal(t, expectedEncoding, actualEncoding) +} + +func TestHashDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples []DataRootTuple + expectedHash []byte + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "valid list of data root tuples": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + }, + expectedHash: func() []byte { + tuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) + + return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := hashDataRootTuples(tc.tuples) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedHash, result) + } + }) + } +} + +func TestProveDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples []DataRootTuple + height int64 + expectedProof merkle.Proof + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "strictly negative height": { + height: -1, + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + }, + expectErr: true, + }, + "non consecutive list of tuples at the beginning": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 3, + dataRoot: [32]byte{0x2}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, + }, + "non consecutive list of tuples in the middle": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x2}, + }, + { + height: 5, + dataRoot: [32]byte{0x4}, + }, + { + height: 6, + dataRoot: [32]byte{0x5}, + }, + }, + expectErr: true, + }, + "non consecutive list of tuples at the end": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: 
[32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, + }, + "duplicate height at the beginning": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectErr: true, + }, + "duplicate height in the middle": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x3}, + }, + }, + expectErr: true, + }, + "duplicate height at the end": { + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + }, + expectErr: true, + }, + "valid proof": { + height: 3, + tuples: []DataRootTuple{ + { + height: 1, + dataRoot: [32]byte{0x1}, + }, + { + height: 2, + dataRoot: [32]byte{0x2}, + }, + { + height: 3, + dataRoot: [32]byte{0x3}, + }, + { + height: 4, + dataRoot: [32]byte{0x4}, + }, + }, + expectedProof: func() merkle.Proof { + encodedTuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := EncodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := EncodeDataRootTuple(4, [32]byte{0x4}) + _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) + return *proofs[2] + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := proveDataRootTuples(tc.tuples, tc.height) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + } + }) + } +} diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 5a774b8b9b..8f196f3b1d 100644 --- a/nodebuilder/module.go 
+++ b/nodebuilder/module.go @@ -8,7 +8,6 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -57,7 +56,6 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store node.ConstructModule(tp), pruner.ConstructModule(tp, &cfg.Pruner), rpc.ConstructModule(tp, &cfg.RPC), - blobstream.ConstructModule(), ) return fx.Module( diff --git a/nodebuilder/node.go b/nodebuilder/node.go index bf8aec668b..9ec1c4d4e0 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -22,7 +22,6 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -70,15 +69,14 @@ type Node struct { // p2p protocols PubSub *pubsub.PubSub // services - ShareServ share.Module // not optional - HeaderServ header.Module // not optional - StateServ state.Module // not optional - FraudServ fraud.Module // not optional - BlobServ blob.Module // not optional - DASer das.Module // not optional - AdminServ node.Module // not optional - DAMod da.Module // not optional - BlobstreamMod blobstream.Module + ShareServ share.Module // not optional + HeaderServ header.Module // not optional + StateServ state.Module // not optional + FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional + DASer das.Module // not optional + AdminServ node.Module // not optional + DAMod da.Module // not optional // 
start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 5db26e52f5..43a8055207 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -5,7 +5,6 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" - "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -27,7 +26,6 @@ func registerEndpoints( nodeMod node.Module, blobMod blob.Module, daMod da.Module, - blobstreamMod blobstream.Module, serv *rpc.Server, ) { serv.RegisterService("fraud", fraudMod, &fraud.API{}) @@ -39,7 +37,6 @@ func registerEndpoints( serv.RegisterService("node", nodeMod, &node.API{}) serv.RegisterService("blob", blobMod, &blob.API{}) serv.RegisterService("da", daMod, &da.API{}) - serv.RegisterService("blobstream", blobstreamMod, &blobstream.API{}) } func server(cfg *Config, auth jwt.Signer) *rpc.Server { diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 12c6b9c628..10bec434b3 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -9,14 +9,15 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/ipld" ) -func newShareModule(getter share.Getter, avail share.Availability) Module { - return &module{getter, avail} +func newShareModule(getter share.Getter, avail share.Availability, header headerServ.Module) Module { + return &module{getter, avail, header} } // 
ensureEmptyCARExists adds an empty EDS to the provided EDS store. diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index 4e21cecae0..c24a5dc771 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -12,6 +12,7 @@ import ( share "github.com/celestiaorg/celestia-node/share" rsmt2d "github.com/celestiaorg/rsmt2d" gomock "github.com/golang/mock/gomock" + types "github.com/tendermint/tendermint/types" ) // MockModule is a mock of Module interface. @@ -52,6 +53,22 @@ func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEDS", reflect.TypeOf((*MockModule)(nil).GetEDS), arg0, arg1) } +// GetRange mocks base method. +func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) ([][]byte, *types.ShareProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRange", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(*types.ShareProof) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetRange indicates an expected call of GetRange. +func (mr *MockModuleMockRecorder) GetRange(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRange", reflect.TypeOf((*MockModule)(nil).GetRange), arg0, arg1, arg2, arg3) +} + // GetShare mocks base method. 
func (m *MockModule) GetShare(arg0 context.Context, arg1 *header.ExtendedHeader, arg2, arg3 int) ([]byte, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index a8e1e1c895..a79a13f492 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -2,11 +2,16 @@ package share import ( "context" + "fmt" + + "github.com/tendermint/tendermint/types" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" ) var _ Module = (*API)(nil) @@ -40,6 +45,8 @@ type Module interface { GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, ) (share.NamespacedShares, error) + // GetRange gets a list of shares and their corresponding proof. + GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) } // API is a wrapper around Module for the RPC. 
@@ -61,6 +68,11 @@ type API struct { header *header.ExtendedHeader, namespace share.Namespace, ) (share.NamespacedShares, error) `perm:"read"` + GetRange func( + ctx context.Context, + height uint64, + start, end int, + ) ([]share.Share, *types.ShareProof, error) `perm:"read"` } } @@ -76,6 +88,10 @@ func (api *API) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsm return api.Internal.GetEDS(ctx, header) } +func (api *API) GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) { + return api.Internal.GetRange(ctx, height, start, end) +} + func (api *API) GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, @@ -87,8 +103,25 @@ func (api *API) GetSharesByNamespace( type module struct { share.Getter share.Availability + hs headerServ.Module } func (m module) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { return m.Availability.SharesAvailable(ctx, header) } + +func (m module) GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) { + if height == 0 { + return nil, nil, fmt.Errorf("height cannot be equal to 0") + } + extendedHeader, err := m.hs.GetByHeight(ctx, height) + if err != nil { + return nil, nil, err + } + extendedDataSquare, err := m.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, nil, err + } + proof, err := eds.ProveShares(extendedDataSquare, start, end) + return extendedDataSquare.FlattenedODS()[start:end], proof, err +} diff --git a/share/eds/eds.go b/share/eds/eds.go index b8a332f275..22c359a4f5 100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -11,7 +11,10 @@ import ( "github.com/ipfs/go-cid" "github.com/ipld/go-car" "github.com/ipld/go-car/util" + "github.com/tendermint/tendermint/types" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" 
"github.com/celestiaorg/rsmt2d" @@ -271,3 +274,30 @@ func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d } return eds, nil } + +// ProveShares generates a share proof for a share range. +// The share range, defined by start and end, is end-exclusive. +func ProveShares(eds *rsmt2d.ExtendedDataSquare, start, end int) (*types.ShareProof, error) { + log.Debugw("proving share range", "start", start, "end", end) + if start == end { + return nil, fmt.Errorf("start share cannot be equal to end share") + } + if start > end { + return nil, fmt.Errorf("start share %d cannot be greater than end share %d", start, end) + } + + odsShares, err := shares.FromBytes(eds.FlattenedODS()) + if err != nil { + return nil, err + } + nID, err := pkgproof.ParseNamespace(odsShares, start, end) + if err != nil { + return nil, err + } + log.Debugw("generating the share proof", "start", start, "end", end) + proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(start, end)) + if err != nil { + return nil, err + } + return &proof, nil +} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go index b5e02fe14a..fb615fb84f 100644 --- a/share/eds/eds_test.go +++ b/share/eds/eds_test.go @@ -15,9 +15,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/rand" + coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -281,3 +285,66 @@ func createTestData(t *testing.T, testDir string) { //nolint:unused require.NoError(t, err, "writing example root to file") f.Close() } + +func TestProveShares(t *testing.T) { + ns := 
namespace.RandomBlobNamespace() + eds, dataRoot := edstest.RandEDSWithNamespace( + t, + ns.Bytes(), + 16, + ) + + tests := map[string]struct { + start, end int + expectedProof coretypes.ShareProof + expectErr bool + }{ + "start share == end share": { + start: 2, + end: 2, + expectErr: true, + }, + "start share > end share": { + start: 3, + end: 2, + expectErr: true, + }, + "start share > number of shares in the block": { + start: 2000, + end: 2010, + expectErr: true, + }, + "end share > number of shares in the block": { + start: 1, + end: 2010, + expectErr: true, + }, + "valid case": { + start: 0, + end: 2, + expectedProof: func() coretypes.ShareProof { + proof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + ns, + shares.NewRange(0, 2), + ) + require.NoError(t, err) + require.NoError(t, proof.Validate(dataRoot.Hash())) + return proof + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := ProveShares(eds, tc.start, tc.end) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + assert.NoError(t, result.Validate(dataRoot.Hash())) + } + }) + } +} diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index bf5e664f90..d38db72396 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -4,8 +4,19 @@ import ( "testing" "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/pkg/square" "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/celestia-app/test/util/blobfactory" + 
"github.com/celestiaorg/celestia-app/test/util/testfactory" + "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -46,3 +57,95 @@ func RandEDSWithNamespace( require.NoError(t, err) return eds, dah } + +// GenerateTestBlock generates a set of test blocks with a specific blob size and number of +// transactions +func GenerateTestBlock( + t *testing.T, + blobSize, numberOfTransactions int, +) ( + []*types.MsgPayForBlobs, + []*types.Blob, + []namespace.Namespace, + *rsmt2d.ExtendedDataSquare, + coretypes.Txs, + *da.DataAvailabilityHeader, + []byte, +) { + nss, msgs, blobs, coreTxs := createTestBlobTransactions( + t, + numberOfTransactions, + blobSize, + ) + + txs := make(coretypes.Txs, 0) + txs = append(txs, coreTxs...) + dataSquare, err := square.Construct( + txs.ToSliceOfBytes(), + appconsts.LatestVersion, + appconsts.SquareSizeUpperBound(appconsts.LatestVersion), + ) + require.NoError(t, err) + + // erasure the data square which we use to create the data root. + eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) + require.NoError(t, err) + + // create the new data root by creating the data availability header (merkle + // roots of each row and col of the erasure data). + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + dataRoot := dah.Hash() + + return msgs, blobs, nss, eds, coreTxs, &dah, dataRoot +} + +// createTestBlobTransactions generates a set of transactions that can be added to a blob. +// The number of transactions dictates the number of PFBs that will be returned. +// The size refers to the size of the data contained in the PFBs in bytes. 
+func createTestBlobTransactions( + t *testing.T, + numberOfTransactions, size int, +) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { + acc := "blobstream-api-tests" + kr := testfactory.GenerateKeyring(acc) + signer := types.NewKeyringSigner(kr, acc, "test") + + nss := make([]namespace.Namespace, 0) + msgs := make([]*types.MsgPayForBlobs, 0) + blobs := make([]*types.Blob, 0) + coreTxs := make([]coretypes.Tx, 0) + for i := 0; i < numberOfTransactions; i++ { + ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, size+i*1000) + nss = append(nss, ns) + msgs = append(msgs, msg) + blobs = append(blobs, blob) + coreTxs = append(coreTxs, coreTx) + } + + return nss, msgs, blobs, coreTxs +} + +// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific +// PFB size. The size is in bytes. +func createTestBlobTransaction( + t *testing.T, + signer *types.KeyringSigner, + size int, +) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { + addr, err := signer.GetSignerInfo().GetAddress() + require.NoError(t, err) + + ns := namespace.RandomBlobNamespace() + msg, blob := blobfactory.RandMsgPayForBlobsWithNamespaceAndSigner(addr.String(), ns, size) + require.NoError(t, err) + + builder := signer.NewTxBuilder() + stx, err := signer.BuildSignedTx(builder, msg) + require.NoError(t, err) + rawTx, err := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxEncoder()(stx) + require.NoError(t, err) + cTx, err := coretypes.MarshalBlobTx(rawTx, blob) + require.NoError(t, err) + return ns, msg, blob, cTx +} From 576c077ec6f32d48bfc20adfe77fcb1396134f24 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Sat, 13 Jul 2024 12:31:21 +0200 Subject: [PATCH 27/52] chore: imports --- blob/service.go | 1 - blob/service_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/blob/service.go b/blob/service.go index b17ad1d76e..70714d53b6 100644 --- a/blob/service.go +++ 
b/blob/service.go @@ -20,7 +20,6 @@ import ( appns "github.com/celestiaorg/celestia-app/pkg/namespace" pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" - blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" diff --git a/blob/service_test.go b/blob/service_test.go index 93d2afbab7..efbc6a65bf 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -19,8 +19,8 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/celestiaorg/celestia-app/pkg/appconsts" - pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" appns "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/go-header/store" From 1f2dba206d326ca7d0262a37cbe139d198c0aef8 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Sat, 13 Jul 2024 12:44:52 +0200 Subject: [PATCH 28/52] fix: wrap the get range return value --- nodebuilder/share/share.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index a79a13f492..944f6eeecc 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -16,6 +16,13 @@ import ( var _ Module = (*API)(nil) +// GetRangeResult wraps the return value of the GetRange endpoint +// because Json-RPC doesn't support more than two return values. +type GetRangeResult struct { + Shares []share.Share + Proof *types.ShareProof +} + // Module provides access to any data square or block share on the network. 
// // All Get methods provided on Module follow the following flow: @@ -46,7 +53,7 @@ type Module interface { ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, ) (share.NamespacedShares, error) // GetRange gets a list of shares and their corresponding proof. - GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) + GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) } // API is a wrapper around Module for the RPC. @@ -72,7 +79,7 @@ type API struct { ctx context.Context, height uint64, start, end int, - ) ([]share.Share, *types.ShareProof, error) `perm:"read"` + ) (*GetRangeResult, error) `perm:"read"` } } @@ -88,7 +95,7 @@ func (api *API) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsm return api.Internal.GetEDS(ctx, header) } -func (api *API) GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) { +func (api *API) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) { return api.Internal.GetRange(ctx, height, start, end) } @@ -110,18 +117,24 @@ func (m module) SharesAvailable(ctx context.Context, header *header.ExtendedHead return m.Availability.SharesAvailable(ctx, header) } -func (m module) GetRange(ctx context.Context, height uint64, start, end int) ([]share.Share, *types.ShareProof, error) { +func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) { if height == 0 { - return nil, nil, fmt.Errorf("height cannot be equal to 0") + return nil, fmt.Errorf("height cannot be equal to 0") } extendedHeader, err := m.hs.GetByHeight(ctx, height) if err != nil { - return nil, nil, err + return nil, err } extendedDataSquare, err := m.GetEDS(ctx, extendedHeader) if err != nil { - return nil, nil, err + return nil, err } proof, err := eds.ProveShares(extendedDataSquare, start, end) - return 
extendedDataSquare.FlattenedODS()[start:end], proof, err + if err != nil { + return nil, err + } + return &GetRangeResult{ + extendedDataSquare.FlattenedODS()[start:end], + proof, + }, nil } From 765240d89a53187b0efcc2d5f0b6fd4a0c39c3d9 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Sat, 13 Jul 2024 12:49:05 +0200 Subject: [PATCH 29/52] chore: lint --- nodebuilder/blob/blob.go | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index 1b5afbd739..dc5c099f47 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -45,11 +45,35 @@ type Module interface { type API struct { Internal struct { - Submit func(context.Context, []*blob.Blob, *blob.SubmitOptions) (uint64, error) `perm:"write"` - Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` - GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` - GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` - Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + Submit func( + context.Context, + []*blob.Blob, + *blob.SubmitOptions, + ) (uint64, error) `perm:"write"` + Get func( + context.Context, + uint64, + share.Namespace, + blob.Commitment, + ) (*blob.Blob, error) `perm:"read"` + GetAll func( + context.Context, + uint64, + []share.Namespace, + ) ([]*blob.Blob, error) `perm:"read"` + GetProof func( + context.Context, + uint64, + share.Namespace, + blob.Commitment, + ) (*blob.Proof, error) `perm:"read"` + Included func( + context.Context, + uint64, + share.Namespace, + *blob.Proof, + blob.Commitment, + ) (bool, error) `perm:"read"` GetCommitmentProof func( ctx context.Context, height uint64, From 3b2680b6bee005aa82fdc5a9ced69b0a77aa46ee Mon Sep 17 00:00:00 2001 From: sweexordious Date: Tue, 16 Jul 2024 
16:47:26 +0200 Subject: [PATCH 30/52] chore: update nmt dependency --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c89f82378f..f2cc330bb1 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/celestiaorg/go-fraud v0.2.1 github.com/celestiaorg/go-header v0.6.2 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51 // TODO replace with release when ready + github.com/celestiaorg/nmt v0.22.0 github.com/celestiaorg/rsmt2d v0.13.1 github.com/cosmos/cosmos-sdk v0.46.16 github.com/cristalhq/jwt/v5 v5.4.0 diff --git a/go.sum b/go.sum index 47580841d1..56c7e41e0d 100644 --- a/go.sum +++ b/go.sum @@ -369,8 +369,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26 h1:P2RI1xJ49EZ8cuHMcH+ZSBonfRDtBS8OS9Jdt1BWX3k= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26/go.mod h1:2m8ukndOegwB0PU0AfJCwDUQHqd7QQRlSXvQL5VToVY= -github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51 h1:vOLlAiHwCtXA7LNsXokDysmPHl2UvorPTARyhHQPQQA= -github.com/celestiaorg/nmt v0.21.1-0.20240602221058-a81b748b6f51/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= +github.com/celestiaorg/nmt v0.22.0 h1:AGtfmBiVgreR1KkIV5R7XFNeMp/H4IUDLlBbLjZZ3zk= +github.com/celestiaorg/nmt v0.22.0/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= github.com/celestiaorg/rsmt2d v0.13.1 h1:eRhp79DKTkDojwInKVs1lRK6f6zJc1BVlmZfUfI19yQ= From 8c56c741f9ad6f64b2ea1a1ffef534267fb46968 Mon Sep 17 00:00:00 2001 From: sweexordious Date: 
Fri, 19 Jul 2024 10:42:15 +0100 Subject: [PATCH 31/52] chore: use a ErrHeightNegative --- nodebuilder/header/data_commitment.go | 6 +++--- nodebuilder/header/service.go | 6 ++++++ nodebuilder/share/share.go | 5 ----- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/nodebuilder/header/data_commitment.go b/nodebuilder/header/data_commitment.go index ec060b8b97..f35a7395ff 100644 --- a/nodebuilder/header/data_commitment.go +++ b/nodebuilder/header/data_commitment.go @@ -110,7 +110,7 @@ const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-seco // the defined set of heights. func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end uint64) error { if start == 0 { - return fmt.Errorf("the start block is 0") + return ErrHeightNegative } if start >= end { return fmt.Errorf("end block is smaller or equal to the start block") @@ -196,8 +196,8 @@ func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, e if len(tuples) == 0 { return nil, fmt.Errorf("cannot prove an empty list of tuples") } - if height < 0 { - return nil, fmt.Errorf("cannot prove a strictly negative height %d", height) + if height <= 0 { + return nil, ErrHeightNegative } currentHeight := tuples[0].height - 1 for _, tuple := range tuples { diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 63c0dc3cb1..e2d8872522 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -13,6 +13,9 @@ import ( modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" ) +// ErrHeightNegative returned when the provided block height is <= 0. +var ErrHeightNegative = errors.New("height is negative") + // Service represents the header Service that can be started / stopped on a node. // Service's main function is to manage its sub-services. Service can contain several // sub-services, such as Exchange, ExchangeServer, Syncer, and so forth. 
@@ -64,6 +67,9 @@ func (s *Service) GetRangeByHeight( } func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + if height == 0 { + return nil, ErrHeightNegative + } head, err := s.syncer.Head(ctx) switch { case err != nil: diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index 944f6eeecc..e1efb3842b 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -2,8 +2,6 @@ package share import ( "context" - "fmt" - "github.com/tendermint/tendermint/types" "github.com/celestiaorg/rsmt2d" @@ -118,9 +116,6 @@ func (m module) SharesAvailable(ctx context.Context, header *header.ExtendedHead } func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) { - if height == 0 { - return nil, fmt.Errorf("height cannot be equal to 0") - } extendedHeader, err := m.hs.GetByHeight(ctx, height) if err != nil { return nil, err From 609e71450255d8569b237bc754f4da1b1e272e5f Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 10:50:34 +0100 Subject: [PATCH 32/52] chore: rename to data root tuple root --- ..._commitment.go => data_root_tuple_root.go} | 24 +++++++------- nodebuilder/header/header.go | 32 +++++++++---------- nodebuilder/header/mocks/api.go | 28 ++++++++-------- nodebuilder/header/service.go | 14 ++++---- 4 files changed, 49 insertions(+), 49 deletions(-) rename nodebuilder/header/{data_commitment.go => data_root_tuple_root.go} (88%) diff --git a/nodebuilder/header/data_commitment.go b/nodebuilder/header/data_root_tuple_root.go similarity index 88% rename from nodebuilder/header/data_commitment.go rename to nodebuilder/header/data_root_tuple_root.go index f35a7395ff..dd756ac2c9 100644 --- a/nodebuilder/header/data_commitment.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -10,8 +10,9 @@ import ( "github.com/tendermint/tendermint/libs/bytes" ) -// DataCommitment is the data root tuple root. 
-type DataCommitment bytes.HexBytes +// DataRootTupleRoot is the root of the merkle tree created +// from a set of data root tuples. +type DataRootTupleRoot bytes.HexBytes // DataRootTupleInclusionProof is the binary merkle // inclusion proof of a height to a data commitment. @@ -57,10 +58,9 @@ func To32PaddedHexBytes(number uint64) ([]byte, error) { return paddedBytes, nil } -// DataRootTuple contains the data that will be used to create the QGB commitments. -// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. +// DataRootTuple contains the data that will be used to generate the Blobstream data root tuple roots. // For more information: -// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol type DataRootTuple struct { height uint64 dataRoot [32]byte @@ -101,14 +101,14 @@ func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { return append(paddedHeight, dataRoot[:]...), nil } -// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. +// dataRootTupleRootBlocksLimit The maximum number of blocks to be used to create a data commitment. // It's a local parameter to protect the API from creating unnecessarily large commitments. -const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. +const dataRootTupleRootBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. -// validateDataCommitmentRange runs basic checks on the asc sorted list of +// validateDataRootTupleRootRange runs basic checks on the asc sorted list of // heights that will be used subsequently in generating data commitments over // the defined set of heights. 
-func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end uint64) error { +func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end uint64) error { if start == 0 { return ErrHeightNegative } @@ -116,8 +116,8 @@ func (s *Service) validateDataCommitmentRange(ctx context.Context, start, end ui return fmt.Errorf("end block is smaller or equal to the start block") } heightsRange := end - start - if heightsRange > uint64(dataCommitmentBlocksLimit) { - return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) + if heightsRange > uint64(dataRootTupleRootBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataRootTupleRootBlocksLimit) } currentHeader, err := s.NetworkHead(ctx) @@ -176,7 +176,7 @@ func (s *Service) validateDataRootInclusionProofRequest( ctx context.Context, height, start, end uint64, ) error { - err := s.validateDataCommitmentRange(ctx, start, end) + err := s.validateDataRootTupleRootRange(ctx, start, end) if err != nil { return err } diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index 128d684c16..20eb53ef40 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -44,14 +44,14 @@ type Module interface { // Subscribe to recent ExtendedHeaders from the network. Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) - // GetDataCommitment collects the data roots over a provided ordered range of blocks, + // GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. 
- GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) + GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) - // GetDataRootInclusionProof creates an inclusion proof for the data root of block + // GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range // is end exclusive. - GetDataRootInclusionProof( + GetDataRootTupleInclusionProof( ctx context.Context, height int64, start, end uint64, @@ -72,14 +72,14 @@ type API struct { *header.ExtendedHeader, uint64, ) ([]*header.ExtendedHeader, error) `perm:"read"` - GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` - SyncWait func(ctx context.Context) error `perm:"read"` - NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` - Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` - GetDataCommitment func(ctx context.Context, start, end uint64) (*DataCommitment, error) `perm:"read"` - GetDataRootInclusionProof func( + GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` + SyncWait func(ctx context.Context) error `perm:"read"` + NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` + Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` + GetDataRootTupleRoot func(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) `perm:"read"` + GetDataRootTupleInclusionProof func( ctx context.Context, height int64, start, end 
uint64, @@ -127,14 +127,14 @@ func (api *API) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, e return api.Internal.Subscribe(ctx) } -func (api *API) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { - return api.Internal.GetDataCommitment(ctx, start, end) +func (api *API) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { + return api.Internal.GetDataRootTupleRoot(ctx, start, end) } -func (api *API) GetDataRootInclusionProof( +func (api *API) GetDataRootTupleInclusionProof( ctx context.Context, height int64, start, end uint64, ) (*DataRootTupleInclusionProof, error) { - return api.Internal.GetDataRootInclusionProof(ctx, height, start, end) + return api.Internal.GetDataRootTupleInclusionProof(ctx, height, start, end) } diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index 03bc31f4e7..9a80bdb97c 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -68,34 +68,34 @@ func (mr *MockModuleMockRecorder) GetByHeight(arg0, arg1 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByHeight", reflect.TypeOf((*MockModule)(nil).GetByHeight), arg0, arg1) } -// GetDataCommitment mocks base method. -func (m *MockModule) GetDataCommitment(arg0 context.Context, arg1, arg2 uint64) (*header0.DataCommitment, error) { +// GetDataRootTupleInclusionProof mocks base method. +func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataCommitment", arg0, arg1, arg2) - ret0, _ := ret[0].(*header0.DataCommitment) + ret := m.ctrl.Call(m, "GetDataRootTupleInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*header0.DataRootTupleInclusionProof) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDataCommitment indicates an expected call of GetDataCommitment. 
-func (mr *MockModuleMockRecorder) GetDataCommitment(arg0, arg1, arg2 interface{}) *gomock.Call { +// GetDataRootTupleInclusionProof indicates an expected call of GetDataRootTupleInclusionProof. +func (mr *MockModuleMockRecorder) GetDataRootTupleInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataCommitment", reflect.TypeOf((*MockModule)(nil).GetDataCommitment), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleInclusionProof), arg0, arg1, arg2, arg3) } -// GetDataRootInclusionProof mocks base method. -func (m *MockModule) GetDataRootInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { +// GetDataRootTupleRoot mocks base method. +func (m *MockModule) GetDataRootTupleRoot(arg0 context.Context, arg1, arg2 uint64) (*header0.DataRootTupleRoot, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataRootInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*header0.DataRootTupleInclusionProof) + ret := m.ctrl.Call(m, "GetDataRootTupleRoot", arg0, arg1, arg2) + ret0, _ := ret[0].(*header0.DataRootTupleRoot) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetDataRootInclusionProof indicates an expected call of GetDataRootInclusionProof. -func (mr *MockModuleMockRecorder) GetDataRootInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// GetDataRootTupleRoot indicates an expected call of GetDataRootTupleRoot. 
+func (mr *MockModuleMockRecorder) GetDataRootTupleRoot(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootInclusionProof), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleRoot", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleRoot), arg0, arg1, arg2) } // GetRangeByHeight mocks base method. diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index e2d8872522..f8f4185d45 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -148,11 +148,11 @@ func (s *Service) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, return headerCh, nil } -// GetDataCommitment collects the data roots over a provided ordered range of blocks, +// GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. 
-func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*DataCommitment, error) { +func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { log.Debugw("validating the data commitment range", "start", start, "end", end) - err := s.validateDataCommitmentRange(ctx, start, end) + err := s.validateDataRootTupleRootRange(ctx, start, end) if err != nil { return nil, err } @@ -167,14 +167,14 @@ func (s *Service) GetDataCommitment(ctx context.Context, start, end uint64) (*Da return nil, err } // Create data commitment - dataCommitment := DataCommitment(root) - return &dataCommitment, nil + dataRootTupleRoot := DataRootTupleRoot(root) + return &dataRootTupleRoot, nil } -// GetDataRootInclusionProof creates an inclusion proof for the data root of block +// GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block // height `height` in the set of blocks defined by `start` and `end`. The range // is end exclusive. -func (s *Service) GetDataRootInclusionProof( +func (s *Service) GetDataRootTupleInclusionProof( ctx context.Context, height int64, start, From 4bc4d7dfdd4c95457f6e8cf81c0516e5767a89f8 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 10:51:19 +0100 Subject: [PATCH 33/52] chore: rename to data root tuple root --- nodebuilder/header/data_root_tuple_root.go | 4 ++-- nodebuilder/share/share.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index dd756ac2c9..539194f915 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -58,8 +58,8 @@ func To32PaddedHexBytes(number uint64) ([]byte, error) { return paddedBytes, nil } -// DataRootTuple contains the data that will be used to generate the Blobstream data root tuple roots. 
-// For more information: +// DataRootTuple contains the data that will be used to generate the Blobstream data root tuple +// roots. For more information: // https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol type DataRootTuple struct { height uint64 diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index e1efb3842b..a2cef51170 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -2,6 +2,7 @@ package share import ( "context" + "github.com/tendermint/tendermint/types" "github.com/celestiaorg/rsmt2d" From 76a1aaff700b780303e501f92218ea329f7c3fbc Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 11:39:50 +0100 Subject: [PATCH 34/52] chore: un-export data root tuple --- nodebuilder/header/data_root_tuple_root.go | 30 ++++++++--------- nodebuilder/header/service_test.go | 38 +++++++++++----------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index 539194f915..c30d740b91 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -36,9 +36,9 @@ func padBytes(byt []byte, length int) ([]byte, error) { return tmp, nil } -// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. +// to32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. // Used to mimic the result of `abi.encode(number)` in Ethereum. -func To32PaddedHexBytes(number uint64) ([]byte, error) { +func to32PaddedHexBytes(number uint64) ([]byte, error) { hexRepresentation := strconv.FormatUint(number, 16) // Make sure hex representation has even length. // The `strconv.FormatUint` can return odd length hex encodings. 
@@ -58,17 +58,17 @@ func To32PaddedHexBytes(number uint64) ([]byte, error) { return paddedBytes, nil } -// DataRootTuple contains the data that will be used to generate the Blobstream data root tuple +// dataRootTuple contains the data that will be used to generate the Blobstream data root tuple // roots. For more information: // https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol -type DataRootTuple struct { +type dataRootTuple struct { height uint64 dataRoot [32]byte } -// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of +// encodeDataRootTuple takes a height and a data root, and returns the equivalent of // `abi.encode(...)` in Ethereum. -// The encoded type is a DataRootTuple, which has the following ABI: +// The encoded type is a dataRootTuple, which has the following ABI: // // { // "components":[ @@ -93,8 +93,8 @@ type DataRootTuple struct { // padding the hex representation of the height padded to 32 bytes concatenated to the data root. // For more information, refer to: // https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol -func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { - paddedHeight, err := To32PaddedHexBytes(height) +func encodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { + paddedHeight, err := to32PaddedHexBytes(height) if err != nil { return nil, err } @@ -151,13 +151,13 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end // hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square // size, then returns their merkle root. 
-func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { +func hashDataRootTuples(tuples []dataRootTuple) ([]byte, error) { if len(tuples) == 0 { return nil, fmt.Errorf("cannot hash an empty list of data root tuples") } dataRootEncodedTuples := make([][]byte, 0, len(tuples)) for _, tuple := range tuples { - encodedTuple, err := EncodeDataRootTuple( + encodedTuple, err := encodeDataRootTuple( tuple.height, tuple.dataRoot, ) @@ -192,7 +192,7 @@ func (s *Service) validateDataRootInclusionProofRequest( } // proveDataRootTuples returns the merkle inclusion proof for a height. -func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { +func proveDataRootTuples(tuples []dataRootTuple, height int64) (*merkle.Proof, error) { if len(tuples) == 0 { return nil, fmt.Errorf("cannot prove an empty list of tuples") } @@ -208,7 +208,7 @@ func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, e } dataRootEncodedTuples := make([][]byte, 0, len(tuples)) for _, tuple := range tuples { - encodedTuple, err := EncodeDataRootTuple( + encodedTuple, err := encodeDataRootTuple( tuple.height, tuple.dataRoot, ) @@ -223,8 +223,8 @@ func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, e // fetchDataRootTuples takes an end exclusive range of heights and fetches its // corresponding data root tuples. 
-func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]DataRootTuple, error) { - tuples := make([]DataRootTuple, 0, end-start) +func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]dataRootTuple, error) { + tuples := make([]dataRootTuple, 0, end-start) for height := start; height < end; height++ { block, err := s.GetByHeight(ctx, height) if err != nil { @@ -233,7 +233,7 @@ func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([ if block == nil { return nil, fmt.Errorf("couldn't load block %d", height) } - tuples = append(tuples, DataRootTuple{ + tuples = append(tuples, dataRootTuple{ height: block.Height(), dataRoot: *(*[32]byte)(block.DataHash), }) diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 22c2ccc39a..a0fa2efa75 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -105,7 +105,7 @@ func TestTo32PaddedHexBytes(t *testing.T) { for _, test := range tests { t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { - result, err := To32PaddedHexBytes(test.number) + result, err := to32PaddedHexBytes(test.number) if test.expectError { assert.Error(t, err) } else { @@ -130,7 +130,7 @@ func TestEncodeDataRootTuple(t *testing.T) { require.NoError(t, err) require.NotNil(t, expectedEncoding) - actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + actualEncoding, err := encodeDataRootTuple(height, *(*[32]byte)(dataRoot)) require.NoError(t, err) require.NotNil(t, actualEncoding) @@ -141,13 +141,13 @@ func TestEncodeDataRootTuple(t *testing.T) { func TestHashDataRootTuples(t *testing.T) { tests := map[string]struct { - tuples []DataRootTuple + tuples []dataRootTuple expectedHash []byte expectErr bool }{ "empty tuples list": {tuples: nil, expectErr: true}, "valid list of data root tuples": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ 
-158,8 +158,8 @@ func TestHashDataRootTuples(t *testing.T) { }, }, expectedHash: func() []byte { - tuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) - tuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) }(), @@ -181,7 +181,7 @@ func TestHashDataRootTuples(t *testing.T) { func TestProveDataRootTuples(t *testing.T) { tests := map[string]struct { - tuples []DataRootTuple + tuples []dataRootTuple height int64 expectedProof merkle.Proof expectErr bool @@ -189,7 +189,7 @@ func TestProveDataRootTuples(t *testing.T) { "empty tuples list": {tuples: nil, expectErr: true}, "strictly negative height": { height: -1, - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -198,7 +198,7 @@ func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "non consecutive list of tuples at the beginning": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -215,7 +215,7 @@ func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "non consecutive list of tuples in the middle": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -240,7 +240,7 @@ func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "non consecutive list of tuples at the end": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -257,7 +257,7 @@ func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "duplicate height at the beginning": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -274,7 +274,7 @@ func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "duplicate height in the middle": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -295,7 +295,7 @@ 
func TestProveDataRootTuples(t *testing.T) { expectErr: true, }, "duplicate height at the end": { - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -313,7 +313,7 @@ func TestProveDataRootTuples(t *testing.T) { }, "valid proof": { height: 3, - tuples: []DataRootTuple{ + tuples: []dataRootTuple{ { height: 1, dataRoot: [32]byte{0x1}, @@ -332,10 +332,10 @@ func TestProveDataRootTuples(t *testing.T) { }, }, expectedProof: func() merkle.Proof { - encodedTuple1, _ := EncodeDataRootTuple(1, [32]byte{0x1}) - encodedTuple2, _ := EncodeDataRootTuple(2, [32]byte{0x2}) - encodedTuple3, _ := EncodeDataRootTuple(3, [32]byte{0x3}) - encodedTuple4, _ := EncodeDataRootTuple(4, [32]byte{0x4}) + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) return *proofs[2] }(), From 2c53cb00bcd6a976d6afd97b7e0d31eda0de1e56 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 11:57:39 +0100 Subject: [PATCH 35/52] docs: document GetDataRootTupleInclusionProof and GetDataRootTupleRoot --- nodebuilder/header/header.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index 20eb53ef40..76be3095af 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -46,11 +46,13 @@ type Module interface { // GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, // and then creates a new Merkle root of those data roots. The range is end exclusive. + // It's in the header module because it only needs access to the headers to generate the proof. 
GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) - // GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block - // height `height` in the set of blocks defined by `start` and `end`. The range + // GetDataRootTupleInclusionProof creates an inclusion proof, for the data root tuple of block + // height `height`, in the set of blocks defined by `start` and `end`. The range // is end exclusive. + // It's in the header module because it only needs access to the headers to generate the proof. GetDataRootTupleInclusionProof( ctx context.Context, height int64, From 8bc6a664b4c2c9ad1220d0b42abd9c71d6f82822 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:04:16 +0100 Subject: [PATCH 36/52] chore: use uint64 for heights --- nodebuilder/header/data_root_tuple_root.go | 10 +++++----- nodebuilder/header/header.go | 9 +++------ nodebuilder/header/service.go | 10 ++++------ nodebuilder/header/service_test.go | 12 +----------- 4 files changed, 13 insertions(+), 28 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index c30d740b91..72a3d2d569 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -110,7 +110,7 @@ const dataRootTupleRootBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-s // the defined set of heights. func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end uint64) error { if start == 0 { - return ErrHeightNegative + return ErrHeightZero } if start >= end { return fmt.Errorf("end block is smaller or equal to the start block") @@ -192,12 +192,12 @@ func (s *Service) validateDataRootInclusionProofRequest( } // proveDataRootTuples returns the merkle inclusion proof for a height. 
-func proveDataRootTuples(tuples []dataRootTuple, height int64) (*merkle.Proof, error) { +func proveDataRootTuples(tuples []dataRootTuple, height uint64) (*merkle.Proof, error) { if len(tuples) == 0 { return nil, fmt.Errorf("cannot prove an empty list of tuples") } - if height <= 0 { - return nil, ErrHeightNegative + if height == 0 { + return nil, ErrHeightZero } currentHeight := tuples[0].height - 1 for _, tuple := range tuples { @@ -218,7 +218,7 @@ func proveDataRootTuples(tuples []dataRootTuple, height int64) (*merkle.Proof, e dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) } _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) - return proofs[height-int64(tuples[0].height)], nil + return proofs[height-tuples[0].height], nil } // fetchDataRootTuples takes an end exclusive range of heights and fetches its diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index 76be3095af..1892afeb94 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -55,8 +55,7 @@ type Module interface { // It's in the header module because it only needs access to the headers to generate the proof. 
GetDataRootTupleInclusionProof( ctx context.Context, - height int64, - start, end uint64, + height, start, end uint64, ) (*DataRootTupleInclusionProof, error) } @@ -83,8 +82,7 @@ type API struct { GetDataRootTupleRoot func(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) `perm:"read"` GetDataRootTupleInclusionProof func( ctx context.Context, - height int64, - start, end uint64, + height, start, end uint64, ) (*DataRootTupleInclusionProof, error) `perm:"read"` } } @@ -135,8 +133,7 @@ func (api *API) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*D func (api *API) GetDataRootTupleInclusionProof( ctx context.Context, - height int64, - start, end uint64, + height, start, end uint64, ) (*DataRootTupleInclusionProof, error) { return api.Internal.GetDataRootTupleInclusionProof(ctx, height, start, end) } diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index f8f4185d45..7d7c3d9666 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -13,8 +13,8 @@ import ( modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" ) -// ErrHeightNegative returned when the provided block height is <= 0. -var ErrHeightNegative = errors.New("height is negative") +// ErrHeightZero returned when the provided block height is equal to 0. +var ErrHeightZero = errors.New("height is equal to 0") // Service represents the header Service that can be started / stopped on a node. // Service's main function is to manage its sub-services. Service can contain several @@ -68,7 +68,7 @@ func (s *Service) GetRangeByHeight( func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { if height == 0 { - return nil, ErrHeightNegative + return nil, ErrHeightZero } head, err := s.syncer.Head(ctx) switch { @@ -176,9 +176,7 @@ func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) ( // is end exclusive. 
func (s *Service) GetDataRootTupleInclusionProof( ctx context.Context, - height int64, - start, - end uint64, + height, start, end uint64, ) (*DataRootTupleInclusionProof, error) { log.Debugw( "validating the data root inclusion proof request", diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index a0fa2efa75..3bde8a6400 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -182,21 +182,11 @@ func TestHashDataRootTuples(t *testing.T) { func TestProveDataRootTuples(t *testing.T) { tests := map[string]struct { tuples []dataRootTuple - height int64 + height uint64 expectedProof merkle.Proof expectErr bool }{ "empty tuples list": {tuples: nil, expectErr: true}, - "strictly negative height": { - height: -1, - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - }, - expectErr: true, - }, "non consecutive list of tuples at the beginning": { tuples: []dataRootTuple{ { From 6917aa2f6b97b1fb9c6188ce7c306393feca66b3 Mon Sep 17 00:00:00 2001 From: CHAMI Rachid Date: Fri, 19 Jul 2024 13:06:09 +0200 Subject: [PATCH 37/52] Update nodebuilder/header/service.go Co-authored-by: rene <41963722+renaynay@users.noreply.github.com> --- nodebuilder/header/service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 7d7c3d9666..71a1854840 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -192,6 +192,7 @@ func (s *Service) GetDataRootTupleInclusionProof( return nil, err } log.Debugw("fetching the data root tuples", "start", start, "end", end) + tuples, err := s.fetchDataRootTuples(ctx, start, end) if err != nil { return nil, err From d44ffdba9d97ef54c1c72b19b8e72546476d351e Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:08:54 +0100 Subject: [PATCH 38/52] chore: regenerate api --- nodebuilder/header/mocks/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index 9a80bdb97c..eaf7f49cc0 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -69,7 +69,7 @@ func (mr *MockModuleMockRecorder) GetByHeight(arg0, arg1 interface{}) *gomock.Ca } // GetDataRootTupleInclusionProof mocks base method. -func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1 int64, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { +func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDataRootTupleInclusionProof", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*header0.DataRootTupleInclusionProof) From 18452f6da5f8c1313463ea03f6f1faf8ed7d0cb6 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:09:08 +0100 Subject: [PATCH 39/52] fix: remove call to network head --- nodebuilder/header/data_root_tuple_root.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index 72a3d2d569..2045140114 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -120,22 +120,9 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataRootTupleRootBlocksLimit) } - currentHeader, err := s.NetworkHead(ctx) - if err != nil { - return err - } - // the data commitment range is end exclusive - if end > currentHeader.Height()+1 { - return fmt.Errorf( - "end block %d is higher than current chain height %d", - end, - currentHeader.Height(), - ) - } - currentLocalHeader, err := s.LocalHead(ctx) if err != nil { - return err + return fmt.Errorf("couldn't get the local head to validate the data root tuple root range: %w", err) } // the 
data commitment range is end exclusive if end > currentLocalHeader.Height()+1 { From e5f6a0541c7b8f92db5441678eee73efc79476ca Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:10:35 +0100 Subject: [PATCH 40/52] docs: comment suggestion by @renaynay --- nodebuilder/header/data_root_tuple_root.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index 2045140114..d59073c7ec 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -105,9 +105,9 @@ func encodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { // It's a local parameter to protect the API from creating unnecessarily large commitments. const dataRootTupleRootBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. -// validateDataRootTupleRootRange runs basic checks on the asc sorted list of +// validateDataRootTupleRootRange runs basic checks on the ascending sorted list of // heights that will be used subsequently in generating data commitments over -// the defined set of heights. +// the defined set of heights by ensuring the range exists in the chain. 
func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end uint64) error { if start == 0 { return ErrHeightZero From e5d1719947f91b7504f03f26862f5636458765a5 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:11:13 +0100 Subject: [PATCH 41/52] docs: whitespace as suggested by @renaynay --- nodebuilder/header/data_root_tuple_root.go | 1 + 1 file changed, 1 insertion(+) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index d59073c7ec..ed6068a761 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -115,6 +115,7 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end if start >= end { return fmt.Errorf("end block is smaller or equal to the start block") } + heightsRange := end - start if heightsRange > uint64(dataRootTupleRootBlocksLimit) { return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataRootTupleRootBlocksLimit) From f8a7354dee7f124a1e98e6006aff4a936cfa2532 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:18:08 +0100 Subject: [PATCH 42/52] docs: whitespace as suggested by @renaynay --- nodebuilder/header/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 71a1854840..455d0bffd1 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -192,7 +192,7 @@ func (s *Service) GetDataRootTupleInclusionProof( return nil, err } log.Debugw("fetching the data root tuples", "start", start, "end", end) - + tuples, err := s.fetchDataRootTuples(ctx, start, end) if err != nil { return nil, err From 9d4675857994f89431985c3ea9191607a9712858 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:24:56 +0100 Subject: [PATCH 43/52] chore: use GetHeaderRange instead of manually getting the headers --- 
nodebuilder/header/data_root_tuple_root.go | 29 +++++++++------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index ed6068a761..99336e932d 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -187,13 +187,6 @@ func proveDataRootTuples(tuples []dataRootTuple, height uint64) (*merkle.Proof, if height == 0 { return nil, ErrHeightZero } - currentHeight := tuples[0].height - 1 - for _, tuple := range tuples { - if tuple.height != currentHeight+1 { - return nil, fmt.Errorf("the provided tuples are not consecutive %d vs %d", currentHeight, tuple.height) - } - currentHeight++ - } dataRootEncodedTuples := make([][]byte, 0, len(tuples)) for _, tuple := range tuples { encodedTuple, err := encodeDataRootTuple( @@ -211,19 +204,21 @@ func proveDataRootTuples(tuples []dataRootTuple, height uint64) (*merkle.Proof, // fetchDataRootTuples takes an end exclusive range of heights and fetches its // corresponding data root tuples. +// end is not included in the range. 
func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]dataRootTuple, error) { tuples := make([]dataRootTuple, 0, end-start) - for height := start; height < end; height++ { - block, err := s.GetByHeight(ctx, height) - if err != nil { - return nil, err - } - if block == nil { - return nil, fmt.Errorf("couldn't load block %d", height) - } + startHeader, err := s.GetByHeight(ctx, start) + if err != nil { + return nil, err + } + headerRange, err := s.GetRangeByHeight(ctx, startHeader, end) + if err != nil { + return nil, err + } + for _, header := range headerRange { tuples = append(tuples, dataRootTuple{ - height: block.Height(), - dataRoot: *(*[32]byte)(block.DataHash), + height: header.Height(), + dataRoot: *(*[32]byte)(header.DataHash), }) } return tuples, nil From b9d7da2abe995a162e0e9fcd7fc53981f5d261e7 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 12:59:18 +0100 Subject: [PATCH 44/52] refactor: remove data root tuple type --- nodebuilder/header/data_root_tuple_root.go | 72 +++------ nodebuilder/header/service.go | 10 +- nodebuilder/header/service_test.go | 170 +++------------------ 3 files changed, 49 insertions(+), 203 deletions(-) diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/header/data_root_tuple_root.go index 99336e932d..69dbfd1c17 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/header/data_root_tuple_root.go @@ -58,14 +58,6 @@ func to32PaddedHexBytes(number uint64) ([]byte, error) { return paddedBytes, nil } -// dataRootTuple contains the data that will be used to generate the Blobstream data root tuple -// roots. For more information: -// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol -type dataRootTuple struct { - height uint64 - dataRoot [32]byte -} - // encodeDataRootTuple takes a height and a data root, and returns the equivalent of // `abi.encode(...)` in Ethereum. 
// The encoded type is a dataRootTuple, which has the following ABI: @@ -137,24 +129,13 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end return nil } -// hashDataRootTuples hashes a list of blocks data root tuples, i.e., height, data root and square -// size, then returns their merkle root. -func hashDataRootTuples(tuples []dataRootTuple) ([]byte, error) { - if len(tuples) == 0 { - return nil, fmt.Errorf("cannot hash an empty list of data root tuples") - } - dataRootEncodedTuples := make([][]byte, 0, len(tuples)) - for _, tuple := range tuples { - encodedTuple, err := encodeDataRootTuple( - tuple.height, - tuple.dataRoot, - ) - if err != nil { - return nil, err - } - dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) +// hashDataRootTuples hashes a list of encoded blocks data root tuples, i.e., height, data root and +// square size, then returns their merkle root. +func hashDataRootTuples(encodedDataRootTuples [][]byte) ([]byte, error) { + if len(encodedDataRootTuples) == 0 { + return nil, fmt.Errorf("cannot hash an empty list of encoded data root tuples") } - root := merkle.HashFromByteSlices(dataRootEncodedTuples) + root := merkle.HashFromByteSlices(encodedDataRootTuples) return root, nil } @@ -180,33 +161,23 @@ func (s *Service) validateDataRootInclusionProofRequest( } // proveDataRootTuples returns the merkle inclusion proof for a height. -func proveDataRootTuples(tuples []dataRootTuple, height uint64) (*merkle.Proof, error) { - if len(tuples) == 0 { - return nil, fmt.Errorf("cannot prove an empty list of tuples") +// expects the list of encoded data root tuples to be ordered and the heights to be consecutive. 
+func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, height uint64) (*merkle.Proof, error) { + if len(encodedDataRootTuples) == 0 { + return nil, fmt.Errorf("cannot prove an empty list of encoded data root tuples") } - if height == 0 { + if height == 0 || rangeStartHeight == 0 { return nil, ErrHeightZero } - dataRootEncodedTuples := make([][]byte, 0, len(tuples)) - for _, tuple := range tuples { - encodedTuple, err := encodeDataRootTuple( - tuple.height, - tuple.dataRoot, - ) - if err != nil { - return nil, err - } - dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) - } - _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) - return proofs[height-tuples[0].height], nil + _, proofs := merkle.ProofsFromByteSlices(encodedDataRootTuples) + return proofs[height-rangeStartHeight], nil } -// fetchDataRootTuples takes an end exclusive range of heights and fetches its +// fetchEncodedDataRootTuples takes an end exclusive range of heights and fetches its // corresponding data root tuples. // end is not included in the range. 
-func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([]dataRootTuple, error) { - tuples := make([]dataRootTuple, 0, end-start) +func (s *Service) fetchEncodedDataRootTuples(ctx context.Context, start, end uint64) ([][]byte, error) { + encodedDataRootTuples := make([][]byte, 0, end-start) startHeader, err := s.GetByHeight(ctx, start) if err != nil { return nil, err @@ -216,10 +187,11 @@ func (s *Service) fetchDataRootTuples(ctx context.Context, start, end uint64) ([ return nil, err } for _, header := range headerRange { - tuples = append(tuples, dataRootTuple{ - height: header.Height(), - dataRoot: *(*[32]byte)(header.DataHash), - }) + encodedDataRootTuple, err := encodeDataRootTuple(header.Height(), *(*[32]byte)(header.DataHash)) + if err != nil { + return nil, err + } + encodedDataRootTuples = append(encodedDataRootTuples, encodedDataRootTuple) } - return tuples, nil + return encodedDataRootTuples, nil } diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 455d0bffd1..1e40a917c8 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -157,12 +157,12 @@ func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) ( return nil, err } log.Debugw("fetching the data root tuples", "start", start, "end", end) - tuples, err := s.fetchDataRootTuples(ctx, start, end) + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) if err != nil { return nil, err } log.Debugw("hashing the data root tuples", "start", start, "end", end) - root, err := hashDataRootTuples(tuples) + root, err := hashDataRootTuples(encodedDataRootTuples) if err != nil { return nil, err } @@ -187,18 +187,18 @@ func (s *Service) GetDataRootTupleInclusionProof( "height", height, ) - err := s.validateDataRootInclusionProofRequest(ctx, uint64(height), start, end) + err := s.validateDataRootInclusionProofRequest(ctx, height, start, end) if err != nil { return nil, err } log.Debugw("fetching 
the data root tuples", "start", start, "end", end) - tuples, err := s.fetchDataRootTuples(ctx, start, end) + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) if err != nil { return nil, err } log.Debugw("proving the data root tuples", "start", start, "end", end) - proof, err := proveDataRootTuples(tuples, height) + proof, err := proveDataRootTuples(encodedDataRootTuples, start, height) if err != nil { return nil, err } diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 3bde8a6400..789f97d51e 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -141,22 +141,17 @@ func TestEncodeDataRootTuple(t *testing.T) { func TestHashDataRootTuples(t *testing.T) { tests := map[string]struct { - tuples []dataRootTuple + tuples [][]byte expectedHash []byte expectErr bool }{ "empty tuples list": {tuples: nil, expectErr: true}, - "valid list of data root tuples": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - }, + "valid list of encoded data root tuples": { + tuples: func() [][]byte { + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + return [][]byte{tuple1, tuple2} + }(), expectedHash: func() []byte { tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) @@ -181,146 +176,25 @@ func TestHashDataRootTuples(t *testing.T) { func TestProveDataRootTuples(t *testing.T) { tests := map[string]struct { - tuples []dataRootTuple + tuples [][]byte height uint64 + rangeStart uint64 expectedProof merkle.Proof expectErr bool }{ - "empty tuples list": {tuples: nil, expectErr: true}, - "non consecutive list of tuples at the beginning": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 3, - dataRoot: [32]byte{0x2}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, 
- expectErr: true, - }, - "non consecutive list of tuples in the middle": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x2}, - }, - { - height: 5, - dataRoot: [32]byte{0x4}, - }, - { - height: 6, - dataRoot: [32]byte{0x5}, - }, - }, - expectErr: true, - }, - "non consecutive list of tuples at the end": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectErr: true, - }, - "duplicate height at the beginning": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, - expectErr: true, - }, - "duplicate height in the middle": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x3}, - }, - }, - expectErr: true, - }, - "duplicate height at the end": { - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - }, - expectErr: true, - }, + "empty tuples list": {tuples: [][]byte{{0x1}}, expectErr: true}, + "start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, + "range start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, "valid proof": { - height: 3, - tuples: []dataRootTuple{ - { - height: 1, - dataRoot: [32]byte{0x1}, - }, - { - height: 2, - dataRoot: [32]byte{0x2}, - }, - { - height: 3, - dataRoot: [32]byte{0x3}, - }, - { - height: 4, - dataRoot: [32]byte{0x4}, - }, - }, + height: 3, + rangeStart: 1, + tuples: func() [][]byte { + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + 
encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) + return [][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4} + }(), expectedProof: func() merkle.Proof { encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) @@ -334,7 +208,7 @@ func TestProveDataRootTuples(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - result, err := proveDataRootTuples(tc.tuples, tc.height) + result, err := proveDataRootTuples(tc.tuples, tc.rangeStart, tc.height) if tc.expectErr { assert.Error(t, err) } else { From 8276e5ac2b1989e30182f9b70eacb3c84b8dc1c8 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 13:17:58 +0100 Subject: [PATCH 45/52] chore: bump app version and remove unnecessary checks --- go.mod | 4 ++-- go.sum | 8 ++++---- share/eds/eds.go | 6 ------ 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index f2cc330bb1..fce0e8928e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/BurntSushi/toml v1.4.0 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 - github.com/celestiaorg/celestia-app v1.12.0 + github.com/celestiaorg/celestia-app v1.13.0 github.com/celestiaorg/go-fraud v0.2.1 github.com/celestiaorg/go-header v0.6.2 github.com/celestiaorg/go-libp2p-messenger v0.2.0 @@ -356,5 +356,5 @@ replace ( github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // broken goleveldb needs to be replaced for the cosmos-sdk and celestia-app github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29 
) diff --git a/go.sum b/go.sum index 56c7e41e0d..a6de05d7d2 100644 --- a/go.sum +++ b/go.sum @@ -353,10 +353,10 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.12.0 h1:7SMTI/sB8jxp7QPJQRi/liAREnToAD5nOyA7M+naPIc= -github.com/celestiaorg/celestia-app v1.12.0/go.mod h1:O/idsViCLLFdcaE4cJ+iZctZLX0KWfRheKT2W18W2uM= -github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29 h1:9nJDE37cTg/Cx+f4FS2g7yYeoLrsaNJg36XsQ47sS1A= -github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29/go.mod h1:IIdMu9gnDtjUmZkFuBN4Bf11z/rBtlL2rtwbQxdbRAU= +github.com/celestiaorg/celestia-app v1.13.0 h1:7MWEox6lim6WDyiP84Y2/ERfWUJxWPfZlKxzO6OFcig= +github.com/celestiaorg/celestia-app v1.13.0/go.mod h1:CF9VZwWAlTU0Is/BOsmxqkbkYnnmrgl0YRlSBIzr0m0= +github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29 h1:HwbA4OegRvXX0aNchBA7Cmu+oIxnH7xRcOhISuDP0ak= +github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29/go.mod h1:MyElURdWAOJkOp84WZnfEUJ+OLvTwOOHG2lbK9E8XRI= github.com/celestiaorg/cosmos-sdk v1.23.0-sdk-v0.46.16 h1:N2uETI13szEKnGAdKhtTR0EsrpcW0AwRKYER74WLnuw= github.com/celestiaorg/cosmos-sdk v1.23.0-sdk-v0.46.16/go.mod h1:Bpl1LSWiDpQumgOhhMTZBMopqa0j7fRasIhvTZB44P0= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= diff --git a/share/eds/eds.go b/share/eds/eds.go index 22c359a4f5..079d7636b9 100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -279,12 +279,6 @@ func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d // The share range, defined by start and end, is end-exclusive. 
func ProveShares(eds *rsmt2d.ExtendedDataSquare, start, end int) (*types.ShareProof, error) { log.Debugw("proving share range", "start", start, "end", end) - if start == end { - return nil, fmt.Errorf("start share cannot be equal to end share") - } - if start > end { - return nil, fmt.Errorf("start share %d cannot be greater than end share %d", start, end) - } odsShares, err := shares.FromBytes(eds.FlattenedODS()) if err != nil { From da27e8a929a54df33dc2328a5795679d9df89819 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 23:04:52 +0100 Subject: [PATCH 46/52] refactor: reintroduce the blobstream module --- nodebuilder/blobstream/blobstream.go | 48 +++++ .../data_root_tuple_root.go | 14 +- nodebuilder/blobstream/mocks/api.go | 66 +++++++ nodebuilder/blobstream/module.go | 12 ++ nodebuilder/blobstream/service.go | 81 ++++++++ nodebuilder/blobstream/service_test.go | 187 ++++++++++++++++++ nodebuilder/header/header.go | 42 +--- nodebuilder/header/mocks/api.go | 35 +--- nodebuilder/header/service.go | 58 ------ nodebuilder/header/service_test.go | 179 ----------------- nodebuilder/module.go | 2 + nodebuilder/node.go | 18 +- 12 files changed, 422 insertions(+), 320 deletions(-) create mode 100644 nodebuilder/blobstream/blobstream.go rename nodebuilder/{header => blobstream}/data_root_tuple_root.go (94%) create mode 100644 nodebuilder/blobstream/mocks/api.go create mode 100644 nodebuilder/blobstream/module.go create mode 100644 nodebuilder/blobstream/service.go create mode 100644 nodebuilder/blobstream/service_test.go diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go new file mode 100644 index 0000000000..57a8ab26cf --- /dev/null +++ b/nodebuilder/blobstream/blobstream.go @@ -0,0 +1,48 @@ +package blobstream + +import ( + "context" +) + +var _ Module = (*API)(nil) + +// Module defines the API related to interacting with the data root tuples proofs +// +//go:generate mockgen -destination=mocks/api.go -package=mocks 
. Module +type Module interface { + // GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, + // and then creates a new Merkle root of those data roots. The range is end exclusive. + // It's in the header module because it only needs access to the headers to generate the proof. + GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) + + // GetDataRootTupleInclusionProof creates an inclusion proof, for the data root tuple of block + // height `height`, in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + // It's in the header module because it only needs access to the headers to generate the proof. + GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, + ) (*DataRootTupleInclusionProof, error) +} + +// API is a wrapper around the Module for RPC. +type API struct { + Internal struct { + GetDataRootTupleRoot func(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) `perm:"read"` + GetDataRootTupleInclusionProof func( + ctx context.Context, + height, start, end uint64, + ) (*DataRootTupleInclusionProof, error) `perm:"read"` + } +} + +func (api *API) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { + return api.Internal.GetDataRootTupleRoot(ctx, start, end) +} + +func (api *API) GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, +) (*DataRootTupleInclusionProof, error) { + return api.Internal.GetDataRootTupleInclusionProof(ctx, height, start, end) +} diff --git a/nodebuilder/header/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go similarity index 94% rename from nodebuilder/header/data_root_tuple_root.go rename to nodebuilder/blobstream/data_root_tuple_root.go index 69dbfd1c17..ae314d6d2d 100644 --- a/nodebuilder/header/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -1,4 +1,4 @@ -package header 
+package blobstream import ( "context" @@ -8,6 +8,8 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/bytes" + + "github.com/celestiaorg/celestia-node/nodebuilder/header" ) // DataRootTupleRoot is the root of the merkle tree created @@ -102,7 +104,7 @@ const dataRootTupleRootBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-s // the defined set of heights by ensuring the range exists in the chain. func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end uint64) error { if start == 0 { - return ErrHeightZero + return header.ErrHeightZero } if start >= end { return fmt.Errorf("end block is smaller or equal to the start block") @@ -113,7 +115,7 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataRootTupleRootBlocksLimit) } - currentLocalHeader, err := s.LocalHead(ctx) + currentLocalHeader, err := s.headerServ.LocalHead(ctx) if err != nil { return fmt.Errorf("couldn't get the local head to validate the data root tuple root range%w", err) } @@ -167,7 +169,7 @@ func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, heigh return nil, fmt.Errorf("cannot prove an empty list of encoded data root tuples") } if height == 0 || rangeStartHeight == 0 { - return nil, ErrHeightZero + return nil, header.ErrHeightZero } _, proofs := merkle.ProofsFromByteSlices(encodedDataRootTuples) return proofs[height-rangeStartHeight], nil @@ -178,11 +180,11 @@ func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, heigh // end is not included in the range. 
func (s *Service) fetchEncodedDataRootTuples(ctx context.Context, start, end uint64) ([][]byte, error) { encodedDataRootTuples := make([][]byte, 0, end-start) - startHeader, err := s.GetByHeight(ctx, start) + startHeader, err := s.headerServ.GetByHeight(ctx, start) if err != nil { return nil, err } - headerRange, err := s.GetRangeByHeight(ctx, startHeader, end) + headerRange, err := s.headerServ.GetRangeByHeight(ctx, startHeader, end) if err != nil { return nil, err } diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go new file mode 100644 index 0000000000..e4dff86a78 --- /dev/null +++ b/nodebuilder/blobstream/mocks/api.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/blobstream (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetDataRootTupleInclusionProof mocks base method. 
+func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1, arg2, arg3 uint64) (*blobstream.DataRootTupleInclusionProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataRootTupleInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.DataRootTupleInclusionProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataRootTupleInclusionProof indicates an expected call of GetDataRootTupleInclusionProof. +func (mr *MockModuleMockRecorder) GetDataRootTupleInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleInclusionProof), arg0, arg1, arg2, arg3) +} + +// GetDataRootTupleRoot mocks base method. +func (m *MockModule) GetDataRootTupleRoot(arg0 context.Context, arg1, arg2 uint64) (*blobstream.DataRootTupleRoot, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataRootTupleRoot", arg0, arg1, arg2) + ret0, _ := ret[0].(*blobstream.DataRootTupleRoot) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataRootTupleRoot indicates an expected call of GetDataRootTupleRoot. 
+func (mr *MockModuleMockRecorder) GetDataRootTupleRoot(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleRoot", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleRoot), arg0, arg1, arg2) +} diff --git a/nodebuilder/blobstream/module.go b/nodebuilder/blobstream/module.go new file mode 100644 index 0000000000..c8deb1db10 --- /dev/null +++ b/nodebuilder/blobstream/module.go @@ -0,0 +1,12 @@ +package blobstream + +import "go.uber.org/fx" + +func ConstructModule() fx.Option { + return fx.Module("blobstream", + fx.Provide(NewService), + fx.Provide(func(serv *Service) Module { + return serv + }), + ) +} diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go new file mode 100644 index 0000000000..5803e19012 --- /dev/null +++ b/nodebuilder/blobstream/service.go @@ -0,0 +1,81 @@ +package blobstream + +import ( + "context" + + logging "github.com/ipfs/go-log/v2" + + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" +) + +var _ Module = (*Service)(nil) + +var log = logging.Logger("go-blobstream") + +type Service struct { + headerServ headerServ.Module +} + +func NewService(headerMod headerServ.Module) *Service { + return &Service{ + headerServ: headerMod, + } +} + +// GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. The range is end exclusive. 
+func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { + log.Debugw("validating the data commitment range", "start", start, "end", end) + err := s.validateDataRootTupleRootRange(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("hashing the data root tuples", "start", start, "end", end) + root, err := hashDataRootTuples(encodedDataRootTuples) + if err != nil { + return nil, err + } + // Create data commitment + dataRootTupleRoot := DataRootTupleRoot(root) + return &dataRootTupleRoot, nil +} + +// GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block +// height `height` in the set of blocks defined by `start` and `end`. The range +// is end exclusive. +func (s *Service) GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, +) (*DataRootTupleInclusionProof, error) { + log.Debugw( + "validating the data root inclusion proof request", + "start", + start, + "end", + end, + "height", + height, + ) + err := s.validateDataRootInclusionProofRequest(ctx, height, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("proving the data root tuples", "start", start, "end", end) + proof, err := proveDataRootTuples(encodedDataRootTuples, start, height) + if err != nil { + return nil, err + } + dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) + return &dataRootTupleInclusionProof, nil +} diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go new file mode 100644 index 0000000000..77ee47efff --- /dev/null 
+++ b/nodebuilder/blobstream/service_test.go @@ -0,0 +1,187 @@ +package blobstream + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" +) + +func TestPadBytes(t *testing.T) { + tests := []struct { + input []byte + length int + expected []byte + expectErr bool + }{ + {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, + {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, + } + + for _, test := range tests { + result, err := padBytes(test.input, test.length) + if test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + } +} + +func TestTo32PaddedHexBytes(t *testing.T) { + tests := []struct { + number uint64 + expected []byte + expectError bool + }{ + { + number: 10, + expected: func() []byte { + res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 4294967295, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") + return res + }(), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { + result, err := to32PaddedHexBytes(test.number) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + }) + } +} + +func 
TestEncodeDataRootTuple(t *testing.T) { + height := uint64(2) + dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") + require.NoError(t, err) + + expectedEncoding, err := hex.DecodeString( + // hex representation of height padded to 32 bytes + "0000000000000000000000000000000000000000000000000000000000000002" + + // data root + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", + ) + require.NoError(t, err) + require.NotNil(t, expectedEncoding) + + actualEncoding, err := encodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + require.NoError(t, err) + require.NotNil(t, actualEncoding) + + // Check that the length of packed data is correct + assert.Equal(t, len(actualEncoding), 64) + assert.Equal(t, expectedEncoding, actualEncoding) +} + +func TestHashDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples [][]byte + expectedHash []byte + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "valid list of encoded data root tuples": { + tuples: func() [][]byte { + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + return [][]byte{tuple1, tuple2} + }(), + expectedHash: func() []byte { + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + + return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := hashDataRootTuples(tc.tuples) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedHash, result) + } + }) + } +} + +func TestProveDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples [][]byte + height uint64 + rangeStart uint64 + expectedProof merkle.Proof + expectErr bool + }{ + "empty tuples list": {tuples: [][]byte{{0x1}}, expectErr: true}, + "start height == 0": {tuples: [][]byte{{0x1}}, 
expectErr: true}, + "range start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, + "valid proof": { + height: 3, + rangeStart: 1, + tuples: func() [][]byte { + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) + return [][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4} + }(), + expectedProof: func() merkle.Proof { + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) + _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) + return *proofs[2] + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := proveDataRootTuples(tc.tuples, tc.rangeStart, tc.height) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + } + }) + } +} diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index 1892afeb94..f807796eb6 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -43,20 +43,6 @@ type Module interface { // Subscribe to recent ExtendedHeaders from the network. Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) - - // GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, - // and then creates a new Merkle root of those data roots. The range is end exclusive. - // It's in the header module because it only needs access to the headers to generate the proof. 
- GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) - - // GetDataRootTupleInclusionProof creates an inclusion proof, for the data root tuple of block - // height `height`, in the set of blocks defined by `start` and `end`. The range - // is end exclusive. - // It's in the header module because it only needs access to the headers to generate the proof. - GetDataRootTupleInclusionProof( - ctx context.Context, - height, start, end uint64, - ) (*DataRootTupleInclusionProof, error) } // API is a wrapper around Module for the RPC. @@ -73,17 +59,12 @@ type API struct { *header.ExtendedHeader, uint64, ) ([]*header.ExtendedHeader, error) `perm:"read"` - GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` - SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` - SyncWait func(ctx context.Context) error `perm:"read"` - NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` - Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` - GetDataRootTupleRoot func(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) `perm:"read"` - GetDataRootTupleInclusionProof func( - ctx context.Context, - height, start, end uint64, - ) (*DataRootTupleInclusionProof, error) `perm:"read"` + GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + WaitForHeight func(context.Context, uint64) (*header.ExtendedHeader, error) `perm:"read"` + SyncState func(ctx context.Context) (sync.State, error) `perm:"read"` + SyncWait func(ctx context.Context) error `perm:"read"` + NetworkHead func(ctx context.Context) (*header.ExtendedHeader, error) `perm:"read"` + Subscribe func(ctx context.Context) (<-chan *header.ExtendedHeader, error) `perm:"read"` } } @@ -126,14 +107,3 @@ func (api *API) NetworkHead(ctx context.Context) 
(*header.ExtendedHeader, error) func (api *API) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, error) { return api.Internal.Subscribe(ctx) } - -func (api *API) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { - return api.Internal.GetDataRootTupleRoot(ctx, start, end) -} - -func (api *API) GetDataRootTupleInclusionProof( - ctx context.Context, - height, start, end uint64, -) (*DataRootTupleInclusionProof, error) { - return api.Internal.GetDataRootTupleInclusionProof(ctx, height, start, end) -} diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index eaf7f49cc0..b0d2b961d9 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -9,8 +9,7 @@ import ( reflect "reflect" header "github.com/celestiaorg/celestia-node/header" - header0 "github.com/celestiaorg/celestia-node/nodebuilder/header" - header1 "github.com/celestiaorg/go-header" + header0 "github.com/celestiaorg/go-header" sync "github.com/celestiaorg/go-header/sync" gomock "github.com/golang/mock/gomock" ) @@ -39,7 +38,7 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // GetByHash mocks base method. -func (m *MockModule) GetByHash(arg0 context.Context, arg1 header1.Hash) (*header.ExtendedHeader, error) { +func (m *MockModule) GetByHash(arg0 context.Context, arg1 header0.Hash) (*header.ExtendedHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetByHash", arg0, arg1) ret0, _ := ret[0].(*header.ExtendedHeader) @@ -68,36 +67,6 @@ func (mr *MockModuleMockRecorder) GetByHeight(arg0, arg1 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByHeight", reflect.TypeOf((*MockModule)(nil).GetByHeight), arg0, arg1) } -// GetDataRootTupleInclusionProof mocks base method. 
-func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1, arg2, arg3 uint64) (*header0.DataRootTupleInclusionProof, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataRootTupleInclusionProof", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*header0.DataRootTupleInclusionProof) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDataRootTupleInclusionProof indicates an expected call of GetDataRootTupleInclusionProof. -func (mr *MockModuleMockRecorder) GetDataRootTupleInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleInclusionProof), arg0, arg1, arg2, arg3) -} - -// GetDataRootTupleRoot mocks base method. -func (m *MockModule) GetDataRootTupleRoot(arg0 context.Context, arg1, arg2 uint64) (*header0.DataRootTupleRoot, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDataRootTupleRoot", arg0, arg1, arg2) - ret0, _ := ret[0].(*header0.DataRootTupleRoot) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDataRootTupleRoot indicates an expected call of GetDataRootTupleRoot. -func (mr *MockModuleMockRecorder) GetDataRootTupleRoot(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleRoot", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleRoot), arg0, arg1, arg2) -} - // GetRangeByHeight mocks base method. 
func (m *MockModule) GetRangeByHeight(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 uint64) ([]*header.ExtendedHeader, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 1e40a917c8..944562ee61 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -147,61 +147,3 @@ func (s *Service) Subscribe(ctx context.Context) (<-chan *header.ExtendedHeader, }() return headerCh, nil } - -// GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, -// and then creates a new Merkle root of those data roots. The range is end exclusive. -func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { - log.Debugw("validating the data commitment range", "start", start, "end", end) - err := s.validateDataRootTupleRootRange(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("fetching the data root tuples", "start", start, "end", end) - encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("hashing the data root tuples", "start", start, "end", end) - root, err := hashDataRootTuples(encodedDataRootTuples) - if err != nil { - return nil, err - } - // Create data commitment - dataRootTupleRoot := DataRootTupleRoot(root) - return &dataRootTupleRoot, nil -} - -// GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block -// height `height` in the set of blocks defined by `start` and `end`. The range -// is end exclusive. 
-func (s *Service) GetDataRootTupleInclusionProof( - ctx context.Context, - height, start, end uint64, -) (*DataRootTupleInclusionProof, error) { - log.Debugw( - "validating the data root inclusion proof request", - "start", - start, - "end", - end, - "height", - height, - ) - err := s.validateDataRootInclusionProofRequest(ctx, height, start, end) - if err != nil { - return nil, err - } - log.Debugw("fetching the data root tuples", "start", start, "end", end) - - encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) - if err != nil { - return nil, err - } - log.Debugw("proving the data root tuples", "start", start, "end", end) - proof, err := proveDataRootTuples(encodedDataRootTuples, start, height) - if err != nil { - return nil, err - } - dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) - return &dataRootTupleInclusionProof, nil -} diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 789f97d51e..14d5ada87d 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -2,13 +2,10 @@ package header import ( "context" - "encoding/hex" "fmt" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/merkle" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/go-header/sync" @@ -42,179 +39,3 @@ func (d *errorSyncer[H]) State() sync.State { func (d *errorSyncer[H]) SyncWait(context.Context) error { return fmt.Errorf("dummy error") } - -func TestPadBytes(t *testing.T) { - tests := []struct { - input []byte - length int - expected []byte - expectErr bool - }{ - {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, - {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, - {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, - {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, - } - - for _, test := range tests { - result, err := 
padBytes(test.input, test.length) - if test.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expected, result) - } - } -} - -func TestTo32PaddedHexBytes(t *testing.T) { - tests := []struct { - number uint64 - expected []byte - expectError bool - }{ - { - number: 10, - expected: func() []byte { - res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") - return res - }(), - }, - { - number: 255, - expected: func() []byte { - res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - return res - }(), - }, - { - number: 255, - expected: func() []byte { - res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") - return res - }(), - }, - { - number: 4294967295, - expected: func() []byte { - res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") - return res - }(), - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { - result, err := to32PaddedHexBytes(test.number) - if test.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, test.expected, result) - } - }) - } -} - -func TestEncodeDataRootTuple(t *testing.T) { - height := uint64(2) - dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") - require.NoError(t, err) - - expectedEncoding, err := hex.DecodeString( - // hex representation of height padded to 32 bytes - "0000000000000000000000000000000000000000000000000000000000000002" + - // data root - "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", - ) - require.NoError(t, err) - require.NotNil(t, expectedEncoding) - - actualEncoding, err := encodeDataRootTuple(height, *(*[32]byte)(dataRoot)) - require.NoError(t, err) - require.NotNil(t, actualEncoding) - - // Check that the length of packed data is correct - 
assert.Equal(t, len(actualEncoding), 64) - assert.Equal(t, expectedEncoding, actualEncoding) -} - -func TestHashDataRootTuples(t *testing.T) { - tests := map[string]struct { - tuples [][]byte - expectedHash []byte - expectErr bool - }{ - "empty tuples list": {tuples: nil, expectErr: true}, - "valid list of encoded data root tuples": { - tuples: func() [][]byte { - tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) - tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) - return [][]byte{tuple1, tuple2} - }(), - expectedHash: func() []byte { - tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) - tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) - - return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := hashDataRootTuples(tc.tuples) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedHash, result) - } - }) - } -} - -func TestProveDataRootTuples(t *testing.T) { - tests := map[string]struct { - tuples [][]byte - height uint64 - rangeStart uint64 - expectedProof merkle.Proof - expectErr bool - }{ - "empty tuples list": {tuples: [][]byte{{0x1}}, expectErr: true}, - "start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, - "range start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, - "valid proof": { - height: 3, - rangeStart: 1, - tuples: func() [][]byte { - encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) - encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) - encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) - encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) - return [][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4} - }(), - expectedProof: func() merkle.Proof { - encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) - encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) - encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) - 
encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) - _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) - return *proofs[2] - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := proveDataRootTuples(tc.tuples, tc.rangeStart, tc.height) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, *result) - } - }) - } -} diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 8f196f3b1d..5a774b8b9b 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -8,6 +8,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -56,6 +57,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store node.ConstructModule(tp), pruner.ConstructModule(tp, &cfg.Pruner), rpc.ConstructModule(tp, &cfg.RPC), + blobstream.ConstructModule(), ) return fx.Module( diff --git a/nodebuilder/node.go b/nodebuilder/node.go index e17c9d3922..c0ba8f78e8 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -22,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -69,14 +70,15 @@ type Node struct { // p2p protocols PubSub *pubsub.PubSub // services - ShareServ share.Module // not optional - HeaderServ 
header.Module // not optional - StateServ state.Module // not optional - FraudServ fraud.Module // not optional - BlobServ blob.Module // not optional - DASer das.Module // not optional - AdminServ node.Module // not optional - DAMod da.Module // not optional + ShareServ share.Module // not optional + HeaderServ header.Module // not optional + StateServ state.Module // not optional + FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional + DASer das.Module // not optional + AdminServ node.Module // not optional + DAMod da.Module // not optional + BlobstreamMod blobstream.Module // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc From 4b71f5b79d8f01d55a99f56f47327f963e418875 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Fri, 19 Jul 2024 23:49:02 +0100 Subject: [PATCH 47/52] fix: correctly register the blobstream module --- api/rpc/client/client.go | 39 ++++++++++++++++--------------- api/rpc_test.go | 41 ++++++++++++++++++--------------- nodebuilder/rpc/constructors.go | 3 +++ 3 files changed, 47 insertions(+), 36 deletions(-) diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index ff206d723e..56b4a54d19 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -9,6 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/perms" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -26,15 +27,16 @@ var ( ) type Client struct { - Fraud fraud.API - Header header.API - State state.API - Share share.API - DAS das.API - P2P p2p.API - Node node.API - Blob blob.API - DA da.API + Fraud fraud.API + Header header.API + State state.API + Share share.API + DAS das.API + P2P p2p.API + Node node.API + Blob blob.API + DA 
da.API + Blobstream blobstream.API closer multiClientCloser } @@ -85,14 +87,15 @@ func newClient(ctx context.Context, addr string, authHeader http.Header) (*Clien func moduleMap(client *Client) map[string]interface{} { // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 return map[string]interface{}{ - "share": &client.Share.Internal, - "state": &client.State.Internal, - "header": &client.Header.Internal, - "fraud": &client.Fraud.Internal, - "das": &client.DAS.Internal, - "p2p": &client.P2P.Internal, - "node": &client.Node.Internal, - "blob": &client.Blob.Internal, - "da": &client.DA.Internal, + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + "da": &client.DA.Internal, + "blobstream": &client.Blobstream.Internal, } } diff --git a/api/rpc_test.go b/api/rpc_test.go index 29191b93a2..db65d2a5f8 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -22,6 +22,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + blobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -90,15 +92,16 @@ func TestRPCCallsUnderlyingNode(t *testing.T) { // api contains all modules that are made available as the node's // public API surface type api struct { - Fraud fraud.Module - Header header.Module - State statemod.Module - Share share.Module - DAS das.Module - Node node.Module - P2P p2p.Module - Blob blob.Module - DA 
da.Module + Fraud fraud.Module + Header header.Module + State statemod.Module + Share share.Module + DAS das.Module + Node node.Module + P2P p2p.Module + Blob blob.Module + DA da.Module + Blobstream blobstream.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -312,6 +315,7 @@ func setupNodeWithAuthedRPC(t *testing.T, nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), daMock.NewMockModule(ctrl), + blobstreamMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -342,13 +346,14 @@ func setupNodeWithAuthedRPC(t *testing.T, } type mockAPI struct { - State *stateMock.MockModule - Share *shareMock.MockModule - Fraud *fraudMock.MockModule - Header *headerMock.MockModule - Das *dasMock.MockModule - P2P *p2pMock.MockModule - Node *nodeMock.MockModule - Blob *blobMock.MockModule - DA *daMock.MockModule + State *stateMock.MockModule + Share *shareMock.MockModule + Fraud *fraudMock.MockModule + Header *headerMock.MockModule + Das *dasMock.MockModule + P2P *p2pMock.MockModule + Node *nodeMock.MockModule + Blob *blobMock.MockModule + DA *daMock.MockModule + Blobstream *blobstreamMock.MockModule } diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 6509e38b96..a686202bfd 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -5,6 +5,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -26,6 +27,7 @@ func registerEndpoints( nodeMod node.Module, blobMod blob.Module, daMod da.Module, + blobstreamMod blobstream.Module, serv *rpc.Server, ) { serv.RegisterService("fraud", fraudMod, &fraud.API{}) @@ -37,6 +39,7 @@ func registerEndpoints( 
serv.RegisterService("node", nodeMod, &node.API{}) serv.RegisterService("blob", blobMod, &blob.API{}) serv.RegisterService("da", daMod, &da.API{}) + serv.RegisterService("blobstream", blobstreamMod, &blobstream.API{}) } func server(cfg *Config, signer jwt.Signer, verifier jwt.Verifier) *rpc.Server { From e97870606c7c3a998bbbce4cc82f89b7225a6a5c Mon Sep 17 00:00:00 2001 From: CHAMI Rachid Date: Mon, 22 Jul 2024 12:30:01 +0200 Subject: [PATCH 48/52] Update nodebuilder/blobstream/data_root_tuple_root.go Co-authored-by: rene <41963722+renaynay@users.noreply.github.com> --- nodebuilder/blobstream/data_root_tuple_root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go index ae314d6d2d..ab0565cd75 100644 --- a/nodebuilder/blobstream/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -117,7 +117,7 @@ func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end currentLocalHeader, err := s.headerServ.LocalHead(ctx) if err != nil { - return fmt.Errorf("couldn't get the local head to validate the data root tuple root range%w", err) + return fmt.Errorf("could not get the local head to validate the data root tuple root range: %w", err) } // the data commitment range is end exclusive if end > currentLocalHeader.Height()+1 { From 644c2792f91991a83b4d372db019a5561b5b939c Mon Sep 17 00:00:00 2001 From: sweexordious Date: Mon, 22 Jul 2024 11:34:32 +0100 Subject: [PATCH 49/52] fix: also include the initial header when encoding data root tuples --- nodebuilder/blobstream/data_root_tuple_root.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go index ab0565cd75..91ec1378d0 100644 --- a/nodebuilder/blobstream/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -4,6 +4,7 @@ import ( 
"context" "encoding/hex" "fmt" + nodeheader "github.com/celestiaorg/celestia-node/header" "strconv" "github.com/tendermint/tendermint/crypto/merkle" @@ -180,15 +181,21 @@ func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, heigh // end is not included in the range. func (s *Service) fetchEncodedDataRootTuples(ctx context.Context, start, end uint64) ([][]byte, error) { encodedDataRootTuples := make([][]byte, 0, end-start) + headers := make([]*nodeheader.ExtendedHeader, 0) + startHeader, err := s.headerServ.GetByHeight(ctx, start) if err != nil { return nil, err } + headers = append(headers, startHeader) + headerRange, err := s.headerServ.GetRangeByHeight(ctx, startHeader, end) if err != nil { return nil, err } - for _, header := range headerRange { + headers = append(headers, headerRange...) + + for _, header := range headers { encodedDataRootTuple, err := encodeDataRootTuple(header.Height(), *(*[32]byte)(header.DataHash)) if err != nil { return nil, err From b8d7a444198905245439dc8743ac26e6f6f9a72f Mon Sep 17 00:00:00 2001 From: sweexordious Date: Mon, 22 Jul 2024 11:38:53 +0100 Subject: [PATCH 50/52] docs: update to comment to reflect data root tuple blocks limit time --- nodebuilder/blobstream/data_root_tuple_root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go index 91ec1378d0..1129c501ec 100644 --- a/nodebuilder/blobstream/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -98,7 +98,7 @@ func encodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { // dataRootTupleRootBlocksLimit The maximum number of blocks to be used to create a data commitment. // It's a local parameter to protect the API from creating unnecessarily large commitments. -const dataRootTupleRootBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. 
+const dataRootTupleRootBlocksLimit = 10_000 // ~27 hours of blocks assuming 10-second blocks. // validateDataRootTupleRootRange runs basic checks on the ascending sorted list of // heights that will be used subsequently in generating data commitments over From dc082659ae875277b0cba05bce9de1307d613b25 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Mon, 22 Jul 2024 11:46:11 +0100 Subject: [PATCH 51/52] chore: gofumpt --- nodebuilder/blobstream/data_root_tuple_root.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go index 1129c501ec..30220fb9ee 100644 --- a/nodebuilder/blobstream/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -4,9 +4,10 @@ import ( "context" "encoding/hex" "fmt" - nodeheader "github.com/celestiaorg/celestia-node/header" "strconv" + nodeheader "github.com/celestiaorg/celestia-node/header" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/bytes" From 1183e46a694e00b659e82fad3b7981ab20c8a957 Mon Sep 17 00:00:00 2001 From: sweexordious Date: Mon, 22 Jul 2024 13:31:33 +0100 Subject: [PATCH 52/52] chore: add array capacity --- nodebuilder/blobstream/data_root_tuple_root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go index 30220fb9ee..de2d60c4fa 100644 --- a/nodebuilder/blobstream/data_root_tuple_root.go +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -182,7 +182,7 @@ func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, heigh // end is not included in the range. 
func (s *Service) fetchEncodedDataRootTuples(ctx context.Context, start, end uint64) ([][]byte, error) { encodedDataRootTuples := make([][]byte, 0, end-start) - headers := make([]*nodeheader.ExtendedHeader, 0) + headers := make([]*nodeheader.ExtendedHeader, 0, end-start) startHeader, err := s.headerServ.GetByHeight(ctx, start) if err != nil {