From 2c5614f7f4f7bcc7761ec1fb0e345de303a4a729 Mon Sep 17 00:00:00 2001 From: Matt Kocubinski Date: Mon, 15 Apr 2024 10:37:32 -0500 Subject: [PATCH] feat: iavl v2 (#872) Co-authored-by: Marko --- v2/.gitignore | 21 + v2/.golangci.yml | 66 ++ v2/cmd/gen/gen.go | 234 +++++++ v2/cmd/latest.go | 70 +++ v2/cmd/main.go | 40 ++ v2/cmd/rollback/rollback.go | 57 ++ v2/cmd/root.go | 24 + v2/cmd/scan/scan.go | 103 ++++ v2/cmd/snapshot/snapshot.go | 79 +++ v2/export.go | 101 +++ v2/go.mod | 36 ++ v2/go.sum | 81 +++ v2/internal/encoding.go | 167 +++++ v2/internal/encoding_test.go | 88 +++ v2/iterator.go | 375 ++++++++++++ v2/iterator_test.go | 308 ++++++++++ v2/metrics/metrics.go | 120 ++++ v2/migrate/core/store.go | 129 ++++ v2/migrate/go.mod | 56 ++ v2/migrate/go.sum | 454 ++++++++++++++ v2/migrate/main.go | 22 + v2/migrate/v0/migrate_v0.go | 386 ++++++++++++ v2/migrate/v0/types.go | 61 ++ v2/multitree.go | 289 +++++++++ v2/node.go | 560 +++++++++++++++++ v2/pool.go | 56 ++ v2/pool_test.go | 16 + v2/range.go | 94 +++ v2/range_test.go | 104 ++++ v2/snapshot.go | 846 +++++++++++++++++++++++++ v2/sqlite.go | 1119 ++++++++++++++++++++++++++++++++++ v2/sqlite_batch.go | 294 +++++++++ v2/sqlite_metadata.go | 90 +++ v2/sqlite_test.go | 296 +++++++++ v2/sqlite_writer.go | 467 ++++++++++++++ v2/testutil/util.go | 460 ++++++++++++++ v2/tree.go | 701 +++++++++++++++++++++ v2/tree_test.go | 787 ++++++++++++++++++++++++ v2/visualize.go | 45 ++ 39 files changed, 9302 insertions(+) create mode 100644 v2/.gitignore create mode 100644 v2/.golangci.yml create mode 100644 v2/cmd/gen/gen.go create mode 100644 v2/cmd/latest.go create mode 100644 v2/cmd/main.go create mode 100644 v2/cmd/rollback/rollback.go create mode 100644 v2/cmd/root.go create mode 100644 v2/cmd/scan/scan.go create mode 100644 v2/cmd/snapshot/snapshot.go create mode 100644 v2/export.go create mode 100644 v2/go.mod create mode 100644 v2/go.sum create mode 100644 v2/internal/encoding.go create mode 100644 
v2/internal/encoding_test.go create mode 100644 v2/iterator.go create mode 100644 v2/iterator_test.go create mode 100644 v2/metrics/metrics.go create mode 100644 v2/migrate/core/store.go create mode 100644 v2/migrate/go.mod create mode 100644 v2/migrate/go.sum create mode 100644 v2/migrate/main.go create mode 100644 v2/migrate/v0/migrate_v0.go create mode 100644 v2/migrate/v0/types.go create mode 100644 v2/multitree.go create mode 100644 v2/node.go create mode 100644 v2/pool.go create mode 100644 v2/pool_test.go create mode 100644 v2/range.go create mode 100644 v2/range_test.go create mode 100644 v2/snapshot.go create mode 100644 v2/sqlite.go create mode 100644 v2/sqlite_batch.go create mode 100644 v2/sqlite_metadata.go create mode 100644 v2/sqlite_test.go create mode 100644 v2/sqlite_writer.go create mode 100644 v2/testutil/util.go create mode 100644 v2/tree.go create mode 100644 v2/tree_test.go create mode 100644 v2/visualize.go diff --git a/v2/.gitignore b/v2/.gitignore new file mode 100644 index 000000000..8b45aed86 --- /dev/null +++ b/v2/.gitignore @@ -0,0 +1,21 @@ +vendor +.glide +*.swp +*.swo + +# created in test code +test.db + +# profiling data +*\.test +cpu*.out +mem*.out +cpu*.pdf +mem*.pdf + +# IDE files +.idea/* +.vscode/* + +go.work +go.work.sum diff --git a/v2/.golangci.yml b/v2/.golangci.yml new file mode 100644 index 000000000..816d9c4a9 --- /dev/null +++ b/v2/.golangci.yml @@ -0,0 +1,66 @@ +run: + tests: true + # timeout for analysis, e.g. 
30s, 5m, default is 1m + timeout: 5m + +linters: + disable-all: true + enable: + - bodyclose + - dogsled + - errcheck + - exportloopref + - goconst + - gocritic + - gofumpt + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - nolintlint + - prealloc + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + nolintlint: + allow-leading-space: true + require-explanation: false + require-specific: true + +issues: + exclude-rules: + - text: "Use of weak random number generator" + linters: + - gosec + - text: "comment on exported var" + linters: + - golint + - text: "don't use an underscore in package name" + linters: + - golint + - text: "should be written without leading space as" + linters: + - nolintlint + - text: "ST1003:" + linters: + - stylecheck + # FIXME: Disabled until golangci-lint updates stylecheck with this fix: + # https://github.com/dominikh/go-tools/issues/389 + - text: "ST1016:" + linters: + - stylecheck + - path: "migrations" + text: "SA1019:" + linters: + - staticcheck + + max-issues-per-linter: 10000 + max-same-issues: 10000 diff --git a/v2/cmd/gen/gen.go b/v2/cmd/gen/gen.go new file mode 100644 index 000000000..9eb8631ec --- /dev/null +++ b/v2/cmd/gen/gen.go @@ -0,0 +1,234 @@ +package gen + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/cosmos/iavl-bench/bench" + "github.com/cosmos/iavl/v2" + "github.com/cosmos/iavl/v2/testutil" + "github.com/dustin/go-humanize" + "github.com/kocubinski/costor-api/compact" + "github.com/kocubinski/costor-api/core" + "github.com/rs/zerolog" + zlog "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +var log = zlog.Output(zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.Stamp, +}) + +func Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "gen", + Short: "generate changesets", + } + + cmd.AddCommand(emitCommand(), treeCommand()) + + return cmd +} + +func getChangesetIterator(typ string) 
(bench.ChangesetIterator, error) { + switch typ { + case "osmo-like": + return testutil.OsmoLike().Iterator, nil + case "osmo-like-many": + return testutil.OsmoLikeManyTrees().Iterator, nil + case "height-zero": + return testutil.NewTreeBuildOptions().Iterator, nil + default: + return nil, fmt.Errorf("unknown generator type %s", typ) + } +} + +func emitCommand() *cobra.Command { + var ( + typ string + out string + start int + limit int + ) + cmd := &cobra.Command{ + Use: "emit", + Short: "emit generated changesets to disk", + RunE: func(cmd *cobra.Command, args []string) error { + itr, err := getChangesetIterator(typ) + if err != nil { + return err + } + ctx := core.Context{Context: cmd.Context()} + + stream := compact.StreamingContext{ + In: make(chan compact.Sequenced), + Context: ctx, + OutDir: out, + MaxFileSize: 100 * 1024 * 1024, + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + stats, err := stream.Compact() + if err != nil { + log.Fatal().Err(err).Msg("failed to compact") + } + log.Info().Msgf(stats.Report()) + wg.Done() + }() + + var cnt int64 + for ; itr.Valid(); err = itr.Next() { + if err != nil { + return err + } + if limit > 0 && itr.Version() > int64(limit) { + break + } + nodes := itr.Nodes() + for ; nodes.Valid(); err = nodes.Next() { + cnt++ + + if itr.Version() < int64(start) { + if cnt%5_000_000 == 0 { + log.Info().Msgf("fast forward version=%d nodes=%s", itr.Version(), humanize.Comma(cnt)) + } + continue + } + + if cnt%500_000 == 0 { + log.Info().Msgf("version=%d nodes=%s", itr.Version(), humanize.Comma(cnt)) + } + + select { + case <-cmd.Context().Done(): + close(stream.In) + wg.Wait() + return nil + default: + } + + if err != nil { + return err + } + stream.In <- nodes.GetNode() + } + } + close(stream.In) + wg.Wait() + + return nil + }, + } + + cmd.Flags().StringVar(&typ, "type", "", "the type of changeset to generate") + if err := cmd.MarkFlagRequired("type"); err != nil { + panic(err) + } + cmd.Flags().StringVar(&out, "out", "", 
"the directory to write changesets to") + if err := cmd.MarkFlagRequired("out"); err != nil { + panic(err) + } + cmd.Flags().IntVar(&limit, "limit", -1, "the version (inclusive) to halt generation at. -1 means no limit") + cmd.Flags().IntVar(&start, "start", 1, "the version (inclusive) to start generation at") + + return cmd +} + +func treeCommand() *cobra.Command { + var ( + dbPath string + genType string + limit int64 + ) + cmd := &cobra.Command{ + Use: "tree", + Short: "build and save a Tree to disk, taking generated changesets as input", + RunE: func(cmd *cobra.Command, args []string) error { + multiTree := iavl.NewMultiTree(dbPath, iavl.TreeOptions{StateStorage: true}) + defer func(mt *iavl.MultiTree) { + err := mt.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close db") + } + }(multiTree) + + itr, err := getChangesetIterator(genType) + if err != nil { + return err + } + + var i int64 + var lastHash []byte + var lastVersion int64 + start := time.Now() + for ; itr.Valid(); err = itr.Next() { + if err != nil { + return err + } + if limit > -1 && itr.Version() > limit { + break + } + + changeset := itr.Nodes() + for ; changeset.Valid(); err = changeset.Next() { + if err != nil { + return err + } + node := changeset.GetNode() + key := node.Key + + tree, ok := multiTree.Trees[node.StoreKey] + if !ok { + if err = multiTree.MountTree(node.StoreKey); err != nil { + return err + } + tree = multiTree.Trees[node.StoreKey] + } + if node.Delete { + _, _, err = tree.Remove(key) + if err != nil { + return err + } + } else { + _, err = tree.Set(key, node.Value) + if err != nil { + return err + } + } + + i++ + if i%100_000 == 0 { + log.Info().Msgf("leaves=%s dur=%s rate=%s version=%d", + humanize.Comma(i), + time.Since(start), + humanize.Comma(int64(100_000/time.Since(start).Seconds())), + itr.Version(), + ) + start = time.Now() + } + } + + lastHash, lastVersion, err = multiTree.SaveVersionConcurrently() + if err != nil { + return err + } + } + + 
log.Info().Msgf("last version=%d hash=%x", lastVersion, lastHash) + + return nil + }, + } + cmd.Flags().StringVar(&genType, "type", "", "the type of changeset to generate") + if err := cmd.MarkFlagRequired("type"); err != nil { + panic(err) + } + cmd.Flags().StringVar(&dbPath, "db", "/tmp", "the path to the database") + cmd.Flags().Int64Var(&limit, "limit", -1, "the version (inclusive) to halt generation at. -1 means no limit") + return cmd +} diff --git a/v2/cmd/latest.go b/v2/cmd/latest.go new file mode 100644 index 000000000..9e8bdf4fe --- /dev/null +++ b/v2/cmd/latest.go @@ -0,0 +1,70 @@ +package main + +import ( + "github.com/cosmos/iavl/v2" + "github.com/spf13/cobra" +) + +func latestCommand() *cobra.Command { + var ( + dbPath string + version int64 + ) + cmd := &cobra.Command{ + Use: "latest", + Short: "fill the latest table with the latest version of leaf nodes in a tree", + RunE: func(cmd *cobra.Command, args []string) error { + paths, err := iavl.FindDbsInPath(dbPath) + if err != nil { + return err + } + var ( + pool = iavl.NewNodePool() + done = make(chan struct{}) + errors = make(chan error) + cnt = 0 + ) + for _, path := range paths { + cnt++ + sqlOpts := iavl.SqliteDbOptions{Path: path} + sql, err := iavl.NewSqliteDb(pool, sqlOpts) + if err != nil { + return err + } + tree := iavl.NewTree(sql, pool, iavl.TreeOptions{}) + if err = tree.LoadVersion(version); err != nil { + return err + } + go func() { + fillErr := tree.WriteLatestLeaves() + if fillErr != nil { + errors <- fillErr + } + fillErr = tree.Close() + if fillErr != nil { + errors <- fillErr + } + done <- struct{}{} + }() + } + for i := 0; i < cnt; i++ { + select { + case <-done: + continue + case err := <-errors: + return err + } + } + return nil + }, + } + cmd.Flags().StringVar(&dbPath, "db", "", "the path to the db to fill the latest table for") + if err := cmd.MarkFlagRequired("db"); err != nil { + panic(err) + } + cmd.Flags().Int64Var(&version, "version", 0, "version to fill from") + if err 
:= cmd.MarkFlagRequired("version"); err != nil { + panic(err) + } + return cmd +} diff --git a/v2/cmd/main.go b/v2/cmd/main.go new file mode 100644 index 000000000..43d4fcf17 --- /dev/null +++ b/v2/cmd/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +func main() { + root, err := RootCommand() + if err != nil { + os.Exit(1) + } + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + defer func() { + signal.Stop(signalChan) + cancel() + }() + + go func() { + select { + case <-signalChan: + cancel() + case <-ctx.Done(): + } + <-signalChan + os.Exit(2) + }() + + if err := root.ExecuteContext(ctx); err != nil { + fmt.Printf("Error: %s\n", err.Error()) + os.Exit(1) + } +} diff --git a/v2/cmd/rollback/rollback.go b/v2/cmd/rollback/rollback.go new file mode 100644 index 000000000..1a76ad2f9 --- /dev/null +++ b/v2/cmd/rollback/rollback.go @@ -0,0 +1,57 @@ +package rollback + +import ( + "os" + "time" + + "github.com/cosmos/iavl/v2" + "github.com/rs/zerolog" + zlog "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +var log = zlog.Output(zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.Stamp, +}) + +func Command() *cobra.Command { + var ( + version int + path string + ) + cmd := &cobra.Command{ + Use: "rollback", + Short: "Rollback IAVL to a previous version", + RunE: func(cmd *cobra.Command, args []string) error { + dbPaths, err := iavl.FindDbsInPath(path) + if err != nil { + return err + } + for _, dbPath := range dbPaths { + log.Info().Msgf("revert db %s to version %d", dbPath, version) + sql, err := iavl.NewSqliteDb(iavl.NewNodePool(), iavl.SqliteDbOptions{Path: dbPath}) + if err != nil { + return err + } + if err = sql.Revert(version); err != nil { + return err + } + if err = sql.Close(); err != nil { + return err + } + } + return nil + }, + } + cmd.Flags().StringVar(&path, "path", "", "Path to the 
IAVL database") + cmd.Flags().IntVar(&version, "version", -1, "Version to rollback to") + if err := cmd.MarkFlagRequired("path"); err != nil { + return nil + } + if err := cmd.MarkFlagRequired("version"); err != nil { + return nil + } + + return cmd +} diff --git a/v2/cmd/root.go b/v2/cmd/root.go new file mode 100644 index 000000000..95a76b1b7 --- /dev/null +++ b/v2/cmd/root.go @@ -0,0 +1,24 @@ +package main + +import ( + "github.com/cosmos/iavl/v2/cmd/gen" + "github.com/cosmos/iavl/v2/cmd/rollback" + "github.com/cosmos/iavl/v2/cmd/scan" + "github.com/cosmos/iavl/v2/cmd/snapshot" + "github.com/spf13/cobra" +) + +func RootCommand() (*cobra.Command, error) { + cmd := &cobra.Command{ + Use: "iavl", + Short: "benchmark cosmos/iavl", + } + cmd.AddCommand( + gen.Command(), + snapshot.Command(), + rollback.Command(), + scan.Command(), + latestCommand(), + ) + return cmd, nil +} diff --git a/v2/cmd/scan/scan.go b/v2/cmd/scan/scan.go new file mode 100644 index 000000000..2535e3b58 --- /dev/null +++ b/v2/cmd/scan/scan.go @@ -0,0 +1,103 @@ +package scan + +import ( + "fmt" + "os" + + "github.com/bvinc/go-sqlite-lite/sqlite3" + "github.com/cosmos/iavl/v2" + "github.com/spf13/cobra" +) + +func Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "scan", + } + cmd.AddCommand(probeCommand(), rootsCommand()) + return cmd +} + +func probeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "probe", + Short: "prob sqlite cgo configuration", + RunE: func(cmd *cobra.Command, args []string) error { + f, err := os.CreateTemp("", "iavl-v2-probe.sqlite") + if err != nil { + return err + } + fn := f.Name() + fmt.Println("fn:", fn) + conn, err := sqlite3.Open(fn) + if err != nil { + return err + } + + stmt, err := conn.Prepare("PRAGMA mmap_size=1000000000000") + if err != nil { + return err + } + _, err = stmt.Step() + if err != nil { + return err + } + if err = stmt.Close(); err != nil { + return err + } + + stmt, err = conn.Prepare("PRAGMA mmap_size") + if err != nil { + 
return err + } + _, err = stmt.Step() + if err != nil { + return err + } + res, _, err := stmt.ColumnRawString(0) + if err != nil { + return err + } + fmt.Println("mmap:", res) + + if err = stmt.Close(); err != nil { + return err + } + if err = conn.Close(); err != nil { + return err + } + if err = os.Remove(f.Name()); err != nil { + return err + } + return nil + }, + } + return cmd +} + +func rootsCommand() *cobra.Command { + var ( + dbPath string + version int64 + ) + cmd := &cobra.Command{ + Use: "roots", + Short: "list roots", + RunE: func(cmd *cobra.Command, args []string) error { + sql, err := iavl.NewSqliteDb(iavl.NewNodePool(), iavl.SqliteDbOptions{Path: dbPath}) + if err != nil { + return err + } + node, err := sql.LoadRoot(version) + if err != nil { + return err + } + fmt.Printf("root: %+v\n", node) + return sql.Close() + }, + } + cmd.Flags().StringVar(&dbPath, "db", "", "path to sqlite db") + cmd.Flags().Int64Var(&version, "version", 0, "version to query") + cmd.MarkFlagRequired("db") + cmd.MarkFlagRequired("version") + return cmd +} diff --git a/v2/cmd/snapshot/snapshot.go b/v2/cmd/snapshot/snapshot.go new file mode 100644 index 000000000..27e10b64f --- /dev/null +++ b/v2/cmd/snapshot/snapshot.go @@ -0,0 +1,79 @@ +package snapshot + +import ( + "os" + "time" + + "github.com/cosmos/iavl/v2" + "github.com/rs/zerolog" + zlog "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +var log = zlog.Output(zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.Stamp, +}) + +func Command() *cobra.Command { + var ( + version int64 + dbPath string + ) + cmd := &cobra.Command{ + Use: "snapshot", + Short: "take a snapshot of the tree at version n and write to SQLite", + RunE: func(cmd *cobra.Command, args []string) error { + paths, err := iavl.FindDbsInPath(dbPath) + if err != nil { + return err + } + log.Info().Msgf("found db paths: %v", paths) + + var ( + pool = iavl.NewNodePool() + done = make(chan struct{}) + errors = make(chan error) + cnt = 0 + ) + 
for _, path := range paths { + cnt++ + sqlOpts := iavl.SqliteDbOptions{Path: path} + sql, err := iavl.NewSqliteDb(pool, sqlOpts) + if err != nil { + return err + } + tree := iavl.NewTree(sql, pool, iavl.TreeOptions{}) + if err = tree.LoadVersion(version); err != nil { + return err + } + go func() { + snapshotErr := sql.Snapshot(cmd.Context(), tree) + if snapshotErr != nil { + errors <- snapshotErr + } + snapshotErr = sql.Close() + if snapshotErr != nil { + errors <- snapshotErr + } + done <- struct{}{} + }() + } + for i := 0; i < cnt; i++ { + select { + case <-done: + continue + case err := <-errors: + return err + } + } + return nil + }, + } + cmd.Flags().Int64Var(&version, "version", 0, "version to snapshot") + if err := cmd.MarkFlagRequired("version"); err != nil { + panic(err) + } + cmd.Flags().StringVar(&dbPath, "db", "/tmp", "path to the sqlite database") + return cmd +} diff --git a/v2/export.go b/v2/export.go new file mode 100644 index 000000000..a1f7e0570 --- /dev/null +++ b/v2/export.go @@ -0,0 +1,101 @@ +package iavl + +import "fmt" + +// TraverseOrderType is the type of the order in which the tree is traversed. 
+type TraverseOrderType uint8 + +const ( + PreOrder TraverseOrderType = iota + PostOrder +) + +type Exporter struct { + tree *Tree + out chan *Node + errCh chan error +} + +func (tree *Tree) Export(order TraverseOrderType) *Exporter { + exporter := &Exporter{ + tree: tree, + out: make(chan *Node), + errCh: make(chan error), + } + + go func(traverseOrder TraverseOrderType) { + defer close(exporter.out) + defer close(exporter.errCh) + + if traverseOrder == PostOrder { + exporter.postOrderNext(tree.root) + } else if traverseOrder == PreOrder { + exporter.preOrderNext(tree.root) + } + }(order) + + return exporter +} + +func (e *Exporter) postOrderNext(node *Node) { + if node.isLeaf() { + e.out <- node + return + } + + left, err := node.getLeftNode(e.tree) + if err != nil { + e.errCh <- err + return + } + e.postOrderNext(left) + + right, err := node.getRightNode(e.tree) + if err != nil { + e.errCh <- err + return + } + e.postOrderNext(right) + + e.out <- node +} + +func (e *Exporter) preOrderNext(node *Node) { + e.out <- node + if node.isLeaf() { + return + } + + left, err := node.getLeftNode(e.tree) + if err != nil { + e.errCh <- err + return + } + e.preOrderNext(left) + + right, err := node.getRightNode(e.tree) + if err != nil { + e.errCh <- err + return + } + e.preOrderNext(right) +} + +func (e *Exporter) Next() (*SnapshotNode, error) { + select { + case node, ok := <-e.out: + if !ok { + return nil, ErrorExportDone + } + return &SnapshotNode{ + Key: node.key, + Value: node.value, + Version: node.nodeKey.Version(), + Height: node.subtreeHeight, + }, nil + case err := <-e.errCh: + return nil, err + } +} + +var ErrorExportDone = fmt.Errorf("export done") diff --git a/v2/go.mod b/v2/go.mod new file mode 100644 index 000000000..25d0522dc --- /dev/null +++ b/v2/go.mod @@ -0,0 +1,36 @@ +module github.com/cosmos/iavl/v2 + +go 1.18 + +require ( + github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e + github.com/bvinc/go-sqlite-lite v0.6.1 + 
github.com/cosmos/iavl-bench/bench v0.0.4 + github.com/dustin/go-humanize v1.0.1 + github.com/emicklei/dot v1.6.0 + github.com/kocubinski/costor-api v1.1.1 + github.com/prometheus/client_golang v1.16.0 + github.com/rs/zerolog v1.30.0 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.13.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 000000000..8e14cbeaf --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,81 @@ +github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e h1:dSeuFcs4WAJJnswS8vXy7YY1+fdlbVPuEVmDAfqvFOQ= +github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e/go.mod h1:uh71c5Vc3VNIplXOFXsnDy21T1BepgT32c5X/YPrOyc= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bvinc/go-sqlite-lite v0.6.1 h1:JU8Rz5YAOZQiU3WEulKF084wfXpytRiqD2IaW2QjPz4= +github.com/bvinc/go-sqlite-lite v0.6.1/go.mod h1:2GiE60NUdb0aNhDdY+LXgrqAVDpi2Ijc6dB6ZMp9x6s= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cosmos/iavl-bench/bench v0.0.4 h1:J6zQPiBqF4CXMM3QBsLqZgQEBGY0taX85vLIZMhmAfQ= +github.com/cosmos/iavl-bench/bench v0.0.4/go.mod h1:j2rLae77EffacWcp7mmj3Uaa4AOAmZA7ymvhsuBQKKI= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/dot v1.6.0 h1:vUzuoVE8ipzS7QkES4UfxdpCwdU2U97m2Pb2tQCoYRY= +github.com/emicklei/dot v1.6.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/kocubinski/costor-api v1.1.1 h1:sgfJA7T/8IfZ59zxiMrED0xdjerAFuPNBTqyO90GiEE= 
+github.com/kocubinski/costor-api v1.1.1/go.mod h1:ESMBMDkKfN+9vvvhhNVdKLhbOmzI3O/i16iXvRM9Tuc= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 
h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/v2/internal/encoding.go b/v2/internal/encoding.go new file mode 100644 index 000000000..17a994bc1 --- /dev/null +++ b/v2/internal/encoding.go @@ -0,0 +1,167 @@ +package encoding + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" + "sync" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +var varintPool = &sync.Pool{ + New: func() interface{} { + return &[binary.MaxVarintLen64]byte{} + }, +} + +var uvarintPool = &sync.Pool{ + New: func() interface{} { + return &[binary.MaxVarintLen64]byte{} + }, +} + +// decodeBytes decodes a varint length-prefixed byte slice, returning it along with the number +// of input bytes read. +func DecodeBytes(bz []byte) ([]byte, int, error) { + s, n, err := DecodeUvarint(bz) + if err != nil { + return nil, n, err + } + // Make sure size doesn't overflow. ^uint(0) >> 1 will help determine the + // max int value variably on 32-bit and 64-bit machines. We also doublecheck + // that size is positive. 
+ size := int(s) + if s >= uint64(^uint(0)>>1) || size < 0 { + return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", s) + } + // Make sure end index doesn't overflow. We know n>0 from decodeUvarint(). + end := n + size + if end < n { + return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", size) + } + // Make sure the end index is within bounds. + if len(bz) < end { + return nil, n, fmt.Errorf("insufficient bytes decoding []byte of length %v", size) + } + bz2 := make([]byte, size) + copy(bz2, bz[n:end]) + return bz2, end, nil +} + +// decodeUvarint decodes a varint-encoded unsigned integer from a byte slice, returning it and the +// number of bytes decoded. +func DecodeUvarint(bz []byte) (uint64, int, error) { + u, n := binary.Uvarint(bz) + if n == 0 { + // buf too small + return u, n, errors.New("buffer too small") + } else if n < 0 { + // value larger than 64 bits (overflow) + // and -n is the number of bytes read + n = -n + return u, n, errors.New("EOF decoding uvarint") + } + return u, n, nil +} + +// decodeVarint decodes a varint-encoded integer from a byte slice, returning it and the number of +// bytes decoded. +func DecodeVarint(bz []byte) (int64, int, error) { + i, n := binary.Varint(bz) + if n == 0 { + return i, n, errors.New("buffer too small") + } else if n < 0 { + // value larger than 64 bits (overflow) + // and -n is the number of bytes read + n = -n + return i, n, errors.New("EOF decoding varint") + } + return i, n, nil +} + +// EncodeBytes writes a varint length-prefixed byte slice to the writer. +func EncodeBytes(w io.Writer, bz []byte) error { + err := EncodeUvarint(w, uint64(len(bz))) + if err != nil { + return err + } + _, err = w.Write(bz) + return err +} + +// encodeBytesSlice length-prefixes the byte slice and returns it. 
+func EncodeBytesSlice(bz []byte) ([]byte, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + err := EncodeBytes(buf, bz) + + bytesCopy := make([]byte, buf.Len()) + copy(bytesCopy, buf.Bytes()) + + return bytesCopy, err +} + +// encodeBytesSize returns the byte size of the given slice including length-prefixing. +func EncodeBytesSize(bz []byte) int { + return EncodeUvarintSize(uint64(len(bz))) + len(bz) +} + +// EncodeUvarint writes a varint-encoded unsigned integer to an io.Writer. +func EncodeUvarint(w io.Writer, u uint64) error { + // See comment in encodeVarint + buf := uvarintPool.Get().(*[binary.MaxVarintLen64]byte) + + n := binary.PutUvarint(buf[:], u) + _, err := w.Write(buf[0:n]) + + uvarintPool.Put(buf) + + return err +} + +// EncodeUvarintSize returns the byte size of the given integer as a varint. +func EncodeUvarintSize(u uint64) int { + if u == 0 { + return 1 + } + return (bits.Len64(u) + 6) / 7 +} + +// EncodeVarint writes a varint-encoded integer to an io.Writer. +func EncodeVarint(w io.Writer, i int64) error { + // Use a pool here to reduce allocations. + // + // Though this allocates just 10 bytes on the stack, doing allocation for every calls + // cost us a huge memory. The profiling show that using pool save us ~30% memory. + // + // Since when we don't have concurrent access to the pool, the speed will nearly identical. + // If we need to support concurrent access, we can accept a *[binary.MaxVarintLen64]byte as + // input, so the caller can allocate just one and pass the same array pointer to each call. + buf := varintPool.Get().(*[binary.MaxVarintLen64]byte) + + n := binary.PutVarint(buf[:], i) + _, err := w.Write(buf[0:n]) + + varintPool.Put(buf) + + return err +} + +// EncodeVarintSize returns the byte size of the given integer as a varint. 
// EncodeVarintSize returns the byte size of i when varint-encoded.
// It mirrors binary.PutVarint's zig-zag transform and then counts the
// 7-bit groups required (the uvarint size of the transformed value).
func EncodeVarintSize(i int64) int {
	// zig-zag encode: small magnitudes (positive or negative) stay small
	zz := uint64(i) << 1
	if i < 0 {
		zz = ^zz
	}
	if zz == 0 {
		return 1
	}
	return (bits.Len64(zz) + 6) / 7
}
See: + // https://github.com/cosmos/iavl/issues/339 + "max int32": {bz, uint64(math.MaxInt32), nil, true}, + "max int32 -1": {bz, uint64(math.MaxInt32) - 1, nil, true}, + "max int32 -10": {bz, uint64(math.MaxInt32) - 10, nil, true}, + "max int32 +1": {bz, uint64(math.MaxInt32) + 1, nil, true}, + "max int32 +10": {bz, uint64(math.MaxInt32) + 10, nil, true}, + + "max int32*2": {bz, uint64(math.MaxInt32) * 2, nil, true}, + "max int32*2 -1": {bz, uint64(math.MaxInt32)*2 - 1, nil, true}, + "max int32*2 -10": {bz, uint64(math.MaxInt32)*2 - 10, nil, true}, + "max int32*2 +1": {bz, uint64(math.MaxInt32)*2 + 1, nil, true}, + "max int32*2 +10": {bz, uint64(math.MaxInt32)*2 + 10, nil, true}, + + "max uint32": {bz, uint64(math.MaxUint32), nil, true}, + "max uint32 -1": {bz, uint64(math.MaxUint32) - 1, nil, true}, + "max uint32 -10": {bz, uint64(math.MaxUint32) - 10, nil, true}, + "max uint32 +1": {bz, uint64(math.MaxUint32) + 1, nil, true}, + "max uint32 +10": {bz, uint64(math.MaxUint32) + 10, nil, true}, + + "max uint32*2": {bz, uint64(math.MaxUint32) * 2, nil, true}, + "max uint32*2 -1": {bz, uint64(math.MaxUint32)*2 - 1, nil, true}, + "max uint32*2 -10": {bz, uint64(math.MaxUint32)*2 - 10, nil, true}, + "max uint32*2 +1": {bz, uint64(math.MaxUint32)*2 + 1, nil, true}, + "max uint32*2 +10": {bz, uint64(math.MaxUint32)*2 + 10, nil, true}, + + "max int64": {bz, uint64(math.MaxInt64), nil, true}, + "max int64 -1": {bz, uint64(math.MaxInt64) - 1, nil, true}, + "max int64 -10": {bz, uint64(math.MaxInt64) - 10, nil, true}, + "max int64 +1": {bz, uint64(math.MaxInt64) + 1, nil, true}, + "max int64 +10": {bz, uint64(math.MaxInt64) + 10, nil, true}, + + "max uint64": {bz, uint64(math.MaxUint64), nil, true}, + "max uint64 -1": {bz, uint64(math.MaxUint64) - 1, nil, true}, + "max uint64 -10": {bz, uint64(math.MaxUint64) - 10, nil, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Generate an input slice. 
// Iterator is the read-only iteration contract shared by TreeIterator
// (in-tree traversal) and LeafIterator (direct leaf-table scan).
type Iterator interface {
	// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
	// CONTRACT: start, end readonly []byte
	Domain() (start []byte, end []byte)

	// Valid returns whether the current iterator is valid. Once invalid, the TreeIterator remains
	// invalid forever.
	Valid() bool

	// Next moves the iterator to the next key in the database, as defined by order of iteration.
	// If Valid returns false, this method will panic.
	Next()

	// Key returns the key at the current position. Panics if the iterator is invalid.
	// CONTRACT: key readonly []byte
	Key() (key []byte)

	// Value returns the value at the current position. Panics if the iterator is invalid.
	// CONTRACT: value readonly []byte
	Value() (value []byte)

	// Error returns the last error encountered by the iterator, if any.
	Error() error

	// Close closes the iterator, releasing any allocated resources.
	Close() error
}

// Compile-time interface conformance checks.
var (
	_ Iterator = (*TreeIterator)(nil)
	_ Iterator = (*LeafIterator)(nil)
)

// TreeIterator walks the tree in key order using an explicit node stack
// (no recursion), loading children on demand via the tree's node getters.
type TreeIterator struct {
	tree       *Tree
	start, end []byte // iteration domain
	ascending  bool   // ascending traversal
	inclusive  bool   // end key inclusiveness

	stack   []*Node // pending subtrees; top of stack is visited next
	started bool    // set after the first step; gates the start-boundary skip

	key, value []byte // current key, value
	err        error  // current error
	valid      bool   // iteration status

	metrics metrics.Proxy
}

// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
func (i *TreeIterator) Domain() (start []byte, end []byte) {
	return i.start, i.end
}

// Valid reports whether the iterator is positioned on a key/value pair.
func (i *TreeIterator) Valid() bool {
	return i.valid
}

// Next advances the iterator one key in iteration order.
// NOTE(review): the Iterator doc says Next panics when Valid is false, but
// this implementation is a no-op instead — confirm which contract is intended.
func (i *TreeIterator) Next() {
	if i.metrics != nil {
		defer i.metrics.MeasureSince(time.Now(), "iavl_v2", "iterator", "next")
	}
	if !i.valid {
		return
	}
	if len(i.stack) == 0 {
		i.valid = false
		return
	}
	if i.ascending {
		i.stepAscend()
	} else {
		i.stepDescend()
	}
	i.started = true
}

// push places node on top of the traversal stack.
func (i *TreeIterator) push(node *Node) {
	i.stack = append(i.stack, node)
}

// pop removes and returns the top of the traversal stack, or nil when empty.
func (i *TreeIterator) pop() (node *Node) {
	if len(i.stack) == 0 {
		return nil
	}
	node = i.stack[len(i.stack)-1]
	i.stack = i.stack[:len(i.stack)-1]
	return
}

// stepAscend pops nodes until the next in-range leaf is found, expanding
// branch nodes onto the stack as it goes. On success i.key/i.value are set;
// otherwise the iterator becomes invalid (range exhausted or node-load error).
func (i *TreeIterator) stepAscend() {
	var n *Node
	for {
		n = i.pop()
		if n == nil {
			// stack exhausted: no more leaves
			i.valid = false
			return
		}
		if n.isLeaf() {
			// Before the first yield, leaves sorting below start are skipped.
			// bytes.Compare treats a nil start as "", so every key passes then.
			if !i.started && bytes.Compare(n.key, i.start) < 0 {
				continue
			}
			if i.isPastEndAscend(n.key) {
				i.valid = false
				return
			}
			break
		}
		right, err := n.getRightNode(i.tree)
		if err != nil {
			i.err = err
			i.valid = false
			return
		}

		// Prune: descend left only when start sorts before the branch key
		// (keys >= n.key live in the right subtree — assumes the standard
		// IAVL branch-key invariant; confirm against Node construction).
		if bytes.Compare(i.start, n.key) < 0 {
			left, err := n.getLeftNode(i.tree)
			if err != nil {
				i.err = err
				i.valid = false
				return
			}
			// Push right first so the left subtree is visited before it.
			i.push(right)
			i.push(left)
		} else {
			i.push(right)
		}

	}
	i.key = n.key
	i.value = n.value
}
n.key) + // if end is inclusive and the key is greater than end, skip + if i.inclusive && res < 0 { + continue + } + // if end is not inclusive (default) and the key is greater than or equal to end, skip + if res <= 0 { + continue + } + } + if i.isPastEndDescend(n.key) { + i.valid = false + return + } + break + } + left, err := n.getLeftNode(i.tree) + if err != nil { + i.err = err + i.valid = false + return + } + + if i.end == nil || bytes.Compare(n.key, i.end) <= 0 { + right, err := n.getRightNode(i.tree) + if err != nil { + i.err = err + i.valid = false + return + } + i.push(left) + i.push(right) + } else { + i.push(left) + } + } + i.key = n.key + i.value = n.value +} + +func (i *TreeIterator) isPastEndAscend(key []byte) bool { + if i.end == nil { + return false + } + if i.inclusive { + return bytes.Compare(key, i.end) > 0 + } + return bytes.Compare(key, i.end) >= 0 +} + +func (i *TreeIterator) isPastEndDescend(key []byte) bool { + if i.start == nil { + return false + } + return bytes.Compare(key, i.start) < 0 +} + +func (i *TreeIterator) Key() (key []byte) { + return i.key +} + +func (i *TreeIterator) Value() (value []byte) { + return i.value +} + +func (i *TreeIterator) Error() error { + return i.err +} + +func (i *TreeIterator) Close() error { + i.stack = nil + i.valid = false + return i.err +} + +type LeafIterator struct { + sql *SqliteDb + itrStmt *sqlite3.Stmt + start []byte + end []byte + valid bool + err error + key []byte + value []byte + metrics metrics.Proxy + itrIdx int +} + +func (l *LeafIterator) Domain() (start []byte, end []byte) { + return l.start, l.end +} + +func (l *LeafIterator) Valid() bool { + return l.valid +} + +func (l *LeafIterator) Next() { + if l.metrics != nil { + defer l.metrics.MeasureSince(time.Now(), "iavl_v2", "iterator", "next") + } + if !l.valid { + return + } + + hasRow, err := l.itrStmt.Step() + if err != nil { + closeErr := l.Close() + if closeErr != nil { + l.err = fmt.Errorf("error closing iterator: %w; %w", closeErr, 
err) + } + return + } + if !hasRow { + closeErr := l.Close() + if closeErr != nil { + l.err = fmt.Errorf("error closing iterator: %w; %w", closeErr, err) + } + return + } + if err = l.itrStmt.Scan(&l.key, &l.value); err != nil { + closeErr := l.Close() + if closeErr != nil { + l.err = fmt.Errorf("error closing iterator: %w; %w", closeErr, err) + } + return + } +} + +func (l *LeafIterator) Key() (key []byte) { + return l.key +} + +func (l *LeafIterator) Value() (value []byte) { + return l.value +} + +func (l *LeafIterator) Error() error { + return l.err +} + +func (l *LeafIterator) Close() error { + if l.valid { + if l.metrics != nil { + l.metrics.IncrCounter(1, "iavl_v2", "iterator", "close") + } + l.valid = false + delete(l.sql.iterators, l.itrIdx) + return l.itrStmt.Close() + } + return nil +} + +func (tree *Tree) Iterator(start, end []byte, inclusive bool) (itr Iterator, err error) { + if tree.storeLatestLeaves { + leafItr := &LeafIterator{ + sql: tree.sql, + start: start, + end: end, + valid: true, + metrics: tree.metricsProxy, + } + // TODO: handle inclusive + // TODO: profile re-use of some prepared statement to see if there is improvement + leafItr.itrStmt, leafItr.itrIdx, err = tree.sql.getLeafIteratorQuery(start, end, true, inclusive) + if err != nil { + return nil, err + } + itr = leafItr + } else { + itr = &TreeIterator{ + tree: tree, + start: start, + end: end, + ascending: true, + inclusive: inclusive, + valid: true, + stack: []*Node{tree.root}, + metrics: tree.metricsProxy, + } + } + + if tree.metricsProxy != nil { + tree.metricsProxy.IncrCounter(1, "iavl_v2", "iterator", "open") + } + itr.Next() + return itr, err +} + +func (tree *Tree) ReverseIterator(start, end []byte) (itr Iterator, err error) { + if tree.storeLatestLeaves { + leafItr := &LeafIterator{ + sql: tree.sql, + start: start, + end: end, + valid: true, + metrics: tree.metricsProxy, + } + // TODO: handle inclusive + // TODO: profile re-use of some prepared statement to see if there is 
improvement + leafItr.itrStmt, leafItr.itrIdx, err = tree.sql.getLeafIteratorQuery(start, end, false, false) + if err != nil { + return nil, err + } + itr = leafItr + } else { + itr = &TreeIterator{ + tree: tree, + start: start, + end: end, + ascending: false, + inclusive: false, + valid: true, + stack: []*Node{tree.root}, + metrics: tree.metricsProxy, + } + } + if tree.metricsProxy != nil { + tree.metricsProxy.IncrCounter(1, "iavl_v2", "iterator", "open") + } + itr.Next() + return itr, nil +} diff --git a/v2/iterator_test.go b/v2/iterator_test.go new file mode 100644 index 000000000..767fd0b13 --- /dev/null +++ b/v2/iterator_test.go @@ -0,0 +1,308 @@ +package iavl_test + +import ( + "fmt" + "testing" + + "github.com/cosmos/iavl/v2" + "github.com/stretchr/testify/require" +) + +func Test_Iterator(t *testing.T) { + pool := iavl.NewNodePool() + sql, err := iavl.NewInMemorySqliteDb(pool) + require.NoError(t, err) + + tree := iavl.NewTree(sql, pool, iavl.TreeOptions{StateStorage: false}) + set := func(key string, value string) { + _, err := tree.Set([]byte(key), []byte(value)) + require.NoError(t, err) + } + set("a", "1") + set("b", "2") + set("c", "3") + set("d", "4") + set("e", "5") + set("f", "6") + set("g", "7") + + cases := []struct { + name string + start, end []byte + inclusive bool + ascending bool + expectedCount int + expectedStart []byte + expectedEnd []byte + }{ + { + name: "all", + start: nil, + end: nil, + ascending: true, + expectedCount: 7, + expectedStart: []byte("a"), + expectedEnd: []byte("g"), + }, + { + name: "b start", + start: []byte("b"), + end: nil, + ascending: true, + expectedCount: 6, + expectedStart: []byte("b"), + expectedEnd: []byte("g"), + }, + { + name: "ab start", + start: []byte("ab"), + end: nil, + ascending: true, + expectedCount: 6, + expectedStart: []byte("b"), + expectedEnd: []byte("g"), + }, + { + name: "c end inclusive", + start: nil, + end: []byte("c"), + ascending: true, + inclusive: true, + expectedCount: 3, + expectedStart: 
[]byte("a"), + expectedEnd: []byte("c"), + }, + { + name: "d end exclusive", + start: nil, + end: []byte("d"), + ascending: true, + inclusive: false, + expectedCount: 3, + expectedStart: []byte("a"), + expectedEnd: []byte("c"), + }, + { + name: "ce end inclusive", + start: nil, + end: []byte("c"), + ascending: true, + inclusive: true, + expectedCount: 3, + expectedStart: []byte("a"), + expectedEnd: []byte("c"), + }, + { + name: "ce end exclusive", + start: nil, + end: []byte("ce"), + ascending: true, + inclusive: false, + expectedCount: 3, + expectedStart: []byte("a"), + expectedEnd: []byte("c"), + }, + { + name: "b to e", + start: []byte("b"), + end: []byte("e"), + inclusive: true, + ascending: true, + expectedCount: 4, + expectedStart: []byte("b"), + expectedEnd: []byte("e"), + }, + { + name: "all desc", + start: nil, + end: nil, + ascending: false, + expectedCount: 7, + expectedStart: []byte("g"), + expectedEnd: []byte("a"), + }, + { + name: "f start desc", + start: nil, + end: []byte("f"), + ascending: false, + expectedCount: 5, + expectedStart: []byte("e"), + expectedEnd: []byte("a"), + }, + { + name: "fe start desc", + start: nil, + end: []byte("fe"), + ascending: false, + expectedCount: 6, + expectedStart: []byte("f"), + expectedEnd: []byte("a"), + }, + { + name: "c stop desc", + start: []byte("c"), + end: nil, + ascending: false, + expectedCount: 5, + expectedStart: []byte("g"), + expectedEnd: []byte("c"), + }, + { + name: "ce stop desc", + start: []byte("ce"), + end: nil, + ascending: false, + expectedCount: 4, + expectedStart: []byte("g"), + expectedEnd: []byte("d"), + }, + { + name: "f to c desc", + start: []byte("c"), + end: []byte("f"), + ascending: false, + expectedCount: 3, + expectedStart: []byte("e"), + expectedEnd: []byte("c"), + }, + { + name: "fe to f should include f", + start: []byte("f"), + end: []byte("fe"), + ascending: false, + expectedCount: 1, + expectedStart: []byte("f"), + expectedEnd: []byte("f"), + }, + { + name: "no range", + start: 
[]byte("ce"), + end: []byte("cf"), + ascending: true, + expectedCount: 0, + expectedStart: nil, + expectedEnd: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + var ( + itr iavl.Iterator + err error + ) + if tc.ascending { + itr, err = tree.Iterator(tc.start, tc.end, tc.inclusive) + } else { + itr, err = tree.ReverseIterator(tc.start, tc.end) + } + require.NoError(t, err) + + if tc.expectedCount == 0 { + require.False(t, itr.Valid()) + } + + cnt := 0 + for ; itr.Valid(); itr.Next() { + if cnt == 0 { + require.Equal(t, tc.expectedStart, itr.Key()) + } + //fmt.Printf("%s %s\n", itr.Key(), itr.Value()) + require.NoError(t, itr.Error()) + cnt++ + } + require.Equal(t, tc.expectedCount, cnt) + require.Equal(t, tc.expectedEnd, itr.Key()) + require.False(t, itr.Valid()) + require.NoError(t, itr.Close()) + }) + } +} + +func Test_IteratorTree(t *testing.T) { + tmpDir := t.TempDir() + pool := iavl.NewNodePool() + sql, err := iavl.NewSqliteDb(pool, iavl.SqliteDbOptions{Path: tmpDir}) + require.NoError(t, err) + + tree := iavl.NewTree(sql, pool, iavl.TreeOptions{StateStorage: true}) + set := func(key string, value string) { + _, err := tree.Set([]byte(key), []byte(value)) + require.NoError(t, err) + } + set("a", "1") + set("b", "2") + set("c", "3") + set("d", "4") + set("e", "5") + set("f", "6") + set("g", "7") + + _, version, err := tree.SaveVersion() + require.NoError(t, err) + tree = iavl.NewTree(sql, pool, iavl.TreeOptions{StateStorage: true}) + require.NoError(t, tree.LoadVersion(version)) + cases := []struct { + name string + start, end []byte + inclusive bool + ascending bool + expectedCount int + expectedStart []byte + expectedEnd []byte + }{ + { + name: "all", + start: nil, + end: nil, + ascending: true, + expectedCount: 7, + expectedStart: []byte("a"), + expectedEnd: []byte("g"), + }, + { + name: "all desc", + start: nil, + end: nil, + ascending: false, + expectedCount: 7, + expectedStart: []byte("g"), + expectedEnd: []byte("a"), + 
}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + var ( + itr iavl.Iterator + err error + ) + if tc.ascending { + itr, err = tree.Iterator(tc.start, tc.end, tc.inclusive) + } else { + itr, err = tree.ReverseIterator(tc.start, tc.end) + } + require.NoError(t, err) + + one, err := tree.Get([]byte("a")) + require.NoError(t, err) + require.Equal(t, []byte("1"), one) + + cnt := 0 + for ; itr.Valid(); itr.Next() { + if cnt == 0 { + require.Equal(t, tc.expectedStart, itr.Key()) + } + fmt.Printf("%s %s\n", itr.Key(), itr.Value()) + require.NoError(t, itr.Error()) + cnt++ + } + require.Equal(t, tc.expectedCount, cnt) + require.Equal(t, tc.expectedEnd, itr.Key()) + require.False(t, itr.Valid()) + require.NoError(t, itr.Close()) + }) + } + +} diff --git a/v2/metrics/metrics.go b/v2/metrics/metrics.go new file mode 100644 index 000000000..0ce25dddf --- /dev/null +++ b/v2/metrics/metrics.go @@ -0,0 +1,120 @@ +package metrics + +import ( + "fmt" + "os" + "time" + + "github.com/aybabtme/uniplot/histogram" + "github.com/dustin/go-humanize" +) + +type Label struct { + Name string + Value string +} + +type Proxy interface { + IncrCounter(val float32, keys ...string) + SetGauge(val float32, keys ...string) + MeasureSince(start time.Time, keys ...string) +} + +type TreeMetrics struct { + PoolGet int64 + PoolReturn int64 + PoolEvict int64 + PoolEvictMiss int64 + PoolFault int64 + + TreeUpdate int64 + TreeNewNode int64 + TreeDelete int64 +} + +type DbMetrics struct { + WriteDurations []time.Duration + WriteTime time.Duration + WriteLeaves int64 + + QueryDurations []time.Duration + QueryTime time.Duration + QueryCount int64 + QueryLeafMiss int64 + QueryLeafCount int64 + QueryBranchCount int64 +} + +func (m *TreeMetrics) Report() { + fmt.Printf("Pool:\n gets: %s, returns: %s, faults: %s, evicts: %s, evict miss %s\n", + humanize.Comma(m.PoolGet), + humanize.Comma(m.PoolReturn), + humanize.Comma(m.PoolFault), + humanize.Comma(m.PoolEvict), + 
humanize.Comma(m.PoolEvictMiss), + ) + + fmt.Printf("\nTree:\n update: %s, new node: %s, delete: %s\n", + humanize.Comma(m.TreeUpdate), + humanize.Comma(m.TreeNewNode), + humanize.Comma(m.TreeDelete)) +} + +func (m *DbMetrics) QueryReport(bins int) error { + if m.QueryCount == 0 { + return nil + } + + fmt.Printf("queries=%s q/s=%s dur/q=%s dur=%s leaf-q=%s branch-q=%s leaf-miss=%s\n", + humanize.Comma(m.QueryCount), + humanize.Comma(int64(float64(m.QueryCount)/m.QueryTime.Seconds())), + time.Duration(int64(m.QueryTime)/m.QueryCount), + m.QueryTime.Round(time.Millisecond), + humanize.Comma(m.QueryLeafCount), + humanize.Comma(m.QueryBranchCount), + humanize.Comma(m.QueryLeafMiss), + ) + + if bins > 0 { + var histData []float64 + for _, d := range m.QueryDurations { + if d > 50*time.Microsecond { + continue + } + histData = append(histData, float64(d)) + } + hist := histogram.Hist(bins, histData) + err := histogram.Fprintf(os.Stdout, hist, histogram.Linear(10), func(v float64) string { + return time.Duration(v).String() + }) + if err != nil { + return err + } + } + + m.SetQueryZero() + + return nil +} + +func (m *DbMetrics) SetQueryZero() { + m.QueryDurations = nil + m.QueryTime = 0 + m.QueryCount = 0 + m.QueryLeafMiss = 0 + m.QueryLeafCount = 0 + m.QueryBranchCount = 0 +} + +func (m *DbMetrics) Add(o *DbMetrics) { + m.WriteDurations = append(m.WriteDurations, o.WriteDurations...) + m.WriteTime += o.WriteTime + m.WriteLeaves += o.WriteLeaves + + m.QueryDurations = append(m.QueryDurations, o.QueryDurations...) 
+ m.QueryTime += o.QueryTime + m.QueryCount += o.QueryCount + m.QueryLeafMiss += o.QueryLeafMiss + m.QueryLeafCount += o.QueryLeafCount + m.QueryBranchCount += o.QueryBranchCount +} diff --git a/v2/migrate/core/store.go b/v2/migrate/core/store.go new file mode 100644 index 000000000..3b3f38997 --- /dev/null +++ b/v2/migrate/core/store.go @@ -0,0 +1,129 @@ +package core + +import ( + "fmt" + "strings" + "sync" + "time" + + store "cosmossdk.io/api/cosmos/store/v1beta1" + db "github.com/cosmos/cosmos-db" + "github.com/cosmos/iavl" + gogotypes "github.com/gogo/protobuf/types" + "github.com/kocubinski/costor-api/logz" + "github.com/pkg/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "google.golang.org/protobuf/proto" +) + +const ( + CommitInfoKeyFmt = "s/%d" // s/ + LatestVersionKey = "s/latest" +) + +var ( + log = logz.Logger.With().Str("module", "store").Logger() + dbOpenMtx sync.Mutex +) + +type ReadonlyStore struct { + db.DB + commitInfoByName map[string]*store.CommitInfo +} + +func (rs *ReadonlyStore) CommitInfoByName() map[string]*store.CommitInfo { + return rs.commitInfoByName +} + +func (rs *ReadonlyStore) getCommitInfoFromDB(ver int64) (*store.CommitInfo, error) { + cInfoKey := fmt.Sprintf(CommitInfoKeyFmt, ver) + + bz, err := rs.DB.Get([]byte(cInfoKey)) + if err != nil { + return nil, errors.Wrap(err, "failed to get commit info") + } else if bz == nil { + return nil, errors.New("no commit info found") + } + + cInfo := &store.CommitInfo{} + if err = proto.Unmarshal(bz, cInfo); err != nil { + return nil, errors.Wrap(err, "failed unmarshal commit info") + } + + return cInfo, nil +} + +func NewReadonlyStore(dbPath string) (*ReadonlyStore, error) { + l := log.With(). + Str("dbPath", fmt.Sprintf("%s/application.db", dbPath)). + Str("op", "NewReadonlyStore"). 
+ Logger() + since := time.Now() + l.Info().Msg("waiting for lock") + dbOpenMtx.Lock() + l.Info().Msgf("got lock in %s", time.Since(since)) + defer dbOpenMtx.Unlock() + + var err error + rs := &ReadonlyStore{ + commitInfoByName: make(map[string]*store.CommitInfo), + } + since = time.Now() + rs.DB, err = db.NewGoLevelDBWithOpts("application", dbPath, &opt.Options{ + ReadOnly: true, + }) + l.Info().Msgf("opened in %s", time.Since(since)) + if err != nil { + return nil, err + } + latestVersionBz, err := rs.DB.Get([]byte(LatestVersionKey)) + if err != nil { + return nil, err + } + + var latestVersion int64 + if err := gogotypes.StdInt64Unmarshal(&latestVersion, latestVersionBz); err != nil { + return nil, err + } + + since = time.Now() + commitInfo, err := rs.getCommitInfoFromDB(latestVersion) + if err != nil { + return nil, err + } + + var storeInfoNames []string + for _, si := range commitInfo.StoreInfos { + storeInfoNames = append(storeInfoNames, si.Name) + rs.commitInfoByName[si.Name] = commitInfo + } + log.Info().Msgf("loaded commit info in %s for stores %s", time.Since(since), + strings.Join(storeInfoNames, " ")) + + return rs, nil +} + +var ErrStoreNotFound = errors.New("store not found") + +func (rs *ReadonlyStore) LatestTree(storeKey string) (db.DB, *iavl.MutableTree, error) { + //since := time.Now() + + prefix := fmt.Sprintf("s/k:%s/", storeKey) + prefixDb := db.NewPrefixDB(rs.DB, []byte(prefix)) + tree, err := iavl.NewMutableTreeWithOpts(prefixDb, 1000, &iavl.Options{InitialVersion: 0}, true) + if err != nil { + return nil, nil, err + } + commitInfo, ok := rs.commitInfoByName[storeKey] + if !ok { + return nil, nil, ErrStoreNotFound + } + _, err = tree.LoadVersion(commitInfo.Version) + if err != nil { + return nil, nil, err + } + + //log.Info().Msgf("loaded tree in %s", time.Since(since)) + + return prefixDb, tree, nil +} diff --git a/v2/migrate/go.mod b/v2/migrate/go.mod new file mode 100644 index 000000000..e40056133 --- /dev/null +++ b/v2/migrate/go.mod @@ 
-0,0 +1,56 @@ +module github.com/cosmos/iavl/v2/migrate + +go 1.18 + +require ( + cosmossdk.io/api v0.7.2 + github.com/cosmos/cosmos-db v1.0.0 + github.com/cosmos/iavl v0.21.0-alpha.1.0.20231102165102-f418612be686 + github.com/cosmos/iavl/v2 v2.0.0-20231102165636-7c5cca0663d5 + github.com/gogo/protobuf v1.3.2 + github.com/kocubinski/costor-api v1.1.2 + github.com/spf13/cobra v1.7.0 + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 +) + +require ( + github.com/DataDog/zstd v1.4.5 // indirect + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e // indirect + github.com/bvinc/go-sqlite-lite v0.6.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.8.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect + github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677 // indirect + github.com/cockroachdb/redact v1.0.8 // indirect + github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.3 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect + github.com/cosmos/ics23/go v0.10.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/dot v1.6.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/klauspost/compress v1.15.9 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/linxGnu/grocksdb v1.7.15 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rs/zerolog v1.30.0 // 
indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect +) \ No newline at end of file diff --git a/v2/migrate/go.sum b/v2/migrate/go.sum new file mode 100644 index 000000000..32b14c8d9 --- /dev/null +++ b/v2/migrate/go.sum @@ -0,0 +1,454 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cosmossdk.io/api v0.7.2 h1:BO3i5fvKMKvfaUiMkCznxViuBEfyWA/k6w2eAF6q1C4= +cosmossdk.io/api v0.7.2/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod 
h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e h1:dSeuFcs4WAJJnswS8vXy7YY1+fdlbVPuEVmDAfqvFOQ= +github.com/aybabtme/uniplot v0.0.0-20151203143629-039c559e5e7e/go.mod h1:uh71c5Vc3VNIplXOFXsnDy21T1BepgT32c5X/YPrOyc= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bvinc/go-sqlite-lite v0.6.1 h1:JU8Rz5YAOZQiU3WEulKF084wfXpytRiqD2IaW2QjPz4= +github.com/bvinc/go-sqlite-lite v0.6.1/go.mod h1:2GiE60NUdb0aNhDdY+LXgrqAVDpi2Ijc6dB6ZMp9x6s= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 
+github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= +github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= +github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= +github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677 h1:qbb/AE938DFhOajUYh9+OXELpSF9KZw2ZivtmW6eX1Q= +github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677/go.mod h1:890yq1fUb9b6dGNwssgeUO5vQV9qfXnCPxAJhBQfXw0= +github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= +github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cosmos/cosmos-db v1.0.0 h1:EVcQZ+qYag7W6uorBKFPvX6gRjw6Uq2hIh4hCWjuQ0E= +github.com/cosmos/cosmos-db v1.0.0/go.mod h1:iBvi1TtqaedwLdcrZVYRSSCb6eSy61NLj4UNmdIgs0U= +github.com/cosmos/cosmos-proto v1.0.0-beta.3 
h1:VitvZ1lPORTVxkmF2fAp3IiA61xVwArQYKXTdEcpW6o= +github.com/cosmos/cosmos-proto v1.0.0-beta.3/go.mod h1:t8IASdLaAq+bbHbjq4p960BvcTqtwuAxid3b/2rOD6I= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v0.21.0-alpha.1.0.20231102165102-f418612be686 h1:qrMEAMuG0GonYuEnzLWvt+uIqhg3X/uFaDfbkZl0iBo= +github.com/cosmos/iavl v0.21.0-alpha.1.0.20231102165102-f418612be686/go.mod h1:3avwC2UP2IGFn+uOQMXc2Z4lKy/1KTkTyMh/bB327b4= +github.com/cosmos/iavl-bench/bench v0.0.2 h1:rQtzZ6wsFgtTyL2iKWcSon9eJnSDdZzNgp6RHrdu+Mg= +github.com/cosmos/iavl-bench/bench v0.0.2/go.mod h1:j2rLae77EffacWcp7mmj3Uaa4AOAmZA7ymvhsuBQKKI= +github.com/cosmos/iavl/v2 v2.0.0-20231102165636-7c5cca0663d5 h1:UwWp3Vsv4UjkPTKN+YE+UlsjRFTPisFSuyS+X8GIUyw= +github.com/cosmos/iavl/v2 v2.0.0-20231102165636-7c5cca0663d5/go.mod h1:CiI/8RGo66kVqCCXvhgzzdHdPG9qW9xC5krZ6uny18I= +github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= +github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod 
h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/dot v1.6.0 h1:vUzuoVE8ipzS7QkES4UfxdpCwdU2U97m2Pb2tQCoYRY= +github.com/emicklei/dot v1.6.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 
+github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod 
h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kocubinski/costor-api v1.1.2 h1:aGimIp92F6oitRdd99Jhy4TKzglQtA4WES/oVUWBGyY= +github.com/kocubinski/costor-api v1.1.2/go.mod h1:ESMBMDkKfN+9vvvhhNVdKLhbOmzI3O/i16iXvRM9Tuc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/linxGnu/grocksdb v1.7.15 h1:AEhP28lkeAybv5UYNYviYISpR6bJejEnKuYbnWAnxx0= +github.com/linxGnu/grocksdb v1.7.15/go.mod h1:pY55D0o+r8yUYLq70QmhdudxYvoDb9F+9puf4m3/W+U= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0/go.mod 
h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sclevine/agouti 
v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb h1:mIKbk8weKhSeLH2GmUTrvx8CjkyJmnU1wFmg59CUjFA= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod 
h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/v2/migrate/main.go b/v2/migrate/main.go new file mode 100644 index 000000000..61c5c221f --- /dev/null +++ b/v2/migrate/main.go @@ -0,0 +1,22 @@ +package main + +import ( + "fmt" + "os" + + v0 
"github.com/cosmos/iavl/v2/migrate/v0" + "github.com/spf13/cobra" +) + +func main() { + root := cobra.Command{ + Use: "migrate", + Short: "migrate application.db to IAVL v2", + } + root.AddCommand(v0.Command()) + + if err := root.Execute(); err != nil { + fmt.Printf("Error: %s\n", err.Error()) + os.Exit(1) + } +} diff --git a/v2/migrate/v0/migrate_v0.go b/v2/migrate/v0/migrate_v0.go new file mode 100644 index 000000000..eef61de61 --- /dev/null +++ b/v2/migrate/v0/migrate_v0.go @@ -0,0 +1,386 @@ +package v0 + +import ( + "bytes" + "fmt" + "sync" + + iavlv2 "github.com/cosmos/iavl/v2" + "github.com/cosmos/iavl/v2/migrate/core" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/kocubinski/costor-api/logz" + "github.com/spf13/cobra" +) + +func Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "v0", + Short: "migrate latest iavl v0 application.db state to iavl v2 in sqlite", + } + cmd.AddCommand(allCommand(), snapshotCommand(), metadataCommand(), latestVersionCommand()) + return cmd +} + +const ( + latestVersionKey = "s/latest" + commitInfoKeyFmt = "s/%d" // s/ + appVersionKey = "s/appversion" +) + +func metadataCommand() *cobra.Command { + var ( + dbv0 string + dbv2 string + ) + cmd := &cobra.Command{ + Use: "v45-metadata", + Short: "migrate CosmosSDK v0.45 store metadata stored in application.db state to iavl v2 in sqlite", + RunE: func(cmd *cobra.Command, args []string) error { + log := logz.Logger.With().Str("op", "migrate").Logger() + + v0, err := core.NewReadonlyStore(dbv0) + if err != nil { + return err + } + v2, err := iavlv2.NewSqliteKVStore(iavlv2.SqliteDbOptions{Path: dbv2}) + if err != nil { + return err + } + bz, err := v0.Get([]byte(latestVersionKey)) + if err != nil { + return err + } + i64 := &types.Int64Value{} + err = proto.Unmarshal(bz, i64) + if err != nil { + return err + } + log.Info().Msgf("latest version: %d\n", i64.Value) + if err = v2.Set([]byte(latestVersionKey), bz); err != nil { + return err + } + + 
bz, err = v0.Get([]byte(fmt.Sprintf(commitInfoKeyFmt, i64.Value))) + if err != nil { + return err + } + commitInfo := &CommitInfo{} + if err = proto.Unmarshal(bz, commitInfo); err != nil { + return err + } + if err = v2.Set([]byte(fmt.Sprintf(commitInfoKeyFmt, i64.Value)), bz); err != nil { + return err + } + + bz, err = v0.Get([]byte(appVersionKey)) + if err != nil { + return err + } + if err = v2.Set([]byte(appVersionKey), bz); err != nil { + return err + } + + return nil + }, + } + + cmd.Flags().StringVar(&dbv0, "db-v0", "", "Path to the v0 application.db") + cmd.Flags().StringVar(&dbv2, "db-v2", "", "Path to the v2 root") + if err := cmd.MarkFlagRequired("db-v0"); err != nil { + panic(err) + } + if err := cmd.MarkFlagRequired("db-v2"); err != nil { + panic(err) + } + return cmd +} + +func latestVersionCommand() *cobra.Command { + var ( + db string + version int + set bool + ) + cmd := &cobra.Command{ + Use: "latest-version", + Short: "get/set the latest version in the metadata.sqlite", + RunE: func(cmd *cobra.Command, args []string) error { + kv, err := iavlv2.NewSqliteKVStore(iavlv2.SqliteDbOptions{Path: db}) + if err != nil { + return err + } + if set && version == -1 { + return fmt.Errorf("version must be set") + } + if set { + + } else { + bz, err := kv.Get([]byte(latestVersionKey)) + if err != nil { + return err + } + i64 := &types.Int64Value{} + err = proto.Unmarshal(bz, i64) + if err != nil { + return err + } + fmt.Printf("latest version: %d\n", i64.Value) + } + return nil + }, + } + cmd.Flags().StringVar(&db, "db", "", "Path to the metadata.sqlite") + if err := cmd.MarkFlagRequired("db"); err != nil { + panic(err) + } + cmd.Flags().IntVar(&version, "version", -1, "Version to set") + cmd.Flags().BoolVar(&set, "set", false, "Set the latest version") + return cmd +} + +func snapshotCommand() *cobra.Command { + var ( + dbv0 string + snapshotPath string + storekey string + concurrency int + ) + cmd := &cobra.Command{ + Use: "snapshot", + Short: "ingest 
latest iavl v0 application.db to a pre-order snapshot", + RunE: func(cmd *cobra.Command, args []string) error { + rs, err := core.NewReadonlyStore(dbv0) + if err != nil { + return err + } + + var wg sync.WaitGroup + + var storeKeys []string + if storekey != "" { + storeKeys = []string{storekey} + } else { + for k := range rs.CommitInfoByName() { + storeKeys = append(storeKeys, k) + } + } + + lock := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + lock <- struct{}{} + } + + // init db and close + initConn, err := iavlv2.NewIngestSnapshotConnection(snapshotPath) + if err != nil { + return err + } + if err = initConn.Close(); err != nil { + return err + } + + for _, storeKey := range storeKeys { + wg.Add(1) + go func(sk string) { + var count int64 + + <-lock + + log := logz.Logger.With().Str("store", sk).Logger() + log.Info().Msgf("migrating %s", sk) + + s, err := core.NewReadonlyStore(dbv0) + if err != nil { + panic(err) + } + _, tree, err := s.LatestTree(sk) + if err != nil { + log.Warn().Err(err).Msgf("skipping %s", sk) + wg.Done() + return + } + + exporter, err := tree.ExportPreOrder() + if err != nil { + panic(err) + } + + nextNodeFn := func() (*iavlv2.SnapshotNode, error) { + count++ + exportNode, err := exporter.Next() + if err != nil { + log.Warn().Err(err).Msgf("export err after %d", count) + return nil, err + } + return &iavlv2.SnapshotNode{ + Key: exportNode.Key, + Value: exportNode.Value, + Height: exportNode.Height, + Version: exportNode.Version, + }, nil + } + + conn, err := iavlv2.NewIngestSnapshotConnection(snapshotPath) + if err != nil { + panic(err) + } + root, err := iavlv2.IngestSnapshot(conn, sk, tree.Version(), nextNodeFn) + if err != nil { + panic(err) + } + + v0Hash, err := tree.WorkingHash() + if err != nil { + panic(err) + } + if !bytes.Equal(root.GetHash(), v0Hash) { + panic(fmt.Sprintf("v2 hash=%x != v0 hash=%x", root.GetHash(), v0Hash)) + } + + lock <- struct{}{} + wg.Done() + }(storeKey) + } + + wg.Wait() + return 
nil + }, + } + + cmd.Flags().StringVar(&dbv0, "db-v0", "", "Path to the v0 application.db") + if err := cmd.MarkFlagRequired("db-v0"); err != nil { + panic(err) + } + cmd.Flags().StringVar(&snapshotPath, "snapshot-path", "", "Path to the snapshot") + if err := cmd.MarkFlagRequired("snapshot-path"); err != nil { + panic(err) + } + cmd.Flags().IntVar(&concurrency, "concurrency", 6, "Number of concurrent migrations") + cmd.Flags().StringVar(&storekey, "store-key", "", "Store key to migrate") + + return cmd +} + +func allCommand() *cobra.Command { + var ( + dbv0 string + dbv2 string + storekey string + concurrency int + ) + cmd := &cobra.Command{ + Use: "all", + Short: "migrate latest iavl v0 application.db state to iavl v2 in sqlite", + RunE: func(cmd *cobra.Command, args []string) error { + rs, err := core.NewReadonlyStore(dbv0) + if err != nil { + return err + } + + var wg sync.WaitGroup + + var storeKeys []string + if storekey != "" { + storeKeys = []string{storekey} + } else { + for k := range rs.CommitInfoByName() { + storeKeys = append(storeKeys, k) + } + } + + lock := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + lock <- struct{}{} + } + + for _, storeKey := range storeKeys { + wg.Add(1) + go func(sk string) { + var ( + count int64 + //since = time.Now() + ) + + <-lock + + log := logz.Logger.With().Str("store", sk).Logger() + log.Info().Msgf("migrating %s", sk) + + s, err := core.NewReadonlyStore(dbv0) + if err != nil { + panic(err) + } + _, tree, err := s.LatestTree(sk) + if err != nil { + log.Warn().Err(err).Msgf("skipping %s", sk) + wg.Done() + return + } + sql, err := iavlv2.NewSqliteDb(iavlv2.NewNodePool(), + iavlv2.SqliteDbOptions{ + Path: fmt.Sprintf("%s/%s", dbv2, sk), + WalSize: 1024 * 1024 * 1024, + }) + if err != nil { + panic(err) + } + exporter, err := tree.ExportPreOrder() + if err != nil { + panic(err) + } + + nextNodeFn := func() (*iavlv2.SnapshotNode, error) { + count++ + exportNode, err := exporter.Next() + if err != 
nil { + log.Warn().Err(err).Msgf("export err after %d", count) + return nil, err + } + return &iavlv2.SnapshotNode{ + Key: exportNode.Key, + Value: exportNode.Value, + Height: exportNode.Height, + Version: exportNode.Version, + }, nil + } + + root, err := sql.WriteSnapshot(cmd.Context(), tree.Version(), nextNodeFn, + iavlv2.SnapshotOptions{StoreLeafValues: true, WriteCheckpoint: true}) + if err != nil { + panic(err) + } + + v0Hash, err := tree.WorkingHash() + if err != nil { + panic(err) + } + if !bytes.Equal(root.GetHash(), v0Hash) { + panic(fmt.Sprintf("v2 hash=%x != v0 hash=%x", root.GetHash(), v0Hash)) + } + if err := sql.Close(); err != nil { + panic(err) + } + + lock <- struct{}{} + wg.Done() + }(storeKey) + } + + wg.Wait() + return nil + }, + } + cmd.Flags().StringVar(&dbv0, "db-v0", "", "Path to the v0 application.db") + cmd.Flags().StringVar(&dbv2, "db-v2", "", "Path to the v2 root") + cmd.Flags().StringVar(&storekey, "store-key", "", "Store key to migrate") + if err := cmd.MarkFlagRequired("db-v0"); err != nil { + panic(err) + } + if err := cmd.MarkFlagRequired("db-v2"); err != nil { + panic(err) + } + cmd.Flags().IntVar(&concurrency, "concurrency", 6, "Number of concurrent migrations") + + return cmd +} diff --git a/v2/migrate/v0/types.go b/v2/migrate/v0/types.go new file mode 100644 index 000000000..443b1fff5 --- /dev/null +++ b/v2/migrate/v0/types.go @@ -0,0 +1,61 @@ +package v0 + +import ( + "time" + + "github.com/gogo/protobuf/proto" +) + +type CommitID struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (c *CommitID) Reset() { + *c = CommitID{} +} + +func (c *CommitID) String() string { + return "" +} + +func (c *CommitID) ProtoMessage() { +} + +type StoreInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CommitId CommitID 
`protobuf:"bytes,2,opt,name=commit_id,json=commitId,proto3" json:"commit_id"` +} + +func (s *StoreInfo) Reset() { + *s = StoreInfo{} +} + +func (s *StoreInfo) String() string { + return "" +} + +func (s *StoreInfo) ProtoMessage() { +} + +type CommitInfo struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + StoreInfos []StoreInfo `protobuf:"bytes,2,rep,name=store_infos,json=storeInfos,proto3" json:"store_infos"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (c *CommitInfo) Reset() { + *c = CommitInfo{} +} + +func (c *CommitInfo) String() string { + return "" +} + +func (c *CommitInfo) ProtoMessage() {} + +var ( + _ proto.Message = (*CommitInfo)(nil) + _ proto.Message = (*CommitID)(nil) + _ proto.Message = (*StoreInfo)(nil) +) diff --git a/v2/multitree.go b/v2/multitree.go new file mode 100644 index 000000000..ccfbcd4c9 --- /dev/null +++ b/v2/multitree.go @@ -0,0 +1,289 @@ +package iavl + +import ( + "crypto/sha256" + "errors" + "fmt" + "path/filepath" + "sync/atomic" + + "github.com/cosmos/iavl/v2/metrics" + "github.com/dustin/go-humanize" + "golang.org/x/exp/slices" +) + +// MultiTree encapsulates multiple IAVL trees, each with its own "store key" in the context of the Cosmos SDK. +// Within IAVL v2 is only used to test the IAVL v2 implementation, and for import/export of IAVL v2 state. 
+type MultiTree struct { + Trees map[string]*Tree + + pool *NodePool + rootPath string + treeOpts TreeOptions + shouldCheckpoint bool + + doneCh chan saveVersionResult + errorCh chan error +} + +func NewMultiTree(rootPath string, opts TreeOptions) *MultiTree { + return &MultiTree{ + Trees: make(map[string]*Tree), + doneCh: make(chan saveVersionResult, 1000), + errorCh: make(chan error, 1000), + treeOpts: opts, + pool: NewNodePool(), + rootPath: rootPath, + } +} + +func ImportMultiTree(pool *NodePool, version int64, path string, treeOpts TreeOptions) (*MultiTree, error) { + mt := NewMultiTree(path, treeOpts) + paths, err := FindDbsInPath(path) + if err != nil { + return nil, err + } + var ( + cnt = 0 + done = make(chan struct { + path string + tree *Tree + }) + errs = make(chan error) + ) + for _, dbPath := range paths { + cnt++ + sql, err := NewSqliteDb(pool, defaultSqliteDbOptions(SqliteDbOptions{Path: dbPath})) + if err != nil { + return nil, err + } + go func(p string) { + tree := NewTree(sql, pool, mt.treeOpts) + importErr := tree.LoadSnapshot(version, PreOrder) + + if importErr != nil { + errs <- fmt.Errorf("err while importing %s; %w", p, importErr) + return + } + done <- struct { + path string + tree *Tree + }{p, tree} + }(dbPath) + } + + for i := 0; i < cnt; i++ { + select { + case err = <-errs: + return nil, err + case res := <-done: + prefix := filepath.Base(res.path) + log.Info().Msgf("imported %s", prefix) + mt.Trees[prefix] = res.tree + } + } + + return mt, nil +} + +func (mt *MultiTree) MountTree(storeKey string) error { + opts := defaultSqliteDbOptions(SqliteDbOptions{ + Path: mt.rootPath + "/" + storeKey, + }) + sql, err := NewSqliteDb(mt.pool, opts) + if err != nil { + return err + } + tree := NewTree(sql, mt.pool, mt.treeOpts) + mt.Trees[storeKey] = tree + return nil +} + +func (mt *MultiTree) MountTrees() error { + paths, err := FindDbsInPath(mt.rootPath) + if err != nil { + return err + } + for _, dbPath := range paths { + prefix := 
filepath.Base(dbPath) + sqlOpts := defaultSqliteDbOptions(SqliteDbOptions{}) + sqlOpts.Path = dbPath + log.Info().Msgf("mounting %s; opts %v", prefix, sqlOpts) + sql, err := NewSqliteDb(mt.pool, sqlOpts) + if err != nil { + return err + } + tree := NewTree(sql, mt.pool, mt.treeOpts) + mt.Trees[prefix] = tree + } + return nil +} + +func (mt *MultiTree) LoadVersion(version int64) error { + for k, tree := range mt.Trees { + if err := tree.LoadVersion(version); err != nil { + return fmt.Errorf("failed to load %s version %d; %w", k, version, err) + } + } + return nil +} + +func (mt *MultiTree) SaveVersion() ([]byte, int64, error) { + version := int64(-1) + for _, tree := range mt.Trees { + _, v, err := tree.SaveVersion() + if err != nil { + return nil, 0, err + } + if version != -1 && version != v { + return nil, 0, fmt.Errorf("unexpected; trees are at different versions: %d != %d", version, v) + } + version = v + } + return mt.Hash(), version, nil +} + +type saveVersionResult struct { + version int64 + hash []byte +} + +func (mt *MultiTree) SaveVersionConcurrently() ([]byte, int64, error) { + treeCount := 0 + var workingSize atomic.Int64 + var workingBytes atomic.Uint64 + for _, tree := range mt.Trees { + treeCount++ + go func(t *Tree) { + t.shouldCheckpoint = mt.shouldCheckpoint + h, v, err := t.SaveVersion() + workingSize.Add(t.workingSize) + workingBytes.Add(t.workingBytes) + if err != nil { + mt.errorCh <- err + } + mt.doneCh <- saveVersionResult{version: v, hash: h} + }(tree) + } + + var ( + errs []error + version = int64(-1) + ) + for i := 0; i < treeCount; i++ { + select { + case err := <-mt.errorCh: + log.Error().Err(err).Msg("failed to save version") + errs = append(errs, err) + case result := <-mt.doneCh: + if version != -1 && version != result.version { + errs = append(errs, fmt.Errorf("unexpected; trees are at different versions: %d != %d", + version, result.version)) + } + version = result.version + } + } + mt.shouldCheckpoint = false + + if 
mt.treeOpts.MetricsProxy != nil { + bz := workingBytes.Load() + sz := workingSize.Load() + fmt.Printf("version=%d work-bytes=%s work-size=%s mem-ceiling=%s\n", + version, humanize.IBytes(bz), humanize.Comma(sz), humanize.IBytes(mt.treeOpts.CheckpointMemory)) + mt.treeOpts.MetricsProxy.SetGauge(float32(workingBytes.Load()), "iavl_v2", "working_bytes") + mt.treeOpts.MetricsProxy.SetGauge(float32(workingSize.Load()), "iavl_v2", "working_size") + } + + if mt.treeOpts.CheckpointMemory > 0 && workingBytes.Load() >= mt.treeOpts.CheckpointMemory { + mt.shouldCheckpoint = true + } + + return mt.Hash(), version, errors.Join(errs...) +} + +func (mt *MultiTree) SnapshotConcurrently() error { + treeCount := 0 + for _, tree := range mt.Trees { + treeCount++ + go func(t *Tree) { + if err := t.SaveSnapshot(); err != nil { + mt.errorCh <- err + } else { + mt.doneCh <- saveVersionResult{} + } + }(tree) + } + + var errs []error + for i := 0; i < treeCount; i++ { + select { + case err := <-mt.errorCh: + log.Error().Err(err).Msg("failed to snapshot") + errs = append(errs, err) + case <-mt.doneCh: + } + } + return errors.Join(errs...) +} + +// Hash is a stand in for code at +// https://github.com/cosmos/cosmos-sdk/blob/80dd55f79bba8ab675610019a5764470a3e2fef9/store/types/commit_info.go#L30 +// it used in testing. App chains should use the store hashing code referenced above instead. +func (mt *MultiTree) Hash() []byte { + var ( + storeKeys []string + hashes []byte + ) + for k := range mt.Trees { + storeKeys = append(storeKeys, k) + } + + slices.Sort(storeKeys) + for _, k := range storeKeys { + tree := mt.Trees[k] + hashes = append(hashes, tree.root.hash...) 
+ } + hash := sha256.Sum256(hashes) + return hash[:] +} + +func (mt *MultiTree) Close() error { + for _, tree := range mt.Trees { + if err := tree.Close(); err != nil { + return err + } + } + return nil +} + +func (mt *MultiTree) WarmLeaves() error { + var cnt int + for _, tree := range mt.Trees { + cnt++ + go func(t *Tree) { + if err := t.sql.WarmLeaves(); err != nil { + mt.errorCh <- err + } else { + mt.doneCh <- saveVersionResult{} + } + }(tree) + } + for i := 0; i < cnt; i++ { + select { + case err := <-mt.errorCh: + log.Error().Err(err).Msg("failed to warm leaves") + return err + case <-mt.doneCh: + } + } + return nil +} + +func (mt *MultiTree) QueryReport(bins int) error { + m := &metrics.DbMetrics{} + for _, tree := range mt.Trees { + m.Add(tree.sql.metrics) + tree.sql.metrics.SetQueryZero() + } + return m.QueryReport(bins) +} diff --git a/v2/node.go b/v2/node.go new file mode 100644 index 000000000..f27c208ec --- /dev/null +++ b/v2/node.go @@ -0,0 +1,560 @@ +package iavl + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "math" + "sync" + "unsafe" + + encoding "github.com/cosmos/iavl/v2/internal" +) + +const hashSize = 32 + +// NodeKey represents a key of node in the DB. +type NodeKey [12]byte + +func (nk NodeKey) Version() int64 { + return int64(binary.BigEndian.Uint64(nk[:])) +} + +func (nk NodeKey) Sequence() uint32 { + return binary.BigEndian.Uint32(nk[8:]) +} + +func NewNodeKey(version int64, sequence uint32) NodeKey { + var nk NodeKey + binary.BigEndian.PutUint64(nk[:], uint64(version)) + binary.BigEndian.PutUint32(nk[8:], sequence) + return nk +} + +// String returns a string representation of the node key. +func (nk NodeKey) String() string { + return fmt.Sprintf("(%d, %d)", nk.Version(), nk.Sequence()) +} + +var emptyNodeKey = NodeKey{} + +func (nk NodeKey) IsEmpty() bool { + return nk == emptyNodeKey +} + +// Node represents a node in a Tree. 
type Node struct {
	key     []byte  // leaf: the stored key; branch: the split key (presumably the smallest key of the right subtree, per IAVL convention — confirm against insert path)
	value   []byte  // leaf payload; nil on branch nodes (see writeHashBytes/WriteBytes)
	hash    []byte  // cached hash; nil while the node is dirty/unhashed (balance() relies on this)
	nodeKey NodeKey // (version, sequence) identity of this node in the DB
	// Children are referenced two ways: by NodeKey (always set for branch
	// nodes, used to lazily load from storage) and by pointer (only set once
	// the child is resident in memory — may be nil even when the key is set).
	leftNodeKey   NodeKey
	rightNodeKey  NodeKey
	size          int64 // number of leaves in the subtree rooted here
	leftNode      *Node
	rightNode     *Node
	subtreeHeight int8 // 0 for leaves; branch = max(children)+1

	dirty  bool   // mutated since last persisted
	evict  bool   // marked for eviction from the in-memory cache (see evictChildren)
	poolId uint64 // slot id in the owning NodePool
}

// String returns a debug representation of the node's identity, child keys,
// size and height.
func (node *Node) String() string {
	return fmt.Sprintf("Node{hash: %x, nodeKey: %s, leftNodeKey: %v, rightNodeKey: %v, size: %d, subtreeHeight: %d, poolId: %d}",
		node.hash, node.nodeKey, node.leftNodeKey, node.rightNodeKey, node.size, node.subtreeHeight, node.poolId)
}

// isLeaf reports whether the node is a leaf (height 0).
func (node *Node) isLeaf() bool {
	return node.subtreeHeight == 0
}

// setLeft installs leftNode as the left child, keeping the pointer and the
// NodeKey reference in sync.
func (node *Node) setLeft(leftNode *Node) {
	node.leftNode = leftNode
	node.leftNodeKey = leftNode.nodeKey
}

// setRight installs rightNode as the right child, keeping the pointer and the
// NodeKey reference in sync.
func (node *Node) setRight(rightNode *Node) {
	node.rightNode = rightNode
	node.rightNodeKey = rightNode.nodeKey
}

// left returns the left child, panicking if it cannot be loaded; used on
// paths (rotations) where a load failure is treated as fatal.
func (node *Node) left(t *Tree) *Node {
	leftNode, err := node.getLeftNode(t)
	if err != nil {
		panic(err)
	}
	return leftNode
}

// right returns the right child, panicking if it cannot be loaded; used on
// paths (rotations) where a load failure is treated as fatal.
func (node *Node) right(t *Tree) *Node {
	rightNode, err := node.getRightNode(t)
	if err != nil {
		panic(err)
	}
	return rightNode
}

// getLeftNode will never be called on leaf nodes. all tree nodes have 2 children.
+func (node *Node) getLeftNode(t *Tree) (*Node, error) { + if node.isLeaf() { + return nil, fmt.Errorf("leaf node has no left node") + } + if node.leftNode != nil { + return node.leftNode, nil + } + var err error + node.leftNode, err = t.sql.getLeftNode(node) + if err != nil { + return nil, err + } + return node.leftNode, nil +} + +func (node *Node) getRightNode(t *Tree) (*Node, error) { + if node.isLeaf() { + return nil, fmt.Errorf("leaf node has no right node") + } + if node.rightNode != nil { + return node.rightNode, nil + } + var err error + node.rightNode, err = t.sql.getRightNode(node) + if err != nil { + return nil, err + } + return node.rightNode, nil +} + +// NOTE: mutates height and size +func (node *Node) calcHeightAndSize(t *Tree) error { + leftNode, err := node.getLeftNode(t) + if err != nil { + return err + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return err + } + + node.subtreeHeight = maxInt8(leftNode.subtreeHeight, rightNode.subtreeHeight) + 1 + node.size = leftNode.size + rightNode.size + return nil +} + +func maxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +// NOTE: assumes that node can be modified +// TODO: optimize balance & rotate +func (tree *Tree) balance(node *Node) (newSelf *Node, err error) { + if node.hash != nil { + return nil, fmt.Errorf("unexpected balance() call on persisted node") + } + balance, err := node.calcBalance(tree) + if err != nil { + return nil, err + } + + if balance > 1 { + lftBalance, err := node.leftNode.calcBalance(tree) + if err != nil { + return nil, err + } + + if lftBalance >= 0 { + // Left Left Case + newNode, err := tree.rotateRight(node) + if err != nil { + return nil, err + } + return newNode, nil + } + // Left Right Case + newLeftNode, err := tree.rotateLeft(node.leftNode) + if err != nil { + return nil, err + } + node.setLeft(newLeftNode) + + newNode, err := tree.rotateRight(node) + if err != nil { + return nil, err + } + + return newNode, nil + } + if balance 
< -1 { + rightNode, err := node.getRightNode(tree) + if err != nil { + return nil, err + } + + rightBalance, err := rightNode.calcBalance(tree) + if err != nil { + return nil, err + } + if rightBalance <= 0 { + // Right Right Case + newNode, err := tree.rotateLeft(node) + if err != nil { + return nil, err + } + return newNode, nil + } + // Right Left Case + // TODO should be mutate? ref v1 and v0 + newRightNode, err := tree.rotateRight(rightNode) + if err != nil { + return nil, err + } + node.setRight(newRightNode) + + newNode, err := tree.rotateLeft(node) + if err != nil { + return nil, err + } + return newNode, nil + } + // Nothing changed + return node, nil +} + +func (node *Node) calcBalance(t *Tree) (int, error) { + leftNode, err := node.getLeftNode(t) + if err != nil { + return 0, err + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return 0, err + } + + return int(leftNode.subtreeHeight) - int(rightNode.subtreeHeight), nil +} + +// Rotate right and return the new node and orphan. +func (tree *Tree) rotateRight(node *Node) (*Node, error) { + var err error + tree.addOrphan(node) + tree.mutateNode(node) + + tree.addOrphan(node.left(tree)) + newNode := node.left(tree) + tree.mutateNode(newNode) + + node.setLeft(newNode.right(tree)) + newNode.setRight(node) + + err = node.calcHeightAndSize(tree) + if err != nil { + return nil, err + } + + err = newNode.calcHeightAndSize(tree) + if err != nil { + return nil, err + } + + return newNode, nil +} + +// Rotate left and return the new node and orphan. 
+func (tree *Tree) rotateLeft(node *Node) (*Node, error) { + var err error + tree.addOrphan(node) + tree.mutateNode(node) + + tree.addOrphan(node.right(tree)) + newNode := node.right(tree) + tree.mutateNode(newNode) + + node.setRight(newNode.left(tree)) + newNode.setLeft(node) + + err = node.calcHeightAndSize(tree) + if err != nil { + return nil, err + } + + err = newNode.calcHeightAndSize(tree) + if err != nil { + return nil, err + } + + return newNode, nil +} + +func (node *Node) get(t *Tree, key []byte) (index int64, value []byte, err error) { + if node.isLeaf() { + switch bytes.Compare(node.key, key) { + case -1: + return 1, nil, nil + case 1: + return 0, nil, nil + default: + return 0, node.value, nil + } + } + + if bytes.Compare(key, node.key) < 0 { + leftNode, err := node.getLeftNode(t) + if err != nil { + return 0, nil, err + } + + return leftNode.get(t, key) + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return 0, nil, err + } + + index, value, err = rightNode.get(t, key) + if err != nil { + return 0, nil, err + } + + index += node.size - rightNode.size + return index, value, nil +} + +var ( + hashPool = &sync.Pool{ + New: func() any { + return sha256.New() + }, + } + emptyHash = sha256.New().Sum(nil) +) + +// Computes the hash of the node without computing its descendants. Must be +// called on nodes which have descendant node hashes already computed. 
+func (node *Node) _hash() []byte { + if node.hash != nil { + return node.hash + } + + h := hashPool.Get().(hash.Hash) + if err := node.writeHashBytes(h); err != nil { + return nil + } + node.hash = h.Sum(nil) + h.Reset() + hashPool.Put(h) + + return node.hash +} + +func (node *Node) writeHashBytes(w io.Writer) error { + var ( + n int + buf [binary.MaxVarintLen64]byte + ) + + n = binary.PutVarint(buf[:], int64(node.subtreeHeight)) + if _, err := w.Write(buf[0:n]); err != nil { + return fmt.Errorf("writing height, %w", err) + } + n = binary.PutVarint(buf[:], node.size) + if _, err := w.Write(buf[0:n]); err != nil { + return fmt.Errorf("writing size, %w", err) + } + n = binary.PutVarint(buf[:], node.nodeKey.Version()) + if _, err := w.Write(buf[0:n]); err != nil { + return fmt.Errorf("writing version, %w", err) + } + + if node.isLeaf() { + if err := EncodeBytes(w, node.key); err != nil { + return fmt.Errorf("writing key, %w", err) + } + + // Indirection needed to provide proofs without values. + // (e.g. ProofLeafNode.ValueHash) + valueHash := sha256.Sum256(node.value) + + if err := EncodeBytes(w, valueHash[:]); err != nil { + return fmt.Errorf("writing value, %w", err) + } + } else { + if err := EncodeBytes(w, node.leftNode.hash); err != nil { + return fmt.Errorf("writing left hash, %w", err) + } + if err := EncodeBytes(w, node.rightNode.hash); err != nil { + return fmt.Errorf("writing right hash, %w", err) + } + } + + return nil +} + +func EncodeBytes(w io.Writer, bz []byte) error { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], uint64(len(bz))) + if _, err := w.Write(buf[0:n]); err != nil { + return err + } + _, err := w.Write(bz) + return err +} + +// MakeNode constructs a *Node from an encoded byte slice. +func MakeNode(pool *NodePool, nodeKey NodeKey, buf []byte) (*Node, error) { + // Read node header (height, size, version, key). 
	height, n, err := encoding.DecodeVarint(buf)
	if err != nil {
		return nil, fmt.Errorf("decoding node.height, %w", err)
	}
	buf = buf[n:]
	// subtreeHeight is persisted as a varint but must fit in int8.
	if height < int64(math.MinInt8) || height > int64(math.MaxInt8) {
		return nil, errors.New("invalid height, must be int8")
	}

	size, n, err := encoding.DecodeVarint(buf)
	if err != nil {
		return nil, fmt.Errorf("decoding node.size, %w", err)
	}
	buf = buf[n:]

	key, n, err := encoding.DecodeBytes(buf)
	if err != nil {
		return nil, fmt.Errorf("decoding node.key, %w", err)
	}
	buf = buf[n:]

	hash, n, err := encoding.DecodeBytes(buf)
	if err != nil {
		return nil, fmt.Errorf("decoding node.hash, %w", err)
	}
	buf = buf[n:]

	node := pool.Get()
	node.subtreeHeight = int8(height)
	node.nodeKey = nodeKey
	node.size = size
	node.key = key
	node.hash = hash

	if node.isLeaf() {
		// Leaf payload: only the value remains.
		val, _, cause := encoding.DecodeBytes(buf)
		if cause != nil {
			return nil, fmt.Errorf("decoding node.value, %w", cause)
		}
		node.value = val
	} else {
		// Branch payload: left then right child node keys.
		leftNodeKey, n, err := encoding.DecodeBytes(buf)
		if err != nil {
			return nil, fmt.Errorf("decoding node.leftKey, %w", err)
		}
		buf = buf[n:]

		rightNodeKey, _, err := encoding.DecodeBytes(buf)
		if err != nil {
			return nil, fmt.Errorf("decoding node.rightKey, %w", err)
		}

		var leftNk, rightNk NodeKey
		copy(leftNk[:], leftNodeKey)
		copy(rightNk[:], rightNodeKey)
		node.leftNodeKey = leftNk
		node.rightNodeKey = rightNk
	}
	return node, nil
}

// WriteBytes serializes the node to w in the storage format consumed by MakeNode.
func (node *Node) WriteBytes(w io.Writer) error {
	if node == nil {
		return errors.New("cannot leafWrite nil node")
	}
	cause := encoding.EncodeVarint(w, int64(node.subtreeHeight))
	if cause != nil {
		return fmt.Errorf("writing height; %w", cause)
	}
	cause = encoding.EncodeVarint(w, node.size)
	if cause != nil {
		return fmt.Errorf("writing size; %w", cause)
	}

	cause = encoding.EncodeBytes(w, node.key)
	if cause != nil {
		return fmt.Errorf("writing key; %w", cause)
	}

	if len(node.hash) !=
hashSize { + return fmt.Errorf("hash has unexpected length: %d", len(node.hash)) + } + cause = encoding.EncodeBytes(w, node.hash) + if cause != nil { + return fmt.Errorf("writing hash; %w", cause) + } + + if node.isLeaf() { + cause = encoding.EncodeBytes(w, node.value) + if cause != nil { + return fmt.Errorf("writing value; %w", cause) + } + } else { + if node.leftNodeKey.IsEmpty() { + return fmt.Errorf("left node key is nil") + } + cause = encoding.EncodeBytes(w, node.leftNodeKey[:]) + if cause != nil { + return fmt.Errorf("writing left node key; %w", cause) + } + + if node.rightNodeKey.IsEmpty() { + return fmt.Errorf("right node key is nil") + } + cause = encoding.EncodeBytes(w, node.rightNodeKey[:]) + if cause != nil { + return fmt.Errorf("writing right node key; %w", cause) + } + } + return nil +} + +func (node *Node) Bytes() ([]byte, error) { + buf := &bytes.Buffer{} + err := node.WriteBytes(buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +var nodeSize = uint64(unsafe.Sizeof(Node{})) + hashSize + +func (node *Node) varSize() uint64 { + return uint64(len(node.key) + len(node.value)) +} + +func (node *Node) sizeBytes() uint64 { + return nodeSize + node.varSize() +} + +func (node *Node) GetHash() []byte { + return node.hash +} + +func (node *Node) evictChildren() { + if node.leftNode != nil { + node.leftNode.evict = true + node.leftNode = nil + } + if node.rightNode != nil { + node.rightNode.evict = true + node.rightNode = nil + } +} diff --git a/v2/pool.go b/v2/pool.go new file mode 100644 index 000000000..d68bd479e --- /dev/null +++ b/v2/pool.go @@ -0,0 +1,56 @@ +package iavl + +import ( + "math" + "sync" +) + +type NodePool struct { + syncPool *sync.Pool + + free chan int + nodes []Node + + poolId uint64 +} + +func NewNodePool() *NodePool { + np := &NodePool{ + syncPool: &sync.Pool{ + New: func() interface{} { + return &Node{} + }, + }, + free: make(chan int, 1000), + } + return np +} + +func (np *NodePool) Get() *Node { + if 
np.poolId == math.MaxUint64 { + np.poolId = 1 + } else { + np.poolId++ + } + n := np.syncPool.Get().(*Node) + n.poolId = np.poolId + return n +} + +func (np *NodePool) Put(node *Node) { + node.leftNodeKey = emptyNodeKey + node.rightNodeKey = emptyNodeKey + node.rightNode = nil + node.leftNode = nil + node.nodeKey = emptyNodeKey + node.hash = nil + node.key = nil + node.value = nil + node.subtreeHeight = 0 + node.size = 0 + node.dirty = false + node.evict = false + + node.poolId = 0 + np.syncPool.Put(node) +} diff --git a/v2/pool_test.go b/v2/pool_test.go new file mode 100644 index 000000000..d8a90ac0a --- /dev/null +++ b/v2/pool_test.go @@ -0,0 +1,16 @@ +package iavl + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNodePool_Get(t *testing.T) { + pool := NewNodePool() + node := pool.Get() + node.key = []byte("hello") + require.Equal(t, node.key, pool.nodes[node.poolId].key) + pool.Put(node) + require.Equal(t, []byte(nil), pool.nodes[node.poolId].key) +} diff --git a/v2/range.go b/v2/range.go new file mode 100644 index 000000000..0cf3987e7 --- /dev/null +++ b/v2/range.go @@ -0,0 +1,94 @@ +package iavl + +import ( + "fmt" +) + +type VersionRange struct { + versions []int64 + cache map[int64]int64 +} + +func (r *VersionRange) Add(version int64) error { + if len(r.versions) == 0 { + r.versions = append(r.versions, version) + return nil + } + if version <= r.versions[len(r.versions)-1] { + return fmt.Errorf("unordered insert: version %d is not greater than %d", version, r.versions[len(r.versions)-1]) + } + if version == r.versions[len(r.versions)-1] { + return fmt.Errorf("duplicate version: %d", version) + } + r.versions = append(r.versions, version) + return nil +} + +// Find returns the shard that contains the given version by binary searching +// the version range. If the version is after the last shard, -1 is returned. 
func (r *VersionRange) Find(version int64) int64 {
	vs := r.versions
	if len(vs) == 0 || version > vs[len(vs)-1] {
		return -1
	}
	low, high := 0, len(vs)-1
	for low <= high {
		mid := (low + high) / 2
		if vs[mid] == version {
			return version
		}
		if vs[mid] < version {
			low = mid + 1
		} else {
			high = mid - 1
		}
	}
	// No exact match: vs[low] is the smallest version >= the query.
	return vs[low]
}

// FindPrevious returns the largest version <= the given version, or -1 if the
// query precedes the first version.
func (r *VersionRange) FindPrevious(version int64) int64 {
	vs := r.versions
	if len(vs) == 0 || version < vs[0] {
		return -1
	}
	low, high := 0, len(vs)-1
	for low <= high {
		mid := (low + high) / 2
		if vs[mid] == version {
			return version
		}
		if vs[mid] < version {
			low = mid + 1
		} else {
			high = mid - 1
		}
	}
	// No exact match: vs[high] is the largest version <= the query.
	return vs[high]
}

// FindMemoized is Find with a lazily built lookup cache; misses (-1) are not cached.
func (r *VersionRange) FindMemoized(version int64) int64 {
	if r.cache == nil {
		r.cache = make(map[int64]int64)
	}
	if v, ok := r.cache[version]; ok {
		return v
	}
	v := r.Find(version)
	// don't cache err values
	if v == -1 {
		return -1
	}
	r.cache[version] = v
	return v
}

func (r *VersionRange) Last() int64 {
	if len(r.versions) == 0 {
		return -1
	}
	return r.versions[len(r.versions)-1]
}

func (r *VersionRange) Len() int {
	return len(r.versions)
}
diff --git a/v2/range_test.go b/v2/range_test.go
new file mode 100644
index 000000000..35d7ad4ba
--- /dev/null
+++ b/v2/range_test.go
@@ -0,0 +1,104 @@
package iavl_test

import (
	"strings"
	"testing"

	"github.com/cosmos/iavl/v2"
)

func Test_VersionRange_Find(t *testing.T) {
	cases := []struct {
		name     string
		versions []int64
		find     int64
		next     int64
		prev     int64
		wantErr  string
	}{
		{
			name:     "naive",
			versions: []int64{1, 2, 3, 4, 5},
			find:     3,
			prev:     3,
			next:     3,
		},
		{
			name:     "first",
			versions: []int64{1, 2, 3, 4, 5},
			find:     1,
			prev:     1,
			next:     1,
		},
		{
			name:     "unordered",
			versions: []int64{5, 3},
			wantErr:  "unordered insert: version 3 is not greater than 5",
		},
		{
			name:     "typical",
			versions: []int64{1, 2, 10},
			find:     3,
			next:     10,
			prev:     2,
		},
		{
			name:     "past last",
			versions: []int64{1, 2, 10},
			find:     11,
			next:     -1,
			prev:     10,
		},
		{
			name:     "before start",
			versions: []int64{5, 10},
			find:     3,
			next:     5,
			prev:     -1,
		},
		{
			name:     "osmo like many",
			versions: []int64{1, 51, 101, 151, 201, 251, 301, 351, 401},
			find:     38,
			next:     51,
			prev:     1,
		},
		{
			name:     "osmo like many",
			versions: []int64{1, 51, 101, 151, 201, 251, 301, 351, 401},
			find:     408,
			next:     -1,
			prev:     401,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			r := &iavl.VersionRange{}
			var addErr error
			for _, v := range tc.versions {
				addErr = r.Add(v)
				if addErr != nil {
					if tc.wantErr == "" {
						t.Fatalf("unexpected error: %v", addErr)
					}
					if !strings.Contains(addErr.Error(), tc.wantErr) {
						t.Fatalf("want error %q, got %v", tc.wantErr, addErr)
					} else {
						return
					}
				}
			}
			if addErr == nil && tc.wantErr != "" {
				t.Fatalf("want error %q, got nil", tc.wantErr)
			}
			got := r.Find(tc.find)
			if got != tc.next {
				t.Fatalf("want %d, got %d", tc.next, got)
			}
			got = r.FindPrevious(tc.find)
			if got != tc.prev {
				t.Fatalf("want %d, got %d", tc.prev, got)
			}
		})
	}
}
diff --git a/v2/snapshot.go b/v2/snapshot.go
new file mode 100644
index 000000000..861a95a8a
--- /dev/null
+++ b/v2/snapshot.go
@@ -0,0 +1,846 @@
package iavl

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/bvinc/go-sqlite-lite/sqlite3"
	"github.com/dustin/go-humanize"
	api "github.com/kocubinski/costor-api"
	"github.com/kocubinski/costor-api/logz"
	"github.com/rs/zerolog"
)

// sqliteSnapshot carries the state for a batched snapshot write: prepared
// statements, batch bookkeeping, and child accessors.
type sqliteSnapshot struct {
	ctx context.Context

	snapshotInsert *sqlite3.Stmt

	sql        *SqliteDb
	leafInsert *sqlite3.Stmt
	treeInsert *sqlite3.Stmt

	// if set will flush nodes to a tree & leaf tables as well as a snapshot table during import
	writeTree bool

	lastWrite time.Time
	ordinal   int
	batchSize int
	version   int64
	getLeft   func(*Node) *Node
	getRight func(*Node) *Node
	log      zerolog.Logger
}

// Snapshot writes a full pre-order snapshot of tree into a new
// snapshot_<version> table and indexes it by ordinal.
func (sql *SqliteDb) Snapshot(ctx context.Context, tree *Tree) error {
	version := tree.version
	err := sql.leafWrite.Exec(
		fmt.Sprintf("CREATE TABLE snapshot_%d (ordinal int, version int, sequence int, bytes blob);", version))
	if err != nil {
		return err
	}

	snapshot := &sqliteSnapshot{
		ctx:       ctx,
		sql:       sql,
		batchSize: 200_000,
		version:   version,
		log:       log.With().Str("path", filepath.Base(sql.opts.Path)).Logger(),
		getLeft: func(node *Node) *Node {
			return node.left(tree)
		},
		getRight: func(node *Node) *Node {
			return node.right(tree)
		},
	}
	if err = snapshot.prepareWrite(); err != nil {
		return err
	}
	if err = snapshot.writeStep(tree.root); err != nil {
		return err
	}
	if err = snapshot.flush(); err != nil {
		return err
	}
	log.Info().Str("path", sql.opts.Path).Msgf("creating index on snapshot_%d", version)
	err = sql.leafWrite.Exec(fmt.Sprintf("CREATE INDEX snapshot_%d_idx ON snapshot_%d (ordinal);", version, version))
	return err
}

type SnapshotOptions struct {
	StoreLeafValues   bool
	WriteCheckpoint   bool
	DontWriteSnapshot bool
	TraverseOrder     TraverseOrderType
}

// NewIngestSnapshotConnection opens (creating if needed) a snapshot database
// tuned for bulk ingestion: WAL journaling, synchronous=OFF, OS page size.
func NewIngestSnapshotConnection(snapshotDbPath string) (*sqlite3.Conn, error) {
	newDb := !api.IsFileExistent(snapshotDbPath)

	conn, err := sqlite3.Open(fmt.Sprintf("file:%s", snapshotDbPath))
	if err != nil {
		return nil, err
	}
	pageSize := os.Getpagesize()
	if newDb {
		log.Info().Msgf("setting page size to %s", humanize.Bytes(uint64(pageSize)))
		err = conn.Exec(fmt.Sprintf("PRAGMA page_size=%d; VACUUM;", pageSize))
		if err != nil {
			return nil, err
		}
		err = conn.Exec("PRAGMA journal_mode=WAL;")
		if err != nil {
			return nil, err
		}
	}
	err = conn.Exec("PRAGMA synchronous=OFF;")
	if err != nil {
		return nil, err
	}
	// Checkpoint the WAL roughly once per GiB of pages.
	walSize := 1024 * 1024 * 1024
	if err = conn.Exec(fmt.Sprintf("PRAGMA wal_autocheckpoint=%d", walSize/pageSize)); err != nil {
		return nil, err
	}
	return
	conn, err
}

// IngestSnapshot consumes nextFn (a pre-order node stream) and writes it into
// snapshot_<prefix>_<version>, committing every batchSize rows. Returns the
// reconstructed root node; the connection is closed on success.
func IngestSnapshot(conn *sqlite3.Conn, prefix string, version int64, nextFn func() (*SnapshotNode, error)) (*Node, error) {
	var (
		insert    *sqlite3.Stmt
		tableName = fmt.Sprintf("snapshot_%s_%d", prefix, version)
		ordinal   int
		batchSize = 200_000
		log       = logz.Logger.With().Str("prefix", prefix).Logger()
		step      func() (*Node, error)
		lastWrite = time.Now()
	)

	err := conn.Exec(fmt.Sprintf("CREATE TABLE %s (ordinal int, version int, sequence int, bytes blob);", tableName))
	if err != nil {
		return nil, err
	}
	// prepare opens a transaction and (re-)prepares the batched insert.
	prepare := func() error {
		if err = conn.Begin(); err != nil {
			return err
		}
		insert, err = conn.Prepare(
			fmt.Sprintf("INSERT INTO %s (ordinal, version, sequence, bytes) VALUES (?, ?, ?, ?);", tableName))
		if err != nil {
			return err
		}
		return nil
	}
	// flush commits the open transaction and closes the insert statement.
	flush := func() error {
		log.Info().Msgf("flush total=%s size=%s dur=%s wr/s=%s",
			humanize.Comma(int64(ordinal)),
			humanize.Comma(int64(batchSize)),
			time.Since(lastWrite).Round(time.Millisecond),
			humanize.Comma(int64(float64(batchSize)/time.Since(lastWrite).Seconds())),
		)
		err = errors.Join(conn.Commit(), insert.Close())
		lastWrite = time.Now()
		return err
	}
	maybeFlush := func() error {
		if ordinal%batchSize == 0 {
			if err = flush(); err != nil {
				return err
			}
			if err = prepare(); err != nil {
				return err
			}
		}
		return nil
	}
	if err = prepare(); err != nil {
		return nil, err
	}
	// step recursively rebuilds the tree pre-order: node, left subtree, right subtree.
	step = func() (*Node, error) {
		snapshotNode, err := nextFn()
		if err != nil {
			return nil, err
		}
		ordinal++

		node := &Node{
			key:           snapshotNode.Key,
			subtreeHeight: snapshotNode.Height,
			nodeKey:       NewNodeKey(snapshotNode.Version, uint32(ordinal)),
		}
		if node.subtreeHeight == 0 {
			node.value = snapshotNode.Value
			node.size = 1
			node._hash()
			nodeBz, err := node.Bytes()
			if err != nil {
				return nil, err
			}
			if err = insert.Exec(ordinal, snapshotNode.Version, ordinal, nodeBz); err != nil {
				return nil, err
			}
			if err = maybeFlush(); err !=
			nil {
				return nil, err
			}
			return node, nil
		}

		// Branch node: rebuild children first, then hash and persist.
		node.leftNode, err = step()
		if err != nil {
			return nil, err
		}
		node.leftNodeKey = node.leftNode.nodeKey
		node.rightNode, err = step()
		if err != nil {
			return nil, err
		}
		node.rightNodeKey = node.rightNode.nodeKey

		node.size = node.leftNode.size + node.rightNode.size
		node._hash()
		// Children are released once hashed to keep memory bounded.
		node.leftNode = nil
		node.rightNode = nil

		nodeBz, err := node.Bytes()
		if err != nil {
			return nil, err
		}
		if err = insert.Exec(ordinal, snapshotNode.Version, ordinal, nodeBz); err != nil {
			return nil, err
		}
		if err = maybeFlush(); err != nil {
			return nil, err
		}
		return node, nil
	}
	root, err := step()
	if err != nil {
		return nil, err
	}
	if err = flush(); err != nil {
		return nil, err
	}
	if err = conn.Exec(fmt.Sprintf("CREATE INDEX %s_idx ON %s (ordinal);", tableName, tableName)); err != nil {
		return nil, err
	}
	if err = conn.Close(); err != nil {
		return nil, err
	}
	return root, nil
}

// WriteSnapshot restores a snapshot stream into this database, optionally
// also writing checkpoint tree/leaf tables, and saves the resulting root.
func (sql *SqliteDb) WriteSnapshot(
	ctx context.Context, version int64, nextFn func() (*SnapshotNode, error), opts SnapshotOptions,
) (*Node, error) {
	snap := &sqliteSnapshot{
		ctx:       ctx,
		sql:       sql,
		batchSize: 400_000,
		version:   version,
		lastWrite: time.Now(),
		log:       log.With().Str("path", filepath.Base(sql.opts.Path)).Logger(),
		writeTree: true,
	}
	if opts.WriteCheckpoint {
		if _, err := sql.nextShard(version); err != nil {
			return nil, err
		}
	}
	err := snap.sql.leafWrite.Exec(
		fmt.Sprintf(`CREATE TABLE snapshot_%d (ordinal int, version int, sequence int, bytes blob);`, version))
	if err != nil {
		return nil, err
	}
	if err = snap.prepareWrite(); err != nil {
		return nil, err
	}

	var (
		root           *Node
		uniqueVersions map[int64]struct{}
	)
	if opts.TraverseOrder == PostOrder {
		root, uniqueVersions, err = snap.restorePostOrderStep(nextFn, opts.StoreLeafValues)
	} else if opts.TraverseOrder == PreOrder {
		root, uniqueVersions, err =
		snap.restorePreOrderStep(nextFn, opts.StoreLeafValues)
	}
	if err != nil {
		return nil, err
	}

	if err = snap.flush(); err != nil {
		return nil, err
	}

	// NOTE(review): versions is populated from uniqueVersions but never used
	// afterwards — looks like dead code or an unfinished feature; confirm.
	var versions []int64
	for v := range uniqueVersions {
		versions = append(versions, v)
	}

	if err = sql.SaveRoot(version, root, true); err != nil {
		return nil, err
	}

	log.Info().Str("path", sql.opts.Path).Msg("creating table indexes")
	err = sql.leafWrite.Exec(fmt.Sprintf("CREATE INDEX snapshot_%d_idx ON snapshot_%d (ordinal);", version, version))
	if err != nil {
		return nil, err
	}
	err = snap.sql.treeWrite.Exec(fmt.Sprintf(
		"CREATE INDEX IF NOT EXISTS tree_idx_%d ON tree_%d (version, sequence);", snap.version, snap.version))
	if err != nil {
		return nil, err
	}
	err = snap.sql.leafWrite.Exec("CREATE UNIQUE INDEX IF NOT EXISTS leaf_idx ON leaf (version, sequence)")
	if err != nil {
		return nil, err
	}

	return root, nil
}

// SnapshotNode is one exported node in a snapshot stream.
type SnapshotNode struct {
	Key     []byte
	Value   []byte
	Version int64
	Height  int8
}

// ImportSnapshotFromTable rebuilds a tree from snapshot_<version>, traversing
// in the given order. When loadLeaves is true the rebuilt tree is re-hashed
// and verified against the stored root hash.
func (sql *SqliteDb) ImportSnapshotFromTable(version int64, traverseOrder TraverseOrderType, loadLeaves bool) (*Node, error) {
	read, err := sql.getReadConn()
	if err != nil {
		return nil, err
	}

	var q *sqlite3.Stmt
	if traverseOrder == PostOrder {
		q, err = read.Prepare(fmt.Sprintf("SELECT version, sequence, bytes FROM snapshot_%d ORDER BY ordinal DESC", version))
	} else if traverseOrder == PreOrder {
		q, err = read.Prepare(fmt.Sprintf("SELECT version, sequence, bytes FROM snapshot_%d ORDER BY ordinal ASC", version))
	}
	if err != nil {
		return nil, err
	}
	defer func(q *sqlite3.Stmt) {
		err = q.Close()
		if err != nil {
			log.Error().Err(err).Msg("error closing import query")
		}
	}(q)

	imp := &sqliteImport{
		query:      q,
		pool:       sql.pool,
		loadLeaves: loadLeaves,
		since:      time.Now(),
		log:        log.With().Str("path", sql.opts.Path).Logger(),
	}
	var root *Node
	if traverseOrder == PostOrder {
		root, err = imp.queryStepPostOrder()
	} else
if traverseOrder == PreOrder { + root, err = imp.queryStepPreOrder() + } + if err != nil { + return nil, err + } + + if !loadLeaves { + return root, nil + } + + h := root.hash + rehashTree(root) + if !bytes.Equal(h, root.hash) { + return nil, fmt.Errorf("rehash failed; expected=%x, got=%x", h, root.hash) + } + + return root, nil +} + +func (sql *SqliteDb) ImportMostRecentSnapshot(targetVersion int64, traverseOrder TraverseOrderType, loadLeaves bool) (*Node, int64, error) { + read, err := sql.getReadConn() + if err != nil { + return nil, 0, err + } + q, err := read.Prepare("SELECT tbl_name FROM changelog.sqlite_master WHERE type='table' AND name LIKE 'snapshot_%' ORDER BY name DESC") + defer func(q *sqlite3.Stmt) { + err = q.Close() + if err != nil { + log.Error().Err(err).Msg("error closing import query") + } + }(q) + if err != nil { + return nil, 0, err + } + + var ( + name string + version int64 + ) + for { + ok, err := q.Step() + if err != nil { + return nil, 0, err + } + if !ok { + return nil, 0, fmt.Errorf("no prior snapshot found version=%d path=%s", targetVersion, sql.opts.Path) + } + err = q.Scan(&name) + if err != nil { + return nil, 0, err + } + vs := name[len("snapshot_"):] + if vs == "" { + return nil, 0, fmt.Errorf("unexpected snapshot table name %s", name) + } + version, err = strconv.ParseInt(vs, 10, 64) + if err != nil { + return nil, 0, err + } + if version <= targetVersion { + break + } + } + + root, err := sql.ImportSnapshotFromTable(version, traverseOrder, loadLeaves) + if err != nil { + return nil, 0, err + } + return root, version, err +} + +func FindDbsInPath(path string) ([]string, error) { + var paths []string + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Base(path) == "changelog.sqlite" { + paths = append(paths, filepath.Dir(path)) + } + return nil + }) + if err != nil { + return nil, err + } + return paths, nil +} + +// TODO +// merge these two 
functions

// writeStep persists node and its subtree into the snapshot table in
// pre-order (NLR), flushing the batch every batchSize rows.
func (snap *sqliteSnapshot) writeStep(node *Node) error {
	snap.ordinal++
	// Pre-order, NLR traversal
	// Visit this node
	nodeBz, err := node.Bytes()
	if err != nil {
		return err
	}
	err = snap.snapshotInsert.Exec(snap.ordinal, node.nodeKey.Version(), int(node.nodeKey.Sequence()), nodeBz)
	if err != nil {
		return err
	}

	if snap.ordinal%snap.batchSize == 0 {
		if err = snap.flush(); err != nil {
			return err
		}
		if err = snap.prepareWrite(); err != nil {
			return err
		}
	}

	if node.isLeaf() {
		return nil
	}

	// traverse left
	err = snap.writeStep(snap.getLeft(node))
	if err != nil {
		return err
	}

	// traverse right
	return snap.writeStep(snap.getRight(node))
}

// flush commits the open batch. If the context was cancelled it instead rolls
// back and closes all statements and connections.
func (snap *sqliteSnapshot) flush() error {
	select {
	case <-snap.ctx.Done():
		snap.log.Info().Msgf("snapshot cancelled at ordinal=%s", humanize.Comma(int64(snap.ordinal)))
		errs := errors.Join(
			snap.snapshotInsert.Reset(),
			snap.snapshotInsert.Close(),
		)
		if errs != nil {
			return errs
		}
		if snap.writeTree {
			errs = errors.Join(
				snap.leafInsert.Reset(),
				snap.leafInsert.Close(),
				snap.treeInsert.Reset(),
				snap.treeInsert.Close(),
			)
		}
		errs = errors.Join(
			errs,
			snap.sql.leafWrite.Rollback(),
			snap.sql.leafWrite.Close(),
		)
		if errs != nil {
			return errs
		}
		if snap.writeTree {
			errs = errors.Join(
				errs,
				snap.sql.treeWrite.Rollback(),
				snap.sql.treeWrite.Close(),
			)
		}

		return errs
	default:
	}

	snap.log.Info().Msgf("flush total=%s size=%s dur=%s wr/s=%s",
		humanize.Comma(int64(snap.ordinal)),
		humanize.Comma(int64(snap.batchSize)),
		time.Since(snap.lastWrite).Round(time.Millisecond),
		humanize.Comma(int64(float64(snap.batchSize)/time.Since(snap.lastWrite).Seconds())),
	)

	err := errors.Join(
		snap.sql.leafWrite.Commit(),
		snap.snapshotInsert.Close(),
	)
	if err != nil {
		return err
	}
	if snap.writeTree {
		err = errors.Join(
			snap.leafInsert.Close(),
			snap.sql.treeWrite.Commit(),
snap.treeInsert.Close(), + ) + } + snap.lastWrite = time.Now() + return err +} + +func (snap *sqliteSnapshot) prepareWrite() error { + err := snap.sql.leafWrite.Begin() + if err != nil { + return err + } + + snap.snapshotInsert, err = snap.sql.leafWrite.Prepare( + fmt.Sprintf("INSERT INTO snapshot_%d (ordinal, version, sequence, bytes) VALUES (?, ?, ?, ?);", + snap.version)) + + if snap.writeTree { + err = snap.sql.treeWrite.Begin() + if err != nil { + return err + } + + snap.leafInsert, err = snap.sql.leafWrite.Prepare("INSERT INTO leaf (version, sequence, bytes) VALUES (?, ?, ?)") + if err != nil { + return err + } + snap.treeInsert, err = snap.sql.treeWrite.Prepare( + fmt.Sprintf("INSERT INTO tree_%d (version, sequence, bytes) VALUES (?, ?, ?)", snap.version)) + } + + return err +} + +func (snap *sqliteSnapshot) restorePostOrderStep(nextFn func() (*SnapshotNode, error), isStoreLeafValues bool) (*Node, map[int64]struct{}, error) { + var ( + snapshotNode *SnapshotNode + err error + count int + stack []*Node + uniqueVersions = make(map[int64]struct{}) + ) + + for { + snapshotNode, err = nextFn() + if err != nil || snapshotNode == nil { + break + } + + ordinal := snap.ordinal + + uniqueVersions[snapshotNode.Version] = struct{}{} + node := &Node{ + key: snapshotNode.Key, + subtreeHeight: snapshotNode.Height, + nodeKey: NewNodeKey(snapshotNode.Version, uint32(ordinal)), + } + + stackSize := len(stack) + if node.isLeaf() { + node.value = snapshotNode.Value + node.size = 1 + node._hash() + if !isStoreLeafValues { + node.value = nil + } + + count++ + if err := snap.writeSnapNode(node, snapshotNode.Version, count, ordinal, count); err != nil { + return nil, nil, err + } + } else if stackSize >= 2 && stack[stackSize-1].subtreeHeight < node.subtreeHeight && stack[stackSize-2].subtreeHeight < node.subtreeHeight { + node.leftNode = stack[stackSize-2] + node.leftNodeKey = node.leftNode.nodeKey + node.rightNode = stack[stackSize-1] + node.rightNodeKey = node.rightNode.nodeKey + 
			node.size = node.leftNode.size + node.rightNode.size
			node._hash()
			stack = stack[:stackSize-2]

			node.leftNode = nil
			node.rightNode = nil

			count++
			if err := snap.writeSnapNode(node, snapshotNode.Version, count, ordinal, count); err != nil {
				return nil, nil, err
			}
		}

		stack = append(stack, node)
		snap.ordinal++
	}

	if err != nil && !errors.Is(err, ErrorExportDone) {
		return nil, nil, err
	}

	// A well-formed post-order stream collapses to exactly the root.
	if len(stack) != 1 {
		return nil, nil, fmt.Errorf("expected stack size 1, got %d", len(stack))
	}

	return stack[0], uniqueVersions, nil
}

// restorePreOrderStep rebuilds the tree from a pre-order stream recursively,
// writing each node after its children are resolved.
func (snap *sqliteSnapshot) restorePreOrderStep(nextFn func() (*SnapshotNode, error), isStoreLeafValues bool) (*Node, map[int64]struct{}, error) {
	var (
		count          int
		step           func() (*Node, error)
		uniqueVersions = make(map[int64]struct{})
	)

	step = func() (*Node, error) {
		snapshotNode, err := nextFn()
		if err != nil {
			return nil, err
		}

		ordinal := snap.ordinal
		snap.ordinal++

		node := &Node{
			key:           snapshotNode.Key,
			subtreeHeight: snapshotNode.Height,
			nodeKey:       NewNodeKey(snapshotNode.Version, uint32(ordinal)),
		}

		if node.isLeaf() {
			node.value = snapshotNode.Value
			node.size = 1
			node._hash()
			if !isStoreLeafValues {
				node.value = nil
			}
		} else {
			node.leftNode, err = step()
			if err != nil {
				return nil, err
			}
			node.leftNodeKey = node.leftNode.nodeKey
			node.rightNode, err = step()
			if err != nil {
				return nil, err
			}
			node.rightNodeKey = node.rightNode.nodeKey

			node.size = node.leftNode.size + node.rightNode.size
			node._hash()
			node.leftNode = nil
			node.rightNode = nil
			uniqueVersions[snapshotNode.Version] = struct{}{}
		}

		count++
		if err := snap.writeSnapNode(node, snapshotNode.Version, ordinal, ordinal, count); err != nil {
			return nil, err
		}
		// NOTE(review): snap.ordinal is incremented a second time here (it was
		// already incremented above), so ordinals advance by 2 per node in the
		// pre-order path — confirm this gap is intentional.
		snap.ordinal++

		return node, nil
	}

	node, err := step()

	return node, uniqueVersions, err
}

// writeSnapNode writes node to the snapshot table and, when writeTree is set,
// also to the leaf or tree table; flushes every batchSize writes.
func (snap *sqliteSnapshot) writeSnapNode(node *Node, version int64, ordinal, sequence, count
int) error { + nodeBz, err := node.Bytes() + if err != nil { + return err + } + if err = snap.snapshotInsert.Exec(ordinal, version, sequence, nodeBz); err != nil { + return err + } + if snap.writeTree { + if node.isLeaf() { + if err = snap.leafInsert.Exec(version, ordinal, nodeBz); err != nil { + return err + } + } else { + if err = snap.treeInsert.Exec(version, sequence, nodeBz); err != nil { + return err + } + } + } + + if count%snap.batchSize == 0 { + if err := snap.flush(); err != nil { + return err + } + if err := snap.prepareWrite(); err != nil { + return err + } + } + + return nil +} + +func rehashTree(node *Node) { + if node.isLeaf() { + return + } + node.hash = nil + + rehashTree(node.leftNode) + rehashTree(node.rightNode) + + node._hash() +} + +type sqliteImport struct { + query *sqlite3.Stmt + pool *NodePool + loadLeaves bool + + i int64 + since time.Time + log zerolog.Logger +} + +func (sqlImport *sqliteImport) queryStepPreOrder() (node *Node, err error) { + sqlImport.i++ + if sqlImport.i%1_000_000 == 0 { + sqlImport.log.Debug().Msgf("import: nodes=%s, node/s=%s", + humanize.Comma(sqlImport.i), + humanize.Comma(int64(float64(1_000_000)/time.Since(sqlImport.since).Seconds())), + ) + sqlImport.since = time.Now() + } + + hasRow, err := sqlImport.query.Step() + if !hasRow { + return nil, nil + } + if err != nil { + return nil, err + } + var bz sqlite3.RawBytes + var version, seq int + err = sqlImport.query.Scan(&version, &seq, &bz) + if err != nil { + return nil, err + } + nodeKey := NewNodeKey(int64(version), uint32(seq)) + node, err = MakeNode(sqlImport.pool, nodeKey, bz) + if err != nil { + return nil, err + } + + if node.isLeaf() && sqlImport.i > 1 { + if sqlImport.loadLeaves { + return node, nil + } + sqlImport.pool.Put(node) + return nil, nil + } + + node.leftNode, err = sqlImport.queryStepPreOrder() + if err != nil { + return nil, err + } + node.rightNode, err = sqlImport.queryStepPreOrder() + if err != nil { + return nil, err + } + return node, nil 
+} + +func (sqlImport *sqliteImport) queryStepPostOrder() (node *Node, err error) { + sqlImport.i++ + if sqlImport.i%1_000_000 == 0 { + sqlImport.log.Debug().Msgf("import: nodes=%s, node/s=%s", + humanize.Comma(sqlImport.i), + humanize.Comma(int64(float64(1_000_000)/time.Since(sqlImport.since).Seconds())), + ) + sqlImport.since = time.Now() + } + + hasRow, err := sqlImport.query.Step() + if !hasRow { + return nil, nil + } + if err != nil { + return nil, err + } + var bz sqlite3.RawBytes + var version, seq int + err = sqlImport.query.Scan(&version, &seq, &bz) + if err != nil { + return nil, err + } + nodeKey := NewNodeKey(int64(version), uint32(seq)) + node, err = MakeNode(sqlImport.pool, nodeKey, bz) + if err != nil { + return nil, err + } + + if node.isLeaf() && sqlImport.i > 1 { + if sqlImport.loadLeaves { + return node, nil + } + sqlImport.pool.Put(node) + return nil, nil + } + + node.rightNode, err = sqlImport.queryStepPostOrder() + if err != nil { + return nil, err + } + node.leftNode, err = sqlImport.queryStepPostOrder() + if err != nil { + return nil, err + } + + return node, nil +} diff --git a/v2/sqlite.go b/v2/sqlite.go new file mode 100644 index 000000000..78feff512 --- /dev/null +++ b/v2/sqlite.go @@ -0,0 +1,1119 @@ +package iavl + +import ( + "bytes" + "fmt" + "os" + "strconv" + "time" + + "github.com/bvinc/go-sqlite-lite/sqlite3" + "github.com/cosmos/iavl/v2/metrics" + "github.com/dustin/go-humanize" + api "github.com/kocubinski/costor-api" + "github.com/rs/zerolog" +) + +const defaultSQLitePath = "/tmp/iavl-v2" + +type SqliteDbOptions struct { + Path string + Mode int + MmapSize uint64 + WalSize int + CacheSize int + ConnArgs string + ShardTrees bool + + walPages int +} + +type SqliteDb struct { + opts SqliteDbOptions + + pool *NodePool + + // 2 separate databases and 2 separate connections. the underlying databases have different WAL policies + // therefore separation is required. 
	leafWrite *sqlite3.Conn
	treeWrite *sqlite3.Conn

	// for latest table queries
	itrIdx      int
	iterators   map[int]*sqlite3.Stmt
	queryLatest *sqlite3.Stmt

	readConn  *sqlite3.Conn
	queryLeaf *sqlite3.Stmt

	shards       *VersionRange
	shardQueries map[int64]*sqlite3.Stmt

	metrics *metrics.DbMetrics
	logger  zerolog.Logger
}

// defaultSqliteDbOptions fills in defaults: /tmp path, 8 GiB mmap, 100 MiB WAL.
func defaultSqliteDbOptions(opts SqliteDbOptions) SqliteDbOptions {
	if opts.Path == "" {
		opts.Path = defaultSQLitePath
	}
	if opts.MmapSize == 0 {
		opts.MmapSize = 8 * 1024 * 1024 * 1024
	}
	if opts.WalSize == 0 {
		opts.WalSize = 1024 * 1024 * 100
	}
	opts.walPages = opts.WalSize / os.Getpagesize()
	return opts
}

func (opts SqliteDbOptions) connArgs() string {
	if opts.ConnArgs == "" {
		return ""
	}
	return fmt.Sprintf("?%s", opts.ConnArgs)
}

func (opts SqliteDbOptions) leafConnectionString() string {
	return fmt.Sprintf("file:%s/changelog.sqlite%s", opts.Path, opts.connArgs())
}

func (opts SqliteDbOptions) treeConnectionString() string {
	return fmt.Sprintf("file:%s/tree.sqlite%s", opts.Path, opts.connArgs())
}

// EstimateMmapSize sizes the mmap region from the leaf table's on-disk size
// (via dbstat) plus 30% headroom.
func (opts SqliteDbOptions) EstimateMmapSize() (uint64, error) {
	logger := log.With().Str("path", opts.Path).Logger()
	logger.Info().Msgf("calculate mmap size")
	logger.Info().Msgf("leaf connection string: %s", opts.leafConnectionString())
	conn, err := sqlite3.Open(opts.leafConnectionString())
	if err != nil {
		return 0, err
	}
	q, err := conn.Prepare("SELECT SUM(pgsize) FROM dbstat WHERE name = 'leaf'")
	if err != nil {
		return 0, err
	}
	hasRow, err := q.Step()
	if err != nil {
		return 0, err
	}
	if !hasRow {
		return 0, fmt.Errorf("no row")
	}
	var leafSize int64
	err = q.Scan(&leafSize)
	if err != nil {
		return 0, err
	}
	if err = q.Close(); err != nil {
		return 0, err
	}
	if err = conn.Close(); err != nil {
		return 0, err
	}
	mmapSize := uint64(float64(leafSize) * 1.3)
	logger.Info().Msgf("leaf mmap size: %s", humanize.Bytes(mmapSize))

	return mmapSize,
		nil
}

// NewInMemorySqliteDb opens a shared in-memory database, used for tests.
func NewInMemorySqliteDb(pool *NodePool) (*SqliteDb, error) {
	opts := defaultSqliteDbOptions(SqliteDbOptions{ConnArgs: "mode=memory&cache=shared"})
	return NewSqliteDb(pool, opts)
}

// NewSqliteDb opens (creating if needed) the tree and changelog databases
// under opts.Path and initializes their schemas.
func NewSqliteDb(pool *NodePool, opts SqliteDbOptions) (*SqliteDb, error) {
	opts = defaultSqliteDbOptions(opts)
	logger := log.With().Str("module", "sqlite").Str("path", opts.Path).Logger()
	sql := &SqliteDb{
		shards:       &VersionRange{},
		shardQueries: make(map[int64]*sqlite3.Stmt),
		iterators:    make(map[int]*sqlite3.Stmt),
		opts:         opts,
		pool:         pool,
		metrics:      &metrics.DbMetrics{},
		logger:       logger,
	}

	if !api.IsFileExistent(opts.Path) {
		err := os.MkdirAll(opts.Path, 0755)
		if err != nil {
			return nil, err
		}
	}

	if err := sql.resetWriteConn(); err != nil {
		return nil, err
	}

	if err := sql.init(); err != nil {
		return nil, err
	}

	return sql, nil
}

// init creates the tree and leaf schemas on first use, detected by probing
// sqlite_master for the marker tables.
func (sql *SqliteDb) init() error {
	q, err := sql.treeWrite.Prepare("SELECT name from sqlite_master WHERE type='table' AND name='root'")
	if err != nil {
		return err
	}
	hasRow, err := q.Step()
	if err != nil {
		return err
	}
	if !hasRow {
		err = sql.treeWrite.Exec(`
CREATE TABLE orphan (version int, sequence int, at int);
CREATE INDEX orphan_idx ON orphan (at);
CREATE TABLE root (
	version int,
	node_version int,
	node_sequence int,
	bytes blob,
	checkpoint bool,
	PRIMARY KEY (version))`)
		if err != nil {
			return err
		}

		pageSize := os.Getpagesize()
		log.Info().Msgf("setting page size to %s", humanize.Bytes(uint64(pageSize)))
		err = sql.treeWrite.Exec(fmt.Sprintf("PRAGMA page_size=%d; VACUUM;", pageSize))
		if err != nil {
			return err
		}
		err = sql.treeWrite.Exec("PRAGMA journal_mode=WAL;")
		if err != nil {
			return err
		}
	}
	if err = q.Close(); err != nil {
		return err
	}

	// NOTE(review): this statement is prepared but never stepped; the `hasRow`
	// checked below is the stale result of the *root* table probe above, so the
	// leaf schema is created iff the tree schema was missing — confirm intent.
	q, err = sql.leafWrite.Prepare("SELECT name from sqlite_master WHERE type='table' AND name='leaf'")
	if err != nil {
		return err
	}
	if !hasRow {
		err = sql.leafWrite.Exec(`
+CREATE TABLE latest (key blob, value blob, PRIMARY KEY (key)); +CREATE TABLE leaf (version int, sequence int, bytes blob, orphaned bool); +CREATE TABLE leaf_delete (version int, sequence int, key blob, PRIMARY KEY (version, sequence)); +CREATE TABLE leaf_orphan (version int, sequence int, at int); +CREATE INDEX leaf_orphan_idx ON leaf_orphan (at);`) + if err != nil { + return err + } + + pageSize := os.Getpagesize() + log.Info().Msgf("setting page size to %s", humanize.Bytes(uint64(pageSize))) + err = sql.leafWrite.Exec(fmt.Sprintf("PRAGMA page_size=%d; VACUUM;", pageSize)) + if err != nil { + return err + } + err = sql.leafWrite.Exec("PRAGMA journal_mode=WAL;") + if err != nil { + return err + } + } + if err = q.Close(); err != nil { + return err + } + + return nil +} + +func (sql *SqliteDb) resetWriteConn() (err error) { + if sql.treeWrite != nil { + err = sql.treeWrite.Close() + if err != nil { + return err + } + } + sql.treeWrite, err = sqlite3.Open(sql.opts.treeConnectionString()) + if err != nil { + return err + } + + err = sql.treeWrite.Exec("PRAGMA synchronous=OFF;") + if err != nil { + return err + } + + if err = sql.treeWrite.Exec(fmt.Sprintf("PRAGMA wal_autocheckpoint=%d", sql.opts.walPages)); err != nil { + return err + } + + sql.leafWrite, err = sqlite3.Open(sql.opts.leafConnectionString()) + if err != nil { + return err + } + + err = sql.leafWrite.Exec("PRAGMA synchronous=OFF;") + if err != nil { + return err + } + + if err = sql.leafWrite.Exec(fmt.Sprintf("PRAGMA wal_autocheckpoint=%d", sql.opts.walPages)); err != nil { + return err + } + + return err +} + +func (sql *SqliteDb) newReadConn() (*sqlite3.Conn, error) { + conn, err := sqlite3.Open(sql.opts.treeConnectionString()) + if err != nil { + return nil, err + } + err = conn.Exec(fmt.Sprintf("ATTACH DATABASE '%s' AS changelog;", sql.opts.leafConnectionString())) + if err != nil { + return nil, err + } + err = conn.Exec(fmt.Sprintf("PRAGMA mmap_size=%d;", sql.opts.MmapSize)) + if err != nil { + 
return nil, err + } + return conn, nil +} + +func (sql *SqliteDb) resetReadConn() (err error) { + if sql.readConn != nil { + err = sql.readConn.Close() + if err != nil { + return err + } + } + sql.readConn, err = sql.newReadConn() + return err +} + +func (sql *SqliteDb) getReadConn() (*sqlite3.Conn, error) { + var err error + if sql.readConn == nil { + sql.readConn, err = sql.newReadConn() + } + return sql.readConn, err +} + +func (sql *SqliteDb) getLeaf(nodeKey NodeKey) (*Node, error) { + start := time.Now() + + var err error + if sql.queryLeaf == nil { + sql.queryLeaf, err = sql.readConn.Prepare("SELECT bytes FROM changelog.leaf WHERE version = ? AND sequence = ?") + if err != nil { + return nil, err + } + } + if err = sql.queryLeaf.Bind(nodeKey.Version(), int(nodeKey.Sequence())); err != nil { + return nil, err + } + hasRow, err := sql.queryLeaf.Step() + if !hasRow { + return nil, sql.queryLeaf.Reset() + } + if err != nil { + return nil, err + } + var nodeBz sqlite3.RawBytes + err = sql.queryLeaf.Scan(&nodeBz) + if err != nil { + return nil, err + } + node, err := MakeNode(sql.pool, nodeKey, nodeBz) + if err != nil { + return nil, err + } + err = sql.queryLeaf.Reset() + if err != nil { + return nil, err + } + + dur := time.Since(start) + sql.metrics.QueryDurations = append(sql.metrics.QueryDurations, dur) + sql.metrics.QueryTime += dur + sql.metrics.QueryCount++ + sql.metrics.QueryLeafCount++ + + return node, nil +} + +func (sql *SqliteDb) getNode(nodeKey NodeKey, q *sqlite3.Stmt) (*Node, error) { + start := time.Now() + + if err := q.Reset(); err != nil { + return nil, err + } + if err := q.Bind(nodeKey.Version(), int(nodeKey.Sequence())); err != nil { + return nil, err + } + hasRow, err := q.Step() + if !hasRow { + return nil, fmt.Errorf("node not found: %v; shard=%d; path=%s", + nodeKey, sql.shards.Find(nodeKey.Version()), sql.opts.Path) + } + if err != nil { + return nil, err + } + var nodeBz sqlite3.RawBytes + err = q.Scan(&nodeBz) + if err != nil { + 
return nil, err + } + node, err := MakeNode(sql.pool, nodeKey, nodeBz) + if err != nil { + return nil, err + } + err = q.Reset() + if err != nil { + return nil, err + } + + dur := time.Since(start) + sql.metrics.QueryDurations = append(sql.metrics.QueryDurations, dur) + sql.metrics.QueryTime += dur + sql.metrics.QueryCount++ + sql.metrics.QueryBranchCount++ + + return node, nil +} + +func (sql *SqliteDb) Get(nodeKey NodeKey) (*Node, error) { + q, err := sql.getShardQuery(nodeKey.Version()) + if err != nil { + return nil, err + } + return sql.getNode(nodeKey, q) +} + +func (sql *SqliteDb) Close() error { + for _, q := range sql.shardQueries { + err := q.Close() + if err != nil { + return err + } + } + if sql.readConn != nil { + if sql.queryLeaf != nil { + if err := sql.queryLeaf.Close(); err != nil { + return err + } + } + if err := sql.readConn.Close(); err != nil { + return err + } + } + if err := sql.leafWrite.Close(); err != nil { + return err + } + + if err := sql.treeWrite.Close(); err != nil { + return err + } + return nil +} + +func (sql *SqliteDb) nextShard(version int64) (int64, error) { + if !sql.opts.ShardTrees { + switch sql.shards.Len() { + case 0: + break + case 1: + return sql.shards.Last(), nil + default: + return -1, fmt.Errorf("sharding is disabled but found shards; shards=%v", sql.shards.versions) + } + } + + sql.logger.Info().Msgf("creating shard %d", version) + err := sql.treeWrite.Exec(fmt.Sprintf("CREATE TABLE tree_%d (version int, sequence int, bytes blob, orphaned bool);", version)) + if err != nil { + return version, err + } + return version, sql.shards.Add(version) +} + +func (sql *SqliteDb) SaveRoot(version int64, node *Node, isCheckpoint bool) error { + if node != nil { + bz, err := node.Bytes() + if err != nil { + return err + } + return sql.treeWrite.Exec("INSERT OR REPLACE INTO root(version, node_version, node_sequence, bytes, checkpoint) VALUES (?, ?, ?, ?, ?)", + version, node.nodeKey.Version(), int(node.nodeKey.Sequence()), bz, 
isCheckpoint) + } + // for an empty root a sentinel is saved + return sql.treeWrite.Exec("INSERT OR REPLACE INTO root(version, checkpoint) VALUES (?, ?)", version, isCheckpoint) +} + +func (sql *SqliteDb) LoadRoot(version int64) (*Node, error) { + conn, err := sqlite3.Open(sql.opts.treeConnectionString()) + if err != nil { + return nil, err + } + rootQuery, err := conn.Prepare("SELECT node_version, node_sequence, bytes FROM root WHERE version = ?", version) + if err != nil { + return nil, err + } + + hasRow, err := rootQuery.Step() + if !hasRow { + return nil, fmt.Errorf("root not found for version %d", version) + } + if err != nil { + return nil, err + } + var ( + nodeSeq int + nodeVersion int64 + nodeBz []byte + ) + err = rootQuery.Scan(&nodeVersion, &nodeSeq, &nodeBz) + if err != nil { + return nil, err + } + + // if nodeBz is nil then a (valid) empty tree was saved, which a nil root represents + var root *Node + if nodeBz != nil { + rootKey := NewNodeKey(nodeVersion, uint32(nodeSeq)) + root, err = MakeNode(sql.pool, rootKey, nodeBz) + if err != nil { + return nil, err + } + } + + if err := rootQuery.Close(); err != nil { + return nil, err + } + if err := sql.ResetShardQueries(); err != nil { + return nil, err + } + if err := conn.Close(); err != nil { + return nil, err + } + return root, nil +} + +// lastCheckpoint fetches the last checkpoint version from the shard table previous to the loaded root's version. +// a return value of zero and nil error indicates no checkpoint was found. 
+func (sql *SqliteDb) lastCheckpoint(treeVersion int64) (checkpointVersion int64, err error) { + conn, err := sqlite3.Open(sql.opts.treeConnectionString()) + if err != nil { + return 0, err + } + rootQuery, err := conn.Prepare("SELECT MAX(version) FROM root WHERE checkpoint = true AND version <= ?", treeVersion) + if err != nil { + return 0, err + } + hasRow, err := rootQuery.Step() + if err != nil { + return 0, err + } + if !hasRow { + return 0, nil + } + err = rootQuery.Scan(&checkpointVersion) + if err != nil { + return 0, err + } + + if err = rootQuery.Close(); err != nil { + return 0, err + } + if err = conn.Close(); err != nil { + return 0, err + } + return checkpointVersion, nil +} + +func (sql *SqliteDb) loadCheckpointRange() (*VersionRange, error) { + conn, err := sqlite3.Open(sql.opts.treeConnectionString()) + if err != nil { + return nil, err + } + q, err := conn.Prepare("SELECT version FROM root WHERE checkpoint = true ORDER BY version") + if err != nil { + return nil, err + } + var version int64 + versionRange := &VersionRange{} + for { + hasRow, err := q.Step() + if err != nil { + return nil, err + } + if !hasRow { + break + } + err = q.Scan(&version) + if err != nil { + return nil, err + } + if err = versionRange.Add(version); err != nil { + return nil, err + + } + } + if err = q.Close(); err != nil { + return nil, err + } + if err = conn.Close(); err != nil { + return nil, err + } + return versionRange, nil +} + +func (sql *SqliteDb) getShard(version int64) (int64, error) { + if !sql.opts.ShardTrees { + if sql.shards.Len() != 1 { + return -1, fmt.Errorf("expected a single shard; path=%s", sql.opts.Path) + } + return sql.shards.Last(), nil + } + v := sql.shards.FindMemoized(version) + if v == -1 { + return -1, fmt.Errorf("version %d is after the first shard; shards=%v", version, sql.shards.versions) + } + return v, nil +} + +func (sql *SqliteDb) getShardQuery(version int64) (*sqlite3.Stmt, error) { + v, err := sql.getShard(version) + if err != nil { 
+ return nil, err + } + + if q, ok := sql.shardQueries[v]; ok { + return q, nil + } + sqlQuery := fmt.Sprintf("SELECT bytes FROM tree_%d WHERE version = ? AND sequence = ?", v) + q, err := sql.readConn.Prepare(sqlQuery) + if err != nil { + return nil, err + } + sql.shardQueries[v] = q + sql.logger.Debug().Msgf("added shard query: %s", sqlQuery) + return q, nil +} + +func (sql *SqliteDb) ResetShardQueries() error { + for k, q := range sql.shardQueries { + err := q.Close() + if err != nil { + return err + } + delete(sql.shardQueries, k) + } + + sql.shards = &VersionRange{} + + if sql.readConn == nil { + if err := sql.resetReadConn(); err != nil { + return err + } + } + + q, err := sql.treeWrite.Prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'tree_%'") + if err != nil { + return err + } + for { + hasRow, err := q.Step() + if err != nil { + return err + } + if !hasRow { + break + } + var shard string + err = q.Scan(&shard) + if err != nil { + return err + } + shardVersion, err := strconv.Atoi(shard[5:]) + if err != nil { + return err + } + if err = sql.shards.Add(int64(shardVersion)); err != nil { + return fmt.Errorf("failed to add shard path=%s: %w", sql.opts.Path, err) + } + } + + return q.Close() +} + +func (sql *SqliteDb) WarmLeaves() error { + start := time.Now() + read, err := sql.getReadConn() + if err != nil { + return err + } + stmt, err := read.Prepare("SELECT version, sequence, bytes FROM leaf") + if err != nil { + return err + } + var ( + cnt, version, seq int64 + kz, vz []byte + ) + for { + ok, err := stmt.Step() + if err != nil { + return err + } + if !ok { + break + } + cnt++ + err = stmt.Scan(&version, &seq, &vz) + if err != nil { + return err + } + if cnt%5_000_000 == 0 { + sql.logger.Info().Msgf("warmed %s leaves", humanize.Comma(cnt)) + } + } + if err = stmt.Close(); err != nil { + return err + } + stmt, err = read.Prepare("SELECT key, value FROM latest") + if err != nil { + return err + } + for { + ok, err := stmt.Step() + 
if err != nil { + return err + } + if !ok { + break + } + cnt++ + err = stmt.Scan(&kz, &vz) + if err != nil { + return err + } + if cnt%5_000_000 == 0 { + sql.logger.Info().Msgf("warmed %s leaves", humanize.Comma(cnt)) + } + } + + sql.logger.Info().Msgf("warmed %s leaves in %s", humanize.Comma(cnt), time.Since(start)) + return stmt.Close() +} + +func (sql *SqliteDb) getRightNode(node *Node) (*Node, error) { + var err error + if node.subtreeHeight == 1 || node.subtreeHeight == 2 { + node.rightNode, err = sql.getLeaf(node.rightNodeKey) + if err != nil { + return nil, err + } + if node.rightNode != nil { + return node.rightNode, nil + } + sql.metrics.QueryLeafMiss++ + } + + node.rightNode, err = sql.Get(node.rightNodeKey) + if err != nil { + return nil, fmt.Errorf("failed to get right node node_key=%s height=%d path=%s: %w", + node.rightNodeKey, node.subtreeHeight, sql.opts.Path, err) + } + return node.rightNode, nil +} + +func (sql *SqliteDb) getLeftNode(node *Node) (*Node, error) { + var err error + if node.subtreeHeight == 1 || node.subtreeHeight == 2 { + node.leftNode, err = sql.getLeaf(node.leftNodeKey) + if err != nil { + return nil, err + } + if node.leftNode != nil { + return node.leftNode, nil + } + sql.metrics.QueryLeafMiss++ + } + + node.leftNode, err = sql.Get(node.leftNodeKey) + if err != nil { + return nil, err + } + return node.leftNode, err +} + +func (sql *SqliteDb) isSharded() (bool, error) { + q, err := sql.treeWrite.Prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'tree_%'") + if err != nil { + return false, err + } + var cnt int + for { + hasRow, err := q.Step() + if err != nil { + return false, err + } + if !hasRow { + break + } + cnt++ + if cnt > 1 { + break + } + } + return cnt > 1, q.Close() +} + +func (sql *SqliteDb) Revert(version int) error { + if err := sql.leafWrite.Exec("DELETE FROM leaf WHERE version > ?", version); err != nil { + return err + } + if err := sql.leafWrite.Exec("DELETE FROM leaf_delete WHERE 
version > ?", version); err != nil { + return err + } + if err := sql.leafWrite.Exec("DELETE FROM leaf_orphan WHERE at > ?", version); err != nil { + return err + } + if err := sql.treeWrite.Exec("DELETE FROM root WHERE version > ?", version); err != nil { + return err + } + if err := sql.treeWrite.Exec("DELETE FROM orphan WHERE at > ?", version); err != nil { + return err + } + + hasShards, err := sql.isSharded() + if err != nil { + return err + } + if hasShards { + q, err := sql.treeWrite.Prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'tree_%'") + if err != nil { + return err + } + var shards []string + for { + hasRow, err := q.Step() + if err != nil { + return err + } + if !hasRow { + break + } + var shard string + err = q.Scan(&shard) + if err != nil { + return err + } + shardVersion, err := strconv.Atoi(shard[5:]) + if err != nil { + return err + } + if shardVersion > version { + shards = append(shards, shard) + } + } + if err = q.Close(); err != nil { + return err + } + for _, shard := range shards { + if err = sql.treeWrite.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s", shard)); err != nil { + return err + } + } + } else { + + } + return nil +} + +func (sql *SqliteDb) GetLatestLeaf(key []byte) ([]byte, error) { + if sql.queryLatest == nil { + var err error + sql.queryLatest, err = sql.readConn.Prepare("SELECT value FROM changelog.latest WHERE key = ?") + if err != nil { + return nil, err + } + } + defer sql.queryLatest.Reset() + + if err := sql.queryLatest.Bind(key); err != nil { + return nil, err + } + hasRow, err := sql.queryLatest.Step() + if err != nil { + return nil, err + } + if !hasRow { + return nil, nil + } + var val []byte + err = sql.queryLatest.Scan(&val) + if err != nil { + return nil, err + } + return val, nil +} + +func (sql *SqliteDb) closeHangingIterators() error { + for idx, stmt := range sql.iterators { + sql.logger.Warn().Msgf("closing hanging iterator idx=%d", idx) + if err := stmt.Close(); err != nil { + return 
err + } + delete(sql.iterators, idx) + } + sql.itrIdx = 0 + return nil +} + +func (sql *SqliteDb) getLeafIteratorQuery(start, end []byte, ascending, _ bool) (stmt *sqlite3.Stmt, idx int, err error) { + var suffix string + if ascending { + suffix = "ASC" + } else { + suffix = "DESC" + } + + conn, err := sql.getReadConn() + if err != nil { + return nil, idx, err + } + + sql.itrIdx++ + idx = sql.itrIdx + + switch { + case start == nil && end == nil: + stmt, err = conn.Prepare( + fmt.Sprintf("SELECT key, value FROM changelog.latest ORDER BY key %s", suffix)) + if err != nil { + return nil, idx, err + } + if err = stmt.Bind(); err != nil { + return nil, idx, err + } + case start == nil: + stmt, err = conn.Prepare( + fmt.Sprintf("SELECT key, value FROM changelog.latest WHERE key < ? ORDER BY key %s", suffix)) + if err != nil { + return nil, idx, err + } + if err = stmt.Bind(end); err != nil { + return nil, idx, err + } + case end == nil: + stmt, err = conn.Prepare( + fmt.Sprintf("SELECT key, value FROM changelog.latest WHERE key >= ? ORDER BY key %s", suffix)) + if err != nil { + return nil, idx, err + } + if err = stmt.Bind(start); err != nil { + return nil, idx, err + } + default: + stmt, err = conn.Prepare( + fmt.Sprintf("SELECT key, value FROM changelog.latest WHERE key >= ? AND key < ? 
ORDER BY key %s", suffix)) + if err != nil { + return nil, idx, err + } + if err = stmt.Bind(start, end); err != nil { + return nil, idx, err + } + } + + sql.iterators[idx] = stmt + return stmt, idx, err +} + +func (sql *SqliteDb) replayChangelog(tree *Tree, toVersion int64, targetHash []byte) error { + var ( + version int + lastVersion int + sequence int + bz []byte + key []byte + count int64 + start = time.Now() + lg = log.With().Str("path", sql.opts.Path).Logger() + since = time.Now() + ) + tree.isReplaying = true + defer func() { + tree.isReplaying = false + }() + + lg.Info().Msgf("ensure leaf_delete_index exists...") + if err := sql.leafWrite.Exec("CREATE UNIQUE INDEX IF NOT EXISTS leaf_delete_idx ON leaf_delete (version, sequence)"); err != nil { + return err + } + lg.Info().Msg("...done") + lg.Info().Msgf("replaying changelog from=%d to=%d", tree.version, toVersion) + conn, err := sql.getReadConn() + if err != nil { + return err + } + q, err := conn.Prepare(`SELECT * FROM ( + SELECT version, sequence, bytes, null AS key + FROM leaf WHERE version > ? AND version <= ? + UNION + SELECT version, sequence, null as bytes, key + FROM leaf_delete WHERE version > ? AND version <= ? 
+ ) as ops + ORDER BY version, sequence`) + if err != nil { + return err + } + if err = q.Bind(tree.version, toVersion, tree.version, toVersion); err != nil { + return err + } + for { + ok, err := q.Step() + if err != nil { + return err + } + if !ok { + break + } + count++ + if err = q.Scan(&version, &sequence, &bz, &key); err != nil { + return err + } + if version-1 != lastVersion { + tree.leaves, tree.branches, tree.leafOrphans, tree.deletes = nil, nil, nil, nil + tree.version = int64(version - 1) + tree.sequence = 0 + lastVersion = version - 1 + } + if bz != nil { + nk := NewNodeKey(0, 0) + node, err := MakeNode(tree.pool, nk, bz) + if err != nil { + return err + } + if _, err = tree.Set(node.key, node.hash); err != nil { + return err + } + if sequence != int(tree.sequence) { + return fmt.Errorf("sequence mismatch version=%d; expected %d got %d; path=%s", + version, sequence, tree.sequence, sql.opts.Path) + } + } else { + if _, _, err = tree.Remove(key); err != nil { + return err + } + deleteSequence := tree.deletes[len(tree.deletes)-1].deleteKey.Sequence() + if sequence != int(deleteSequence) { + return fmt.Errorf("sequence delete mismatch; version=%d expected %d got %d; path=%s", + version, sequence, tree.sequence, sql.opts.Path) + } + } + if count%250_000 == 0 { + lg.Info().Msgf("replayed changelog to version=%d count=%s node/s=%s", + version, humanize.Comma(count), humanize.Comma(int64(250_000/time.Since(since).Seconds()))) + since = time.Now() + } + } + rootHash := tree.computeHash() + if !bytes.Equal(targetHash, rootHash) { + return fmt.Errorf("root hash mismatch; expected %x got %x", targetHash, rootHash) + } + tree.leaves, tree.branches, tree.leafOrphans, tree.deletes = nil, nil, nil, nil + tree.sequence = 0 + tree.version = toVersion + lg.Info().Msgf("replayed changelog to version=%d count=%s dur=%s root=%v", + tree.version, humanize.Comma(count), time.Since(start).Round(time.Millisecond), tree.root) + return q.Close() +} + +func (sql *SqliteDb) 
WriteLatestLeaves(tree *Tree) (err error) { + var ( + since = time.Now() + batchSize = 200_000 + count = 0 + step func(node *Node) error + lg = log.With().Str("path", sql.opts.Path).Logger() + latestInsert *sqlite3.Stmt + ) + prepare := func() error { + latestInsert, err = sql.leafWrite.Prepare("INSERT INTO latest (key, value) VALUES (?, ?)") + if err != nil { + return err + } + if err = sql.leafWrite.Begin(); err != nil { + return err + } + return nil + } + + flush := func() error { + if err = sql.leafWrite.Commit(); err != nil { + return err + } + if err = latestInsert.Close(); err != nil { + return err + } + var rate string + if time.Since(since).Seconds() > 0 { + rate = humanize.Comma(int64(float64(batchSize) / time.Since(since).Seconds())) + } else { + rate = "n/a" + } + lg.Info().Msgf("latest flush; count=%s dur=%s wr/s=%s", + humanize.Comma(int64(count)), + time.Since(since).Round(time.Millisecond), + rate, + ) + since = time.Now() + return nil + } + + maybeFlush := func() error { + count++ + if count%batchSize == 0 { + err = flush() + if err != nil { + return err + } + return prepare() + } + return nil + } + + if err = prepare(); err != nil { + return err + } + + step = func(node *Node) error { + if node.isLeaf() { + err := latestInsert.Exec(node.key, node.value) + if err != nil { + return err + } + return maybeFlush() + } + if err = step(node.left(tree)); err != nil { + return err + } + if err = step(node.right(tree)); err != nil { + return err + } + return nil + } + + err = step(tree.root) + if err != nil { + return err + } + err = flush() + if err != nil { + return err + } + + return latestInsert.Close() +} diff --git a/v2/sqlite_batch.go b/v2/sqlite_batch.go new file mode 100644 index 000000000..304ca44fd --- /dev/null +++ b/v2/sqlite_batch.go @@ -0,0 +1,294 @@ +package iavl + +import ( + "fmt" + "time" + + "github.com/bvinc/go-sqlite-lite/sqlite3" + "github.com/dustin/go-humanize" + "github.com/rs/zerolog" +) + +type sqliteBatch struct { + tree *Tree + 
sql *SqliteDb + size int64 + logger zerolog.Logger + + treeCount int64 + treeSince time.Time + leafCount int64 + leafSince time.Time + + leafInsert *sqlite3.Stmt + deleteInsert *sqlite3.Stmt + latestInsert *sqlite3.Stmt + latestDelete *sqlite3.Stmt + treeInsert *sqlite3.Stmt + leafOrphan *sqlite3.Stmt + treeOrphan *sqlite3.Stmt +} + +func (b *sqliteBatch) newChangeLogBatch() (err error) { + if err = b.sql.leafWrite.Begin(); err != nil { + return err + } + b.leafInsert, err = b.sql.leafWrite.Prepare("INSERT OR REPLACE INTO leaf (version, sequence, bytes) VALUES (?, ?, ?)") + if err != nil { + return err + } + b.deleteInsert, err = b.sql.leafWrite.Prepare("INSERT OR REPLACE INTO leaf_delete (version, sequence, key) VALUES (?, ?, ?)") + if err != nil { + return err + } + b.latestInsert, err = b.sql.leafWrite.Prepare("INSERT OR REPLACE INTO latest (key, value) VALUES (?, ?)") + if err != nil { + return err + } + b.latestDelete, err = b.sql.leafWrite.Prepare("DELETE FROM latest WHERE key = ?") + if err != nil { + return err + } + b.leafOrphan, err = b.sql.leafWrite.Prepare("INSERT INTO leaf_orphan (version, sequence, at) VALUES (?, ?, ?)") + if err != nil { + return err + } + b.leafSince = time.Now() + return nil +} + +func (b *sqliteBatch) changelogMaybeCommit() (err error) { + if b.leafCount%b.size == 0 { + if err = b.changelogBatchCommit(); err != nil { + return err + } + if err = b.newChangeLogBatch(); err != nil { + return err + } + } + return nil +} + +func (b *sqliteBatch) changelogBatchCommit() error { + if err := b.sql.leafWrite.Commit(); err != nil { + return err + } + if err := b.leafInsert.Close(); err != nil { + return err + } + if err := b.deleteInsert.Close(); err != nil { + return err + } + if err := b.latestInsert.Close(); err != nil { + return err + } + if err := b.latestDelete.Close(); err != nil { + return err + } + if err := b.leafOrphan.Close(); err != nil { + return err + } + + return nil +} + +func (b *sqliteBatch) execBranchOrphan(nodeKey 
NodeKey) error { + return b.treeOrphan.Exec(nodeKey.Version(), int(nodeKey.Sequence()), b.tree.version) +} + +func (b *sqliteBatch) newTreeBatch(shardID int64) (err error) { + if err = b.sql.treeWrite.Begin(); err != nil { + return err + } + b.treeInsert, err = b.sql.treeWrite.Prepare(fmt.Sprintf( + "INSERT INTO tree_%d (version, sequence, bytes) VALUES (?, ?, ?)", shardID)) + if err != nil { + return err + } + b.treeOrphan, err = b.sql.treeWrite.Prepare("INSERT INTO orphan (version, sequence, at) VALUES (?, ?, ?)") + b.treeSince = time.Now() + return err +} + +func (b *sqliteBatch) treeBatchCommit() error { + if err := b.sql.treeWrite.Commit(); err != nil { + return err + } + if err := b.treeInsert.Close(); err != nil { + return err + } + if err := b.treeOrphan.Close(); err != nil { + return err + } + + if b.treeCount >= b.size { + batchSize := b.treeCount % b.size + if batchSize == 0 { + batchSize = b.size + } + b.logger.Debug().Msgf("db=tree count=%s dur=%s batch=%d rate=%s", + humanize.Comma(b.treeCount), + time.Since(b.treeSince).Round(time.Millisecond), + batchSize, + humanize.Comma(int64(float64(batchSize)/time.Since(b.treeSince).Seconds()))) + } + return nil +} + +func (b *sqliteBatch) treeMaybeCommit(shardID int64) (err error) { + if b.treeCount%b.size == 0 { + if err = b.treeBatchCommit(); err != nil { + return err + } + if err = b.newTreeBatch(shardID); err != nil { + return err + } + } + return nil +} + +func (b *sqliteBatch) saveLeaves() (int64, error) { + var byteCount int64 + + err := b.newChangeLogBatch() + if err != nil { + return 0, err + } + + var ( + bz []byte + val []byte + tree = b.tree + ) + for i, leaf := range tree.leaves { + b.leafCount++ + if tree.storeLatestLeaves { + val = leaf.value + leaf.value = nil + } + bz, err = leaf.Bytes() + if err != nil { + return 0, err + } + byteCount += int64(len(bz)) + if err = b.leafInsert.Exec(leaf.nodeKey.Version(), int(leaf.nodeKey.Sequence()), bz); err != nil { + return 0, err + } + if 
tree.storeLatestLeaves { + if err = b.latestInsert.Exec(leaf.key, val); err != nil { + return 0, err + } + } + if err = b.changelogMaybeCommit(); err != nil { + return 0, err + } + if tree.heightFilter > 0 { + if i != 0 { + // evict leaf + tree.returnNode(leaf) + } else if leaf.nodeKey != tree.root.nodeKey { + // never evict the root if it's a leaf + tree.returnNode(leaf) + } + } + } + + for _, leafDelete := range tree.deletes { + b.leafCount++ + err = b.deleteInsert.Exec(leafDelete.deleteKey.Version(), int(leafDelete.deleteKey.Sequence()), leafDelete.leafKey) + if err != nil { + return 0, err + } + if tree.storeLatestLeaves { + if err = b.latestDelete.Exec(leafDelete.leafKey); err != nil { + return 0, err + } + } + if err = b.changelogMaybeCommit(); err != nil { + return 0, err + } + } + + for _, orphan := range tree.leafOrphans { + b.leafCount++ + err = b.leafOrphan.Exec(orphan.Version(), int(orphan.Sequence()), b.tree.version) + if err != nil { + return 0, err + } + if err = b.changelogMaybeCommit(); err != nil { + return 0, err + } + } + + if err = b.changelogBatchCommit(); err != nil { + return 0, err + } + + err = tree.sql.leafWrite.Exec("CREATE UNIQUE INDEX IF NOT EXISTS leaf_idx ON leaf (version, sequence)") + if err != nil { + return byteCount, err + } + + return byteCount, nil +} + +func (b *sqliteBatch) isCheckpoint() bool { + return len(b.tree.branches) > 0 +} + +func (b *sqliteBatch) saveBranches() (n int64, err error) { + if b.isCheckpoint() { + tree := b.tree + b.treeCount = 0 + + shardID, err := tree.sql.nextShard(tree.version) + if err != nil { + return 0, err + } + b.logger.Debug().Msgf("checkpoint db=tree version=%d shard=%d orphans=%s", + tree.version, shardID, humanize.Comma(int64(len(tree.branchOrphans)))) + + if err = b.newTreeBatch(shardID); err != nil { + return 0, err + } + + for _, node := range tree.branches { + b.treeCount++ + bz, err := node.Bytes() + if err != nil { + return 0, err + } + if err = 
b.treeInsert.Exec(node.nodeKey.Version(), int(node.nodeKey.Sequence()), bz); err != nil { + return 0, err + } + if err = b.treeMaybeCommit(shardID); err != nil { + return 0, err + } + if node.evict { + tree.returnNode(node) + } + } + + for _, orphan := range tree.branchOrphans { + b.treeCount++ + err = b.execBranchOrphan(orphan) + if err != nil { + return 0, err + } + if err = b.treeMaybeCommit(shardID); err != nil { + return 0, err + } + } + + if err = b.treeBatchCommit(); err != nil { + return 0, err + } + err = b.sql.treeWrite.Exec(fmt.Sprintf( + "CREATE INDEX IF NOT EXISTS tree_idx_%d ON tree_%d (version, sequence);", shardID, shardID)) + if err != nil { + return 0, err + } + } + + return b.treeCount, nil +} diff --git a/v2/sqlite_metadata.go b/v2/sqlite_metadata.go new file mode 100644 index 000000000..2fd849645 --- /dev/null +++ b/v2/sqlite_metadata.go @@ -0,0 +1,90 @@ +package iavl + +import ( + "fmt" + "os" + "sync" + + "github.com/bvinc/go-sqlite-lite/sqlite3" +) + +// SqliteKVStore is a generic KV store which uses sqlite as the backend and be used by applications to store and +// retrieve generic key-value pairs, probably for metadata. 
+type SqliteKVStore struct { + options SqliteDbOptions + write *sqlite3.Conn + read *sqlite3.Conn + lock *sync.Mutex +} + +func NewSqliteKVStore(opts SqliteDbOptions) (kv *SqliteKVStore, err error) { + if opts.Path == "" { + return nil, fmt.Errorf("path cannot be empty") + } + if opts.WalSize == 0 { + opts.WalSize = 50 * 1024 * 1024 + } + + pageSize := os.Getpagesize() + kv = &SqliteKVStore{options: opts, lock: &sync.Mutex{}} + kv.write, err = sqlite3.Open(fmt.Sprintf( + "file:%s?_journal_mode=WAL&_synchronous=OFF&&_wal_autocheckpoint=%d", opts.Path, pageSize/opts.WalSize)) + if err != nil { + return nil, err + } + + // Create the tables if they don't exist + if err = kv.write.Exec("CREATE TABLE IF NOT EXISTS kv (key BLOB PRIMARY KEY, value BLOB)"); err != nil { + return nil, err + } + + kv.read, err = sqlite3.Open(fmt.Sprintf("file:%s?mode=ro", opts.Path)) + if err != nil { + return nil, err + } + + return kv, nil +} + +func (kv *SqliteKVStore) Set(key []byte, value []byte) error { + kv.lock.Lock() + defer kv.lock.Unlock() + if err := kv.write.Exec("INSERT OR REPLACE INTO kv (key, value) VALUES (?, ?)", key, value); err != nil { + return err + } + return nil +} + +func (kv *SqliteKVStore) Get(key []byte) (value []byte, err error) { + kv.lock.Lock() + defer kv.lock.Unlock() + stmt, err := kv.read.Prepare("SELECT value FROM kv WHERE key = ?") + if err != nil { + return nil, err + } + defer stmt.Close() + if err = stmt.Bind(key); err != nil { + return nil, err + } + ok, err := stmt.Step() + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + if err = stmt.Scan(&value); err != nil { + return nil, err + } + + return value, nil +} + +func (kv *SqliteKVStore) Delete(key []byte) error { + kv.lock.Lock() + defer kv.lock.Unlock() + if err := kv.write.Exec("DELETE FROM kv WHERE key = ?", key); err != nil { + return err + } + return nil +} diff --git a/v2/sqlite_test.go b/v2/sqlite_test.go new file mode 100644 index 000000000..1468e6868 --- /dev/null +++ 
b/v2/sqlite_test.go @@ -0,0 +1,296 @@ +package iavl + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/bvinc/go-sqlite-lite/sqlite3" + "github.com/cosmos/iavl/v2/testutil" + "github.com/dustin/go-humanize" + "github.com/stretchr/testify/require" +) + +/* +Benchmarks measured from these leafRead-leafWrite tests below: + +# SQLite + +## Writes + +no index: +- structured batch insert (node table) - 507,700 nodes/sec +- unstructured batch insert (tree table) re-use same bytes.Buffer - 444,000 nodes/sec +- unstructured batch insert (tree table) alloc new bytes.Buffer - 473,800 nodes/sec + - !! surprising, why? because GC is async? probably worse in long tail run? + +indexed: +- structured batch insert (node table) - 441,000 nodes/sec + - the difference between indexed and not is not significant. the only way I can explain this is that the writes + below are sequential proceeding version number. this will always be the case with the current node key. +- unstructured batch insert (tree table) - 414,000 nodes/sec + +writing into a trie based table (indexed on key) will likely be *much* slower since it requires an order insertion +and potentially re-balancing the BTree index. +^^^ True, initial test started at 200k and quickly declined to 75k + +## Reads + +- fully memory mapped unstructured (tree table) - ~160,000 nodes/sec +- fully memory mapped structured (node table) - ~172,000 nodes/sec +- fully memory mapped structured (node table) leafRead by key []byte - ~160,000 nodes/sec + +# LevelDB +Writes: 245,000 nodes/sec +Reads: 30,000 nodes/sec !!! 
+ +*/ + +var testDbLocation = "/tmp/sqlite_test" + +func TestBuildSqlite(t *testing.T) { + //dir := t.TempDir() + dir := testDbLocation + t.Logf("using temp dir %s", dir) + + sql, err := NewSqliteDb(NewNodePool(), SqliteDbOptions{Path: dir}) + + require.NoError(t, err) + + gen := testutil.OsmoLike() + version1 := gen.Iterator.Nodes() + var count int + require.Equal(t, int64(1), gen.Iterator.Version()) + + since := time.Now() + + err = sql.leafWrite.Exec("CREATE TABLE node (seq INTEGER, version INTEGER, hash BLOB, key BLOB, height INTEGER, size INTEGER, l_seq INTEGER, l_version INTEGER, r_seq INTEGER, r_version INTEGER)") + require.NoError(t, err) + + err = sql.leafWrite.Exec("CREATE INDEX trie_idx ON node (key)") + //err = sql.leafWrite.Exec("CREATE INDEX node_idx ON node (version, seq)") + require.NoError(t, err) + err = sql.leafWrite.Exec("CREATE INDEX tree_idx ON tree (version, sequence)") + require.NoError(t, err) + + require.NoError(t, sql.leafWrite.Begin()) + + var stmt *sqlite3.Stmt + //stmt, err = sql.leafWrite.Prepare("INSERT INTO tree(version, sequence, bytes) VALUES (?, ?, ?)") + stmt, err = sql.leafWrite.Prepare("INSERT INTO node(version, seq, hash, key, height, size, l_seq, l_version, r_seq, r_version)" + + "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + + require.NoError(t, err) + + startTime := time.Now() + batchSize := 200_000 + //nodeBz := new(bytes.Buffer) + for ; version1.Valid(); err = version1.Next() { + node := version1.GetNode() + lnk := NewNodeKey(1, uint32(count+1)) + rnk := NewNodeKey(1, uint32(count+2)) + n := &Node{key: node.Key, hash: node.Key[:32], + subtreeHeight: 13, size: 4, leftNodeKey: lnk, rightNodeKey: rnk} + + //nodeBz.Reset() + //require.NoError(t, n.WriteBytes(nodeBz)) + + // tree table + //nk := NewNodeKey(1, uint32(count)) + //nodeBz, err := n.Bytes() + //require.NoError(t, err) + //err = stmt.Exec(int(nk.Version()), int(nk.Sequence()), nodeBz) + //require.NoError(t, err) + + // node table + err = stmt.Exec( + 1, // version + 
count, // seq + n.key[:32], // hash + n.key, // key + 13, // height + 4, // size + count+1, // l_seq + 1, // l_version + count+2, // r_seq + 1, // r_version + ) + + if count%batchSize == 0 { + err := sql.leafWrite.Commit() + require.NoError(t, err) + //stmt, err = newBatch() + //require.NoError(t, err) + require.NoError(t, sql.leafWrite.Begin()) + log.Info().Msgf("nodes=%s dur=%s; rate=%s", + humanize.Comma(int64(count)), + time.Since(since).Round(time.Millisecond), + humanize.Comma(int64(float64(batchSize)/time.Since(since).Seconds()))) + since = time.Now() + } + count++ + require.NoError(t, err) + } + + log.Info().Msg("final commit") + require.NoError(t, sql.leafWrite.Commit()) + log.Info().Msgf("total dur=%s rate=%s", + time.Since(startTime).Round(time.Millisecond), + humanize.Comma(int64(40_000_000/time.Since(startTime).Seconds())), + ) + require.NoError(t, stmt.Close()) + require.NoError(t, sql.Close()) +} + +func TestReadSqlite_Trie(t *testing.T) { + dir := testDbLocation + sql, err := NewSqliteDb(NewNodePool(), SqliteDbOptions{Path: dir}) + require.NoError(t, err) + + read, err := sql.getReadConn() + require.NoError(t, err) + + query, err := read.Prepare("SELECT version, seq, hash, key, height, size, l_seq, l_version, r_seq, r_version FROM node WHERE key = ?") + require.NoError(t, err) + + var hash, key []byte + var version, seq, height, size, lSeq, lVersion, rSeq, rVersion int + + i := int64(1) + since := time.Now() + gen := testutil.OsmoLike() + version1 := gen.Iterator.Nodes() + for ; version1.Valid(); err = version1.Next() { + node := version1.GetNode() + require.NoError(t, query.Bind(node.Key)) + hasRow, err := query.Step() + require.NoError(t, err) + require.True(t, hasRow) + require.NoError(t, query.Scan(&version, &seq, &hash, &key, &height, &size, &lSeq, &lVersion, &rSeq, &rVersion)) + require.NoError(t, err) + + if i%100_000 == 0 { + i++ + log.Info().Msgf("nodes=%s dur=%s; rate=%s", + humanize.Comma(i), + time.Since(since), + 
humanize.Comma(int64(float64(100_000)/time.Since(since).Seconds()))) + since = time.Now() + } + require.NoError(t, query.Reset()) + i++ + } + +} + +func TestReadSqlite(t *testing.T) { + //pool := NewNodePool() + //dir := t.TempDir() + var err error + dir := testDbLocation + t.Logf("using temp dir %s", dir) + sql, err := NewSqliteDb(NewNodePool(), SqliteDbOptions{Path: dir}) + require.NoError(t, err) + + var stmt *sqlite3.Stmt + //stmt, err = sql.leafWrite.Prepare("SELECT bytes FROM tree WHERE node_key = ?") + + sqlRead, err := sql.getReadConn() + require.NoError(t, err) + //stmt, err = sqlRead.Prepare("SELECT bytes FROM tree WHERE version = ? AND sequence = ?") + stmt, err = sqlRead.Prepare("SELECT hash, key, height, size, l_seq, l_version, r_seq, r_version FROM node WHERE seq = ? AND version = ?") + require.NoError(t, err) + + var hash, key []byte + var height, size, lSeq, lVersion, rSeq, rVersion int + + since := time.Now() + for i := 1; i < 40_000_000; i++ { + j := rand.Intn(40_000_000) + + // unstructured leafRead: + //nk := NewNodeKey(1, uint32(j)) + //require.NoError(t, stmt.Bind(1, j)) + //hasRow, err := stmt.Step() + //require.Truef(t, hasRow, "no row for %d", j) + //require.NoError(t, err) + //nodeBz, err := stmt.ColumnBlob(0) + //require.NoError(t, err) + //_, err = MakeNode(pool, nk, nodeBz) + //require.NoError(t, err) + + // structured leafRead: + require.NoError(t, stmt.Bind(j, 1)) + hasRow, err := stmt.Step() + require.NoError(t, err) + require.True(t, hasRow) + require.NoError(t, stmt.Scan(&hash, &key, &height, &size, &lSeq, &lVersion, &rSeq, &rVersion)) + + if i%100_000 == 0 { + log.Info().Msgf("nodes=%s dur=%s; rate=%s", + humanize.Comma(int64(i)), + time.Since(since), + humanize.Comma(int64(float64(100_000)/time.Since(since).Seconds()))) + since = time.Now() + } + require.NoError(t, stmt.Reset()) + } + + //gen := testutil.OsmoLike() + //version1 := gen.TreeIterator.Nodes() + //var count int + //require.Equal(t, int64(1), 
gen.TreeIterator.Version()) +} + +func TestNodeKeyFormat(t *testing.T) { + nk := NewNodeKey(100, 2) + k := (int(nk.Version()) << 32) | int(nk.Sequence()) + fmt.Printf("k: %d - %x\n", k, k) +} + +func TestFetchNode(t *testing.T) { + pool := NewNodePool() + conn, err := sqlite3.Open("/tmp/iavl-v2.db") + require.NoError(t, err) + q := "SELECT bytes FROM tree_1 WHERE version = 1 and sequence = 6756148" + stmt, err := conn.Prepare(q) + require.NoError(t, err) + hasRow, err := stmt.Step() + require.NoError(t, err) + require.True(t, hasRow) + nodeBz, err := stmt.ColumnBlob(0) + require.NoError(t, err) + nk := NewNodeKey(1, 6756148) + node, err := MakeNode(pool, nk, nodeBz) + require.NoError(t, err) + fmt.Printf("node: %v\n", node) +} + +func TestMmap(t *testing.T) { + tmpDir := t.TempDir() + conn, err := sqlite3.Open(tmpDir + "/test.db") + require.NoError(t, err) + stmt, err := conn.Prepare("PRAGMA mmap_size=1000000000000") + require.NoError(t, err) + ok, err := stmt.Step() + require.NoError(t, err) + require.True(t, ok) + + stmt, err = conn.Prepare("PRAGMA mmap_size") + require.NoError(t, err) + ok, err = stmt.Step() + require.NoError(t, err) + require.True(t, ok) + res, ok, err := stmt.ColumnRawString(0) + require.True(t, ok) + require.NoError(t, err) + fmt.Printf("res: %s\n", res) +} + +func Test_NewSqliteDb(t *testing.T) { + dir := t.TempDir() + sql, err := NewSqliteDb(NewNodePool(), SqliteDbOptions{Path: dir}) + require.NoError(t, err) + require.NotNil(t, sql) +} diff --git a/v2/sqlite_writer.go b/v2/sqlite_writer.go new file mode 100644 index 000000000..0a49b1055 --- /dev/null +++ b/v2/sqlite_writer.go @@ -0,0 +1,467 @@ +package iavl + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/bvinc/go-sqlite-lite/sqlite3" + "github.com/dustin/go-humanize" + "github.com/rs/zerolog" +) + +type pruneSignal struct { + pruneVersion int64 + checkpoints VersionRange +} + +type saveSignal struct { + batch *sqliteBatch + root *Node + version int64 + wantCheckpoint 
bool +} + +type saveResult struct { + n int64 + err error +} + +type sqlWriter struct { + sql *SqliteDb + logger zerolog.Logger + + treePruneCh chan *pruneSignal + treeCh chan *saveSignal + treeResult chan *saveResult + + leafPruneCh chan *pruneSignal + leafCh chan *saveSignal + leafResult chan *saveResult +} + +func (sql *SqliteDb) newSQLWriter() *sqlWriter { + return &sqlWriter{ + sql: sql, + leafPruneCh: make(chan *pruneSignal), + treePruneCh: make(chan *pruneSignal), + leafCh: make(chan *saveSignal), + treeCh: make(chan *saveSignal), + leafResult: make(chan *saveResult), + treeResult: make(chan *saveResult), + logger: sql.logger.With().Str("module", "write").Logger(), + } +} + +func (w *sqlWriter) start(ctx context.Context) { + go func() { + err := w.treeLoop(ctx) + if err != nil { + w.logger.Fatal().Err(err).Msg("tree loop failed") + } + }() + go func() { + err := w.leafLoop(ctx) + if err != nil { + w.logger.Fatal().Err(err).Msg("leaf loop failed") + } + }() +} + +func (w *sqlWriter) leafLoop(ctx context.Context) error { + var ( + pruneVersion int64 + nextPruneVersion int64 + checkpoints VersionRange + orphanQuery *sqlite3.Stmt + deleteOrphan *sqlite3.Stmt + deleteLeaf *sqlite3.Stmt + pruneCount int64 + pruneStartTime time.Time + err error + ) + + beginPruneBatch := func(pruneTo int64) error { + if err = w.sql.leafWrite.Begin(); err != nil { + return fmt.Errorf("failed to begin leaf prune tx; %w", err) + } + orphanQuery, err = w.sql.leafWrite.Prepare(`SELECT version, sequence, ROWID FROM leaf_orphan WHERE at <= ?`, pruneTo) + if err != nil { + return fmt.Errorf("failed to prepare leaf orphan query; %w", err) + } + deleteOrphan, err = w.sql.leafWrite.Prepare("DELETE FROM leaf_orphan WHERE ROWID = ?") + if err != nil { + return fmt.Errorf("failed to prepare leaf orphan delete; %w", err) + } + deleteLeaf, err = w.sql.leafWrite.Prepare("DELETE FROM leaf WHERE version = ? 
and sequence = ?") + if err != nil { + return fmt.Errorf("failed to prepare leaf delete; %w", err) + } + + return nil + } + startPrune := func(startPruningVersion int64) error { + // only prune leafs to shard (checkpoint) boundaries. + // e.g. given shards = [100, 200, 300]; + // startPruningVersion = 150; pruneTo = 100 + // startPruningVersion = 350; pruneTo = 300 + // startPruningVersion = 50; do nothing + pruneTo := checkpoints.FindPrevious(startPruningVersion) + if pruneTo == -1 { + w.logger.Debug().Msgf("skipping leaf prune: requested prune version %d < first checkpoint", startPruningVersion) + return nil + } + pruneVersion = pruneTo + pruneCount = 0 + pruneStartTime = time.Now() + + w.logger.Debug().Msgf("leaf prune starting requested=%d pruneTo=%d", startPruningVersion, pruneTo) + if err = beginPruneBatch(pruneVersion); err != nil { + return err + } + return nil + } + commitPrune := func() error { + if err = orphanQuery.Close(); err != nil { + return err + } + orphanQuery = nil + if err = w.sql.leafWrite.Commit(); err != nil { + return err + } + w.logger.Debug().Msgf("commit leaf prune count=%s", humanize.Comma(pruneCount)) + if err = w.sql.leafWrite.Exec("PRAGMA wal_checkpoint(RESTART)"); err != nil { + return fmt.Errorf("failed to checkpoint; %w", err) + } + + if err = deleteLeaf.Close(); err != nil { + return err + } + if err = deleteOrphan.Close(); err != nil { + return err + } + + return nil + } + stepPruning := func() error { + hasRow, err := orphanQuery.Step() + if err != nil { + return fmt.Errorf("failed to step leaf orphan query; %w", err) + } + if hasRow { + pruneCount++ + var ( + version int64 + sequence int + rowID int64 + ) + err = orphanQuery.Scan(&version, &sequence, &rowID) + if err != nil { + return err + } + if err = deleteLeaf.Exec(version, sequence); err != nil { + return err + } + if err = deleteOrphan.Exec(rowID); err != nil { + return err + } + if pruneCount%pruneBatchSize == 0 { + if err = commitPrune(); err != nil { + return err + } 
+ if err = beginPruneBatch(pruneVersion); err != nil { + return err + } + } + } else { + if err = commitPrune(); err != nil { + return err + } + err = w.sql.leafWrite.Exec("DELETE FROM leaf_delete WHERE version < ?", pruneVersion) + if err != nil { + return fmt.Errorf("failed to prune leaf_delete; %w", err) + } + w.logger.Debug().Msgf("done leaf prune count=%s dur=%s to=%d", + humanize.Comma(pruneCount), + time.Since(pruneStartTime).Round(time.Millisecond), + pruneVersion, + ) + if nextPruneVersion != 0 { + if err = startPrune(nextPruneVersion); err != nil { + return err + } + nextPruneVersion = 0 + } else { + pruneVersion = 0 + } + } + + return nil + } + saveLeaves := func(sig *saveSignal) { + res := &saveResult{} + res.n, res.err = sig.batch.saveLeaves() + if sig.batch.isCheckpoint() { + if err = w.sql.leafWrite.Exec("PRAGMA wal_checkpoint(TRUNCATE)"); err != nil { + w.logger.Err(err).Msg("failed leaf wal_checkpoint") + } + } + w.leafResult <- res + } + for { + if pruneVersion != 0 { + select { + case sig := <-w.leafCh: + if err = commitPrune(); err != nil { + return fmt.Errorf("interrupt leaf prune failed in commit; %w", err) + } + saveLeaves(sig) + if err = beginPruneBatch(pruneVersion); err != nil { + return fmt.Errorf("interrupt leaf prune failed in begin; %w", err) + } + case sig := <-w.leafPruneCh: + w.logger.Warn().Msgf("leaf prune signal received while pruning version=%d next=%d", pruneVersion, sig.pruneVersion) + checkpoints = sig.checkpoints + nextPruneVersion = sig.pruneVersion + case <-ctx.Done(): + return nil + default: + err = stepPruning() + if err != nil { + return fmt.Errorf("failed to step pruning; %w", err) + } + } + } else { + select { + case sig := <-w.leafCh: + saveLeaves(sig) + case sig := <-w.leafPruneCh: + checkpoints = sig.checkpoints + err = startPrune(sig.pruneVersion) + if err != nil { + return fmt.Errorf("failed to start leaf prune; %w", err) + } + case <-ctx.Done(): + return nil + } + } + } +} + +const pruneBatchSize = 500_000 + 
+func (w *sqlWriter) treeLoop(ctx context.Context) error { + var ( + nextPruneVersion int64 + checkpoints VersionRange + pruneVersion int64 + pruneCount int64 + pruneStartTime time.Time + orphanQuery *sqlite3.Stmt + // TODO use a map + deleteBranch func(shardId int64, version int64, sequence int) (err error) + deleteOrphan *sqlite3.Stmt + ) + beginPruneBatch := func(version int64) (err error) { + if err = w.sql.treeWrite.Begin(); err != nil { + return err + } + orphanQuery, err = w.sql.treeWrite.Prepare( + "SELECT version, sequence, at, ROWID FROM orphan WHERE at <= ?", version) + if err != nil { + return fmt.Errorf("failed to prepare orphan query; %w", err) + } + deleteBranch = func(shardId int64, version int64, sequence int) (err error) { + return w.sql.treeWrite.Exec( + fmt.Sprintf("DELETE FROM tree_%d WHERE version = ? AND sequence = ?", shardId), version, sequence) + } + deleteOrphan, err = w.sql.treeWrite.Prepare("DELETE FROM orphan WHERE ROWID = ?") + if err != nil { + return fmt.Errorf("failed to prepare orphan delete; %w", err) + } + + return err + } + commitPrune := func() (err error) { + if err = orphanQuery.Close(); err != nil { + return err + } + if err = deleteOrphan.Close(); err != nil { + return err + } + if err = w.sql.treeWrite.Commit(); err != nil { + return err + } + w.logger.Debug().Msgf("commit tree prune count=%s", humanize.Comma(pruneCount)) + if err = w.sql.treeWrite.Exec("PRAGMA wal_checkpoint(RESTART)"); err != nil { + return fmt.Errorf("failed to checkpoint; %w", err) + } + return nil + } + saveTree := func(sig *saveSignal) { + res := &saveResult{} + res.n, res.err = sig.batch.saveBranches() + if res.err == nil { + err := w.sql.SaveRoot(sig.version, sig.root, sig.wantCheckpoint) + if err != nil { + res.err = fmt.Errorf("failed to save root path=%s version=%d: %w", w.sql.opts.Path, sig.version, err) + } + } + if sig.batch.isCheckpoint() { + if err := w.sql.treeWrite.Exec("PRAGMA wal_checkpoint(TRUNCATE)"); err != nil { + res.err = 
fmt.Errorf("failed tree checkpoint; %w", err) + } + } + w.treeResult <- res + } + startPrune := func(startPruningVersion int64) error { + w.logger.Debug().Msgf("tree prune to version=%d", startPruningVersion) + pruneStartTime = time.Now() + pruneCount = 0 + pruneVersion = startPruningVersion + err := beginPruneBatch(pruneVersion) + if err != nil { + return err + } + return nil + } + stepPruning := func() error { + hasRow, err := orphanQuery.Step() + if err != nil { + return fmt.Errorf("failed to step orphan query; %w", err) + } + if hasRow { + pruneCount++ + var ( + version int64 + sequence int + at int + rowID int64 + ) + err = orphanQuery.Scan(&version, &sequence, &at, &rowID) + if err != nil { + return err + } + shard, err := w.sql.getShard(version) + if err != nil { + return err + } + if err = deleteBranch(shard, version, sequence); err != nil { + return fmt.Errorf("failed to delete from tree_%d count=%d; %w", shard, pruneCount, err) + } + if err = deleteOrphan.Exec(rowID); err != nil { + return fmt.Errorf("failed to delete from orphan count=%d; %w", pruneCount, err) + } + if pruneCount%pruneBatchSize == 0 { + if err = commitPrune(); err != nil { + return err + } + if err = beginPruneBatch(pruneVersion); err != nil { + return err + } + } + } else { + if err = commitPrune(); err != nil { + return err + } + + prevCheckpoint := checkpoints.FindPrevious(pruneVersion) + if err = w.sql.treeWrite.Exec("DELETE FROM root WHERE version < ?", prevCheckpoint); err != nil { + return err + } + + w.logger.Debug().Msgf("done tree prune count=%s dur=%s to=%d", + humanize.Comma(pruneCount), + time.Since(pruneStartTime).Round(time.Millisecond), + prevCheckpoint, + ) + if nextPruneVersion != 0 { + if err = startPrune(nextPruneVersion); err != nil { + return err + } + nextPruneVersion = 0 + } else { + pruneVersion = 0 + } + } + + return nil + } + + for { + // if there is pruning in progress support interrupt and immediate continuation + if pruneVersion != 0 { + select { + case sig 
:= <-w.treeCh: + if sig.wantCheckpoint { + if err := commitPrune(); err != nil { + return err + } + saveTree(sig) + if err := beginPruneBatch(pruneVersion); err != nil { + return err + } + } else { + saveTree(sig) + } + case sig := <-w.treePruneCh: + w.logger.Warn().Msgf("tree prune signal received while pruning version=%d next=%d", pruneVersion, sig.pruneVersion) + checkpoints = sig.checkpoints + nextPruneVersion = sig.pruneVersion + case <-ctx.Done(): + return nil + default: + // continue pruning if no signal + err := stepPruning() + if err != nil { + return err + } + } + } else { + select { + case sig := <-w.treeCh: + saveTree(sig) + case sig := <-w.treePruneCh: + checkpoints = sig.checkpoints + err := startPrune(sig.pruneVersion) + if err != nil { + return err + } + case <-ctx.Done(): + return nil + } + } + } +} + +func (w *sqlWriter) saveTree(tree *Tree) error { + saveStart := time.Now() + + batch := &sqliteBatch{ + sql: tree.sql, + tree: tree, + size: 200_000, + logger: log.With(). + Str("module", "sqlite-batch"). + Str("path", tree.sql.opts.Path).Logger(), + } + saveSig := &saveSignal{batch: batch, root: tree.root, version: tree.version, wantCheckpoint: tree.shouldCheckpoint} + w.treeCh <- saveSig + w.leafCh <- saveSig + treeResult := <-w.treeResult + leafResult := <-w.leafResult + dur := time.Since(saveStart) + tree.sql.metrics.WriteDurations = append(tree.sql.metrics.WriteDurations, dur) + tree.sql.metrics.WriteTime += dur + tree.sql.metrics.WriteLeaves += int64(len(tree.leaves)) + + err := errors.Join(treeResult.err, leafResult.err) + + return err +} + +// TODO +// unify delete approach between tree and leaf. tree uses rowid range in delete, leaf issues delete for each rowid. +// which one is faster? 
+// diff --git a/v2/testutil/util.go b/v2/testutil/util.go new file mode 100644 index 000000000..b95ab47fe --- /dev/null +++ b/v2/testutil/util.go @@ -0,0 +1,460 @@ +package testutil + +import ( + "fmt" + + "github.com/cosmos/iavl-bench/bench" + "github.com/dustin/go-humanize" + "github.com/kocubinski/costor-api/compact" + "github.com/kocubinski/costor-api/logz" +) + +type TreeBuildOptions struct { + Until int64 + UntilHash string + LoadVersion int64 + Iterator bench.ChangesetIterator + Report func() + SampleRate int64 +} + +func (opts *TreeBuildOptions) With10_000() *TreeBuildOptions { + opts.Until = 10_000 + opts.UntilHash = "34d9a0d607ecd96ddbde4c0089ee4be633bf0b73b9b6c9da827f7305f1591044" + return opts +} + +func (opts *TreeBuildOptions) With25_000() *TreeBuildOptions { + opts.Until = 25_000 + opts.UntilHash = "08482db3715bef1f3251894b8c37901950a4787ac5f10d75ab4318e4fea91642" + return opts +} + +func (opts *TreeBuildOptions) FastForward(version int64) error { + log := logz.Logger.With().Str("module", "fastForward").Logger() + log.Info().Msgf("fast forwarding changesets to version %d...", opts.LoadVersion+1) + i := 1 + itr := opts.Iterator + var err error + for ; itr.Valid(); err = itr.Next() { + if itr.Version() > version { + break + } + if err != nil { + return err + } + nodes := itr.Nodes() + for ; nodes.Valid(); err = nodes.Next() { + if err != nil { + return err + } + if i%5_000_000 == 0 { + fmt.Printf("fast forward %s nodes\n", humanize.Comma(int64(i))) + } + i++ + } + } + log.Info().Msgf("fast forward complete") + return nil +} + +func NewTreeBuildOptions() *TreeBuildOptions { + var seed int64 = 1234 + var versions int64 = 10_000_000 + bankGen := bench.BankLikeGenerator(seed, versions) + //bankGen.InitialSize = 10_000 + lockupGen := bench.LockupLikeGenerator(seed, versions) + //lockupGen.InitialSize = 10_000 + stakingGen := bench.StakingLikeGenerator(seed, versions) + //stakingGen.InitialSize = 10_000 + itr, err := 
bench.NewChangesetIterators([]bench.ChangesetGenerator{ + bankGen, + lockupGen, + stakingGen, + }) + if err != nil { + panic(err) + } + opts := TreeBuildOptions{ + Iterator: itr, + } + return opts.With25_000() +} + +func BankLockup25_000() TreeBuildOptions { + var seed int64 = 1234 + var versions int64 = 10_000_000 + bankGen := bench.BankLikeGenerator(seed, versions) + lockupGen := bench.LockupLikeGenerator(seed, versions) + itr, err := bench.NewChangesetIterators([]bench.ChangesetGenerator{ + bankGen, + lockupGen, + }) + if err != nil { + panic(err) + } + opts := TreeBuildOptions{ + Iterator: itr, + Until: 25_000, + UntilHash: "c1dc9dc7d3a8ae025d2a347eea19121e98435b06b421607119bc3cf3cf79be05", + } + return opts +} + +func BigTreeOptions_100_000() *TreeBuildOptions { + var seed int64 = 1234 + var versions int64 = 200_000 + bankGen := bench.BankLikeGenerator(seed, versions) + lockupGen := bench.LockupLikeGenerator(seed, versions) + stakingGen := bench.StakingLikeGenerator(seed, versions) + itr, err := bench.NewChangesetIterators([]bench.ChangesetGenerator{ + bankGen, + lockupGen, + stakingGen, + }) + if err != nil { + panic(err) + } + opts := &TreeBuildOptions{ + Iterator: itr, + Until: 100, + UntilHash: "c1dc9dc7d3a8ae025d2a347eea19121e98435b06b421607119bc3cf3cf79be05", + } + return opts +} + +func BigStartOptions() TreeBuildOptions { + initialSize := 1_000_000 + var seed int64 = 1234 + var versions int64 = 10_000 + bankGen := bench.BankLikeGenerator(seed, versions) + bankGen.InitialSize = initialSize + lockupGen := bench.LockupLikeGenerator(seed, versions) + lockupGen.InitialSize = initialSize + stakingGen := bench.StakingLikeGenerator(seed, versions) + stakingGen.InitialSize = initialSize + + itr, err := bench.NewChangesetIterators([]bench.ChangesetGenerator{ + bankGen, + lockupGen, + stakingGen, + }) + if err != nil { + panic(err) + } + + opts := TreeBuildOptions{ + Iterator: itr, + Until: 300, + UntilHash: 
"b7266b2b30979e1415bcb8ef7fed9637b542213fefd1bb77374aa1f14442aa50", // 300 + } + + return opts +} + +func OsmoLike() *TreeBuildOptions { + initialSize := 20_000_000 // revert to 20M!! + finalSize := int(1.5 * float64(initialSize)) + var seed int64 = 1234 + var versions int64 = 1_000_000 + bankGen := bench.BankLikeGenerator(seed, versions) + bankGen.InitialSize = initialSize + bankGen.FinalSize = finalSize + bankGen2 := bench.BankLikeGenerator(seed+1, versions) + bankGen2.InitialSize = initialSize + bankGen2.FinalSize = finalSize + //lockupGen := bench.LockupLikeGenerator(seed, versions) + //lockupGen.InitialSize = initialSize + //stakingGen := bench.StakingLikeGenerator(seed, versions) + //stakingGen.InitialSize = initialSize + + itr, err := bench.NewChangesetIterators([]bench.ChangesetGenerator{ + bankGen, + bankGen2, + }) + if err != nil { + panic(err) + } + + opts := &TreeBuildOptions{ + Iterator: itr, + Until: 10_000, + // hash for 10k WITHOUT a store key prefix on the key + UntilHash: "e996df6099bc4b6e8a723dc551af4fa7cfab50e3a182ab1e21f5e90e5e7124cd", // 10000 + // hash for 10k WITH store key prefix on key + //UntilHash: "3b43ef49895a7c483ef4b9a84a1f0ddbe7615c9a65bc533f69bc6bf3eb1b3d6c", // OsmoLike, 10000 + } + + return opts +} + +func OsmoLikeManyTrees() *TreeBuildOptions { + seed := int64(1234) + versions := int64(100_000) + changes := int(versions / 100) + deleteFrac := 0.2 + + wasm := bench.ChangesetGenerator{ + StoreKey: "wasm", + Seed: seed, + KeyMean: 79, + KeyStdDev: 23, + ValueMean: 170, + ValueStdDev: 202, + InitialSize: 8_500_000, + FinalSize: 8_600_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + ibc := bench.ChangesetGenerator{ + StoreKey: "ibc", + Seed: seed, + KeyMean: 58, + KeyStdDev: 4, + ValueMean: 22, + ValueStdDev: 29, + InitialSize: 23_400_000, + FinalSize: 23_500_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + upgrade := bench.ChangesetGenerator{ + 
StoreKey: "upgrade", + Seed: seed, + KeyMean: 20, + KeyStdDev: 1, + ValueMean: 8, + ValueStdDev: 0, + InitialSize: 60, + FinalSize: 62, + Versions: versions, + ChangePerVersion: 1, + DeleteFraction: 0, + } + concentratedliquidity := bench.ChangesetGenerator{ + StoreKey: "concentratedliquidity", + Seed: seed, + KeyMean: 25, + KeyStdDev: 11, + ValueMean: 44, + ValueStdDev: 48, + InitialSize: 600_000, + FinalSize: 610_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + icahost := bench.ChangesetGenerator{ + StoreKey: "icahost", + Seed: seed, + KeyMean: 103, + KeyStdDev: 11, + ValueMean: 37, + ValueStdDev: 25, + InitialSize: 1_500, + FinalSize: 1_600, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + capability := bench.ChangesetGenerator{ + StoreKey: "capability", + Seed: seed, + KeyMean: 24, + KeyStdDev: 1, + ValueMean: 42, + ValueStdDev: 1, + InitialSize: 5_000, + FinalSize: 5_400, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + authz := bench.ChangesetGenerator{ + StoreKey: "authz", + Seed: seed, + KeyMean: 83, + KeyStdDev: 9, + ValueMean: 113, + ValueStdDev: 30, + InitialSize: 45_000, + FinalSize: 48_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + incentives := bench.ChangesetGenerator{ + StoreKey: "incentives", + Seed: seed, + KeyMean: 23, + KeyStdDev: 5, + ValueMean: 91, + ValueStdDev: 20, + InitialSize: 45_000, + FinalSize: 50_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + acc := bench.ChangesetGenerator{ + StoreKey: "acc", + Seed: seed, + KeyMean: 21, + KeyStdDev: 1, + ValueMean: 149, + ValueStdDev: 25, + InitialSize: 850_000, + FinalSize: 940_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + superfluid := bench.ChangesetGenerator{ + StoreKey: "superfluid", + Seed: seed, + KeyMean: 10, + KeyStdDev: 2, + ValueMean: 22, 
+ ValueStdDev: 11, + InitialSize: 850_000, + FinalSize: 940_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + slashing := bench.ChangesetGenerator{ + StoreKey: "slashing", + Seed: seed, + KeyMean: 30, + KeyStdDev: 1, + ValueMean: 2, + ValueStdDev: 1, + InitialSize: 3_100_000, + FinalSize: 3_700_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + gov := bench.ChangesetGenerator{ + StoreKey: "gov", + Seed: seed, + KeyMean: 28, + KeyStdDev: 5, + ValueMean: 600, + ValueStdDev: 7200, + InitialSize: 7_000, + FinalSize: 7_500, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + distribution := bench.ChangesetGenerator{ + StoreKey: "distribution", + Seed: seed, + KeyMean: 36, + KeyStdDev: 6, + ValueMean: 158, + ValueStdDev: 150, + InitialSize: 2_600_000, + FinalSize: 3_300_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + twap := bench.ChangesetGenerator{ + StoreKey: "twap", + Seed: seed, + KeyMean: 157, + KeyStdDev: 31, + ValueMean: 272, + ValueStdDev: 31, + InitialSize: 400_000, + FinalSize: 480_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + staking := bench.ChangesetGenerator{ + StoreKey: "staking", + Seed: seed, + KeyMean: 42, + KeyStdDev: 2, + ValueMean: 505, + ValueStdDev: 4950, + InitialSize: 1_500_000, + FinalSize: 1_700_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + bank := bench.ChangesetGenerator{ + StoreKey: "bank", + Seed: seed, + KeyMean: 57, + KeyStdDev: 25, + ValueMean: 45, + ValueStdDev: 25, + InitialSize: 2_000_000, + FinalSize: 2_300_000, + Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + lockup := bench.ChangesetGenerator{ + StoreKey: "lockup", + Seed: seed, + KeyMean: 60, + KeyStdDev: 35, + ValueMean: 25, + ValueStdDev: 36, + InitialSize: 2_000_000, + FinalSize: 2_300_000, 
+ Versions: versions, + ChangePerVersion: changes, + DeleteFraction: deleteFrac, + } + + itr, err := bench.NewChangesetIterators([]bench.ChangesetGenerator{ + wasm, + ibc, + upgrade, + concentratedliquidity, + icahost, + capability, + authz, + incentives, + acc, + superfluid, + slashing, + gov, + distribution, + twap, + staking, + bank, + lockup, + }) + if err != nil { + panic(err) + } + return &TreeBuildOptions{ + Iterator: itr, + Until: versions, + } +} + +func CompactedChangelogs(logDir string) *TreeBuildOptions { + itr, err := compact.NewChangesetIterator(logDir) + if err != nil { + panic(err) + } + return &TreeBuildOptions{ + Iterator: itr, + Until: 10_000, + // hash for 10k WITHOUT a store key prefix on the key + UntilHash: "e996df6099bc4b6e8a723dc551af4fa7cfab50e3a182ab1e21f5e90e5e7124cd", // 10000 + // hash for 10k WITH store key prefix on key + //UntilHash: "3b43ef49895a7c483ef4b9a84a1f0ddbe7615c9a65bc533f69bc6bf3eb1b3d6c", // OsmoLike, 10000 + } +} diff --git a/v2/tree.go b/v2/tree.go new file mode 100644 index 000000000..e8111990a --- /dev/null +++ b/v2/tree.go @@ -0,0 +1,701 @@ +package iavl + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "os" + "time" + + "github.com/cosmos/iavl/v2/metrics" + "github.com/rs/zerolog" + zlog "github.com/rs/zerolog/log" +) + +var log = zlog.Output(zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.Stamp, +}) + +type nodeDelete struct { + // the sequence in which this deletion was processed + deleteKey NodeKey + // the leaf key to delete in `latest` table (if maintained) + leafKey []byte +} + +type Tree struct { + version int64 + root *Node + metrics *metrics.TreeMetrics + sql *SqliteDb + sqlWriter *sqlWriter + writerCancel context.CancelFunc + pool *NodePool + + checkpoints *VersionRange + shouldCheckpoint bool + + // options + maxWorkingSize uint64 + workingBytes uint64 + checkpointInterval int64 + checkpointMemory uint64 + workingSize int64 + storeLeafValues bool + storeLatestLeaves bool + 
heightFilter int8 + metricsProxy metrics.Proxy + + // state + branches []*Node + leaves []*Node + branchOrphans []NodeKey + leafOrphans []NodeKey + deletes []*nodeDelete + sequence uint32 + isReplaying bool + evictionDepth int8 +} + +type TreeOptions struct { + CheckpointInterval int64 + CheckpointMemory uint64 + StateStorage bool + HeightFilter int8 + EvictionDepth int8 + MetricsProxy metrics.Proxy +} + +func DefaultTreeOptions() TreeOptions { + return TreeOptions{ + CheckpointInterval: 1000, + StateStorage: true, + HeightFilter: 1, + EvictionDepth: -1, + } +} + +func NewTree(sql *SqliteDb, pool *NodePool, opts TreeOptions) *Tree { + ctx, cancel := context.WithCancel(context.Background()) + tree := &Tree{ + sql: sql, + sqlWriter: sql.newSQLWriter(), + writerCancel: cancel, + pool: pool, + checkpoints: &VersionRange{}, + metrics: &metrics.TreeMetrics{}, + maxWorkingSize: 1.5 * 1024 * 1024 * 1024, + checkpointInterval: opts.CheckpointInterval, + checkpointMemory: opts.CheckpointMemory, + storeLeafValues: opts.StateStorage, + storeLatestLeaves: false, + heightFilter: opts.HeightFilter, + metricsProxy: opts.MetricsProxy, + evictionDepth: opts.EvictionDepth, + } + + tree.sqlWriter.start(ctx) + return tree +} + +func (tree *Tree) LoadVersion(version int64) (err error) { + if tree.sql == nil { + return fmt.Errorf("sql is nil") + } + + tree.workingBytes = 0 + tree.workingSize = 0 + + tree.checkpoints, err = tree.sql.loadCheckpointRange() + if err != nil { + return err + } + tree.version = tree.checkpoints.FindPrevious(version) + + tree.root, err = tree.sql.LoadRoot(tree.version) + if err != nil { + return err + } + if version > tree.version { + var targetHash []byte + targetRoot, err := tree.sql.LoadRoot(version) + if err != nil { + return err + } + if targetRoot == nil { + targetHash = emptyHash + } else { + targetHash = targetRoot.hash + } + + if err = tree.replayChangelog(version, targetHash); err != nil { + return err + } + } + + return nil +} + +func (tree *Tree) 
LoadSnapshot(version int64, traverseOrder TraverseOrderType) (err error) { + var v int64 + tree.root, v, err = tree.sql.ImportMostRecentSnapshot(version, traverseOrder, true) + if err != nil { + return err + } + if v < version { + return fmt.Errorf("requested %d found snapshot %d, replay not yet supported", version, v) + } + tree.version = v + tree.checkpoints, err = tree.sql.loadCheckpointRange() + if err != nil { + return err + } + return nil +} + +func (tree *Tree) SaveSnapshot() (err error) { + ctx := context.Background() + return tree.sql.Snapshot(ctx, tree) +} + +func (tree *Tree) SaveVersion() ([]byte, int64, error) { + tree.version++ + tree.sequence = 0 + + if err := tree.sql.closeHangingIterators(); err != nil { + return nil, 0, err + } + + if !tree.shouldCheckpoint { + tree.shouldCheckpoint = tree.version == 1 || + (tree.checkpointInterval > 0 && tree.version-tree.checkpoints.Last() >= tree.checkpointInterval) || + (tree.checkpointMemory > 0 && tree.workingBytes >= tree.checkpointMemory) + } + rootHash := tree.computeHash() + + err := tree.sqlWriter.saveTree(tree) + if err != nil { + return nil, tree.version, err + } + + if tree.shouldCheckpoint { + tree.branchOrphans = nil + if err = tree.checkpoints.Add(tree.version); err != nil { + return nil, tree.version, err + } + + // if we've checkpointed without loading any tree node reads this means this was the first checkpoint. + // shard queries will not be loaded. initialize them now. + if tree.shouldCheckpoint && tree.sql.readConn == nil { + if err := tree.sql.ResetShardQueries(); err != nil { + return nil, tree.version, err + } + } + } + tree.leafOrphans = nil + tree.leaves = nil + tree.branches = nil + tree.deletes = nil + tree.shouldCheckpoint = false + + return rootHash, tree.version, nil +} + +// ComputeHash the node and its descendants recursively. This usually mutates all +// descendant nodes. Returns the tree root node hash. +// If the tree is empty (i.e. 
the node is nil), returns the hash of an empty input, +// to conform with RFC-6962. +func (tree *Tree) computeHash() []byte { + if tree.root == nil { + return sha256.New().Sum(nil) + } + tree.deepHash(tree.root, 0) + return tree.root.hash +} + +func (tree *Tree) deepHash(node *Node, depth int8) { + if node == nil { + panic(fmt.Sprintf("node is nil; sql.path=%s", tree.sql.opts.Path)) + } + if node.isLeaf() { + // new leaves are written every version + if node.nodeKey.Version() == tree.version { + tree.leaves = append(tree.leaves, node) + } + // always end recursion at a leaf + return + } + + if node.hash == nil { + // When the child is a leaf, this will initiate a leafRead from storage for the sole purpose of producing a hash. + // Recall that a terminal tree node may have only updated one leaf this version. + // We can explore storing right/left hash in terminal tree nodes to avoid this, or changing the storage + // format to iavl v0 where left/right hash are stored in the node. + tree.deepHash(node.left(tree), depth+1) + tree.deepHash(node.right(tree), depth+1) + } + + if !tree.shouldCheckpoint { + // when not checkpointing, end recursion at a node with a hash (node.version < tree.version) + if node.hash != nil { + return + } + } else { + // otherwise accumulate the branch node for checkpointing + tree.branches = append(tree.branches, node) + + // if the node is missing a hash then it's children have already been loaded above. + // if the node has a hash then traverse the dirty path. + if node.hash != nil { + if node.leftNode != nil { + tree.deepHash(node.leftNode, depth+1) + } + if node.rightNode != nil { + tree.deepHash(node.rightNode, depth+1) + } + } + } + + node._hash() + + // when heightFilter > 0 remove the leaf nodes from memory. + // if the leaf node is not dirty, return it to the pool. + // if the leaf node is dirty, it will be written to storage then removed from the pool. 
+ if tree.heightFilter > 0 { + if node.leftNode != nil && node.leftNode.isLeaf() { + if !node.leftNode.dirty { + tree.returnNode(node.leftNode) + } + node.leftNode = nil + } + if node.rightNode != nil && node.rightNode.isLeaf() { + if !node.rightNode.dirty { + tree.returnNode(node.rightNode) + } + node.rightNode = nil + } + } + + // finally, if checkpointing, remove node's children from memory if we're at the eviction height + if tree.shouldCheckpoint { + if depth >= tree.evictionDepth { + node.evictChildren() + } + } +} + +func (tree *Tree) Get(key []byte) ([]byte, error) { + if tree.metricsProxy != nil { + defer tree.metricsProxy.MeasureSince(time.Now(), "iavl_v2", "get") + } + var ( + res []byte + err error + ) + if tree.storeLatestLeaves { + res, err = tree.sql.GetLatestLeaf(key) + } else { + if tree.root == nil { + return nil, nil + } + _, res, err = tree.root.get(tree, key) + } + return res, err +} + +func (tree *Tree) Has(key []byte) (bool, error) { + if tree.metricsProxy != nil { + defer tree.metricsProxy.MeasureSince(time.Now(), "iavl_v2", "has") + } + var ( + err error + val []byte + ) + if tree.storeLatestLeaves { + val, err = tree.sql.GetLatestLeaf(key) + } else { + if tree.root == nil { + return false, nil + } + _, val, err = tree.root.get(tree, key) + } + if err != nil { + return false, err + } + return val != nil, nil +} + +// Set sets a key in the working tree. Nil values are invalid. The given +// key/value byte slices must not be modified after this call, since they point +// to slices stored within IAVL. It returns true when an existing value was +// updated, while false means it was a new key. 
+func (tree *Tree) Set(key, value []byte) (updated bool, err error) { + if tree.metricsProxy != nil { + defer tree.metricsProxy.MeasureSince(time.Now(), "iavl_v2", "set") + } + updated, err = tree.set(key, value) + if err != nil { + return false, err + } + if updated { + tree.metrics.TreeUpdate++ + } else { + tree.metrics.TreeNewNode++ + } + return updated, nil +} + +func (tree *Tree) set(key []byte, value []byte) (updated bool, err error) { + if value == nil { + return updated, fmt.Errorf("attempt to store nil value at key '%s'", key) + } + + if tree.root == nil { + tree.root = tree.NewNode(key, value) + return updated, nil + } + + tree.root, updated, err = tree.recursiveSet(tree.root, key, value) + return updated, err +} + +func (tree *Tree) recursiveSet(node *Node, key []byte, value []byte) ( + newSelf *Node, updated bool, err error, +) { + if node == nil { + panic("node is nil") + } + if node.isLeaf() { + switch bytes.Compare(key, node.key) { + case -1: // setKey < leafKey + tree.metrics.PoolGet += 2 + parent := tree.pool.Get() + parent.nodeKey = tree.nextNodeKey() + parent.key = node.key + parent.subtreeHeight = 1 + parent.size = 2 + parent.dirty = true + parent.setLeft(tree.NewNode(key, value)) + parent.setRight(node) + + tree.workingBytes += parent.sizeBytes() + tree.workingSize++ + return parent, false, nil + case 1: // setKey > leafKey + tree.metrics.PoolGet += 2 + parent := tree.pool.Get() + parent.nodeKey = tree.nextNodeKey() + parent.key = key + parent.subtreeHeight = 1 + parent.size = 2 + parent.dirty = true + parent.setLeft(node) + parent.setRight(tree.NewNode(key, value)) + + tree.workingBytes += parent.sizeBytes() + tree.workingSize++ + return parent, false, nil + default: + tree.addOrphan(node) + wasDirty := node.dirty + tree.mutateNode(node) + if tree.isReplaying { + node.hash = value + } else { + if wasDirty { + tree.workingBytes -= node.sizeBytes() + } + node.value = value + node._hash() + if !tree.storeLeafValues { + node.value = nil + } + 
tree.workingBytes += node.sizeBytes() + } + return node, true, nil + } + + } else { + tree.addOrphan(node) + tree.mutateNode(node) + + var child *Node + if bytes.Compare(key, node.key) < 0 { + child, updated, err = tree.recursiveSet(node.left(tree), key, value) + if err != nil { + return nil, updated, err + } + node.setLeft(child) + } else { + child, updated, err = tree.recursiveSet(node.right(tree), key, value) + if err != nil { + return nil, updated, err + } + node.setRight(child) + } + + if updated { + return node, updated, nil + } + err = node.calcHeightAndSize(tree) + if err != nil { + return nil, false, err + } + newNode, err := tree.balance(node) + if err != nil { + return nil, false, err + } + return newNode, updated, err + } +} + +// Remove removes a key from the working tree. The given key byte slice should not be modified +// after this call, since it may point to data stored inside IAVL. +func (tree *Tree) Remove(key []byte) ([]byte, bool, error) { + if tree.metricsProxy != nil { + tree.metricsProxy.MeasureSince(time.Now(), "iavL_v2", "remove") + } + + if tree.root == nil { + return nil, false, nil + } + newRoot, _, value, removed, err := tree.recursiveRemove(tree.root, key) + if err != nil { + return nil, false, err + } + if !removed { + return nil, false, nil + } + + tree.metrics.TreeDelete++ + + tree.root = newRoot + return value, true, nil +} + +// removes the node corresponding to the passed key and balances the tree. +// It returns: +// - the hash of the new node (or nil if the node is the one removed) +// - the node that replaces the orig. node after remove +// - new leftmost leaf key for tree after successfully removing 'key' if changed. 
+// - the removed value +func (tree *Tree) recursiveRemove(node *Node, key []byte) (newSelf *Node, newKey []byte, newValue []byte, removed bool, err error) { + if node.isLeaf() { + if bytes.Equal(key, node.key) { + // we don't create an orphan here because the leaf node is removed + tree.addDelete(node) + tree.returnNode(node) + return nil, nil, node.value, true, nil + } + return node, nil, nil, false, nil + } + + if err != nil { + return nil, nil, nil, false, err + } + + // node.key < key; we go to the left to find the key: + if bytes.Compare(key, node.key) < 0 { + newLeftNode, newKey, value, removed, err := tree.recursiveRemove(node.left(tree), key) + if err != nil { + return nil, nil, nil, false, err + } + + if !removed { + return node, nil, value, removed, nil + } + + tree.addOrphan(node) + + // left node held value, was removed + // collapse `node.rightNode` into `node` + if newLeftNode == nil { + right := node.right(tree) + k := node.key + tree.returnNode(node) + return right, k, value, removed, nil + } + + tree.mutateNode(node) + + node.setLeft(newLeftNode) + err = node.calcHeightAndSize(tree) + if err != nil { + return nil, nil, nil, false, err + } + node, err = tree.balance(node) + if err != nil { + return nil, nil, nil, false, err + } + + return node, newKey, value, removed, nil + } + // node.key >= key; either found or look to the right: + newRightNode, newKey, value, removed, err := tree.recursiveRemove(node.right(tree), key) + if err != nil { + return nil, nil, nil, false, err + } + + if !removed { + return node, nil, value, removed, nil + } + + tree.addOrphan(node) + + // right node held value, was removed + // collapse `node.leftNode` into `node` + if newRightNode == nil { + left := node.left(tree) + tree.returnNode(node) + return left, nil, value, removed, nil + } + + tree.mutateNode(node) + + node.setRight(newRightNode) + if newKey != nil { + node.key = newKey + } + err = node.calcHeightAndSize(tree) + if err != nil { + return nil, nil, nil, false, 
err + } + + node, err = tree.balance(node) + if err != nil { + return nil, nil, nil, false, err + } + + return node, nil, value, removed, nil +} + +func (tree *Tree) Size() int64 { + return tree.root.size +} + +func (tree *Tree) Height() int8 { + return tree.root.subtreeHeight +} + +func (tree *Tree) nextNodeKey() NodeKey { + tree.sequence++ + nk := NewNodeKey(tree.version+1, tree.sequence) + return nk +} + +func (tree *Tree) mutateNode(node *Node) { + // this second conditional is only relevant in replay; or more specifically, in cases where hashing has been + // deferred between versions + if node.hash == nil && node.nodeKey.Version() == tree.version+1 { + return + } + node.hash = nil + node.nodeKey = tree.nextNodeKey() + + if node.dirty { + return + } + + node.dirty = true + tree.workingSize++ + if !node.isLeaf() { + tree.workingBytes += node.sizeBytes() + } +} + +func (tree *Tree) addOrphan(node *Node) { + if node.hash == nil { + return + } + if !node.isLeaf() && node.nodeKey.Version() <= tree.checkpoints.Last() { + tree.branchOrphans = append(tree.branchOrphans, node.nodeKey) + } else if node.isLeaf() && !node.dirty { + tree.leafOrphans = append(tree.leafOrphans, node.nodeKey) + } +} + +func (tree *Tree) addDelete(node *Node) { + // added and removed in the same version; no op. + if node.nodeKey.Version() == tree.version+1 { + return + } + del := &nodeDelete{ + deleteKey: tree.nextNodeKey(), + leafKey: node.key, + } + tree.deletes = append(tree.deletes, del) +} + +// NewNode returns a new node from a key, value and version. 
+func (tree *Tree) NewNode(key []byte, value []byte) *Node { + node := tree.pool.Get() + + node.nodeKey = tree.nextNodeKey() + + node.key = key + node.subtreeHeight = 0 + node.size = 1 + + if tree.isReplaying { + node.hash = value + } else { + node.value = value + node._hash() + if !tree.storeLeafValues { + node.value = nil + } + } + + node.dirty = true + tree.workingBytes += node.sizeBytes() + tree.workingSize++ + return node +} + +func (tree *Tree) returnNode(node *Node) { + if node.dirty { + tree.workingBytes -= node.sizeBytes() + tree.workingSize-- + } + tree.pool.Put(node) +} + +func (tree *Tree) Close() error { + tree.writerCancel() + return tree.sql.Close() +} + +func (tree *Tree) Hash() []byte { + if tree.root == nil { + return emptyHash + } + return tree.root.hash +} + +func (tree *Tree) Version() int64 { + return tree.version +} + +func (tree *Tree) WriteLatestLeaves() (err error) { + return tree.sql.WriteLatestLeaves(tree) +} + +func (tree *Tree) replayChangelog(toVersion int64, targetHash []byte) error { + return tree.sql.replayChangelog(tree, toVersion, targetHash) +} + +func (tree *Tree) DeleteVersionsTo(toVersion int64) error { + tree.sqlWriter.treePruneCh <- &pruneSignal{pruneVersion: toVersion, checkpoints: *tree.checkpoints} + tree.sqlWriter.leafPruneCh <- &pruneSignal{pruneVersion: toVersion, checkpoints: *tree.checkpoints} + return nil +} + +func (tree *Tree) WorkingBytes() uint64 { + return tree.workingBytes +} + +func (tree *Tree) SetShouldCheckpoint() { + tree.shouldCheckpoint = true +} diff --git a/v2/tree_test.go b/v2/tree_test.go new file mode 100644 index 000000000..7bd5aa53a --- /dev/null +++ b/v2/tree_test.go @@ -0,0 +1,787 @@ +// TODO move to package iavl_test +// this means an audit of exported fields and types. 
package iavl

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"runtime"
	"sort"
	"testing"
	"time"
	"unsafe"

	"github.com/cosmos/iavl-bench/bench"
	"github.com/cosmos/iavl/v2/metrics"
	"github.com/cosmos/iavl/v2/testutil"
	"github.com/dustin/go-humanize"
	api "github.com/kocubinski/costor-api"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/stretchr/testify/require"
)

// MemUsage returns a short human-readable summary of current heap/sys usage
// and GC count, for progress reporting in long-running tests.
func MemUsage() string {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// For info on each, see: https://golang.org/pkg/runtime/#MemStats
	s := fmt.Sprintf("alloc=%s sys=%s gc=%d",
		humanize.Bytes(m.HeapAlloc),
		//humanize.Bytes(m.TotalAlloc),
		humanize.Bytes(m.Sys),
		m.NumGC)
	return s
}

// testTreeBuild drives a MultiTree from a changeset iterator until opts.Until,
// saving a version per changeset, periodically printing throughput reports,
// and finally asserting the expected version and root hash. Returns the total
// leaf operation count.
func testTreeBuild(t *testing.T, multiTree *MultiTree, opts *testutil.TreeBuildOptions) (cnt int64) {
	var (
		version int64
		err     error
	)
	cnt = 1

	// generator
	itr := opts.Iterator
	fmt.Printf("Initial memory usage from generators:\n%s\n", MemUsage())

	sampleRate := int64(100_000)
	if opts.SampleRate != 0 {
		sampleRate = opts.SampleRate
	}

	since := time.Now()
	itrStart := time.Now()

	// report prints a progress line; it reads cnt/version from the enclosing
	// scope and resets per-tree write metrics after sampling them.
	report := func() {
		dur := time.Since(since)

		var (
			workingBytes uint64
			workingSize  int64
			writeLeaves  int64
			writeTime    time.Duration
		)
		for _, tr := range multiTree.Trees {
			m := tr.sql.metrics
			workingBytes += tr.workingBytes
			workingSize += tr.workingSize
			writeLeaves += m.WriteLeaves
			writeTime += m.WriteTime
			m.WriteDurations = nil
			m.WriteLeaves = 0
			m.WriteTime = 0
		}
		fmt.Printf("leaves=%s time=%s last=%s μ=%s version=%d work-size=%s work-bytes=%s %s\n",
			humanize.Comma(cnt),
			dur.Round(time.Millisecond),
			humanize.Comma(int64(float64(sampleRate)/time.Since(since).Seconds())),
			humanize.Comma(int64(float64(cnt)/time.Since(itrStart).Seconds())),
			version,
			humanize.Comma(workingSize),
			humanize.Bytes(workingBytes),
			MemUsage())

		if writeTime > 0 {
			fmt.Printf("writes: cnt=%s wr/s=%s dur/wr=%s dur=%s\n",
				humanize.Comma(writeLeaves),
				humanize.Comma(int64(float64(writeLeaves)/writeTime.Seconds())),
				time.Duration(int64(writeTime)/writeLeaves),
				writeTime.Round(time.Millisecond),
			)
		}

		if err := multiTree.QueryReport(0); err != nil {
			t.Fatalf("query report err %v", err)
		}

		fmt.Println()

		since = time.Now()
	}

	for ; itr.Valid(); err = itr.Next() {
		require.NoError(t, err)
		changeset := itr.Nodes()
		for ; changeset.Valid(); err = changeset.Next() {
			cnt++
			require.NoError(t, err)
			node := changeset.GetNode()

			//var keyBz bytes.Buffer
			//keyBz.Write([]byte(node.StoreKey))
			//keyBz.Write(node.Key)
			//key := keyBz.Bytes()
			key := node.Key

			// lazily mount a tree the first time its store key appears.
			tree, ok := multiTree.Trees[node.StoreKey]
			if !ok {
				require.NoError(t, multiTree.MountTree(node.StoreKey))
				tree = multiTree.Trees[node.StoreKey]
			}

			if !node.Delete {
				_, err = tree.Set(key, node.Value)
				require.NoError(t, err)
			} else {
				_, _, err := tree.Remove(key)
				require.NoError(t, err)
			}

			if cnt%sampleRate == 0 {
				report()
			}
		}

		_, version, err = multiTree.SaveVersionConcurrently()
		require.NoError(t, err)

		// NOTE(review): duplicate require.NoError on the same err below.
		require.NoError(t, err)
		if version == opts.Until {
			break
		}
	}
	fmt.Printf("final version: %d, hash: %x\n", version, multiTree.Hash())
	for sk, tree := range multiTree.Trees {
		fmt.Printf("storekey: %s height: %d, size: %d\n", sk, tree.Height(), tree.Size())
	}
	fmt.Printf("mean leaves/ms %s\n", humanize.Comma(cnt/time.Since(itrStart).Milliseconds()))
	require.Equal(t, version, opts.Until)
	require.Equal(t, opts.UntilHash, fmt.Sprintf("%x", multiTree.Hash()))
	return cnt
}

func TestTree_Hash(t *testing.T) {
	var err error

	tmpDir := t.TempDir()
	//tmpDir := "/tmp/iavl-test"
	t.Logf("levelDb tmpDir: %s\n", tmpDir)

	require.NoError(t, err)
	opts :=
testutil.BigTreeOptions_100_000() + + // this hash was validated as correct (with this same dataset) in iavl-bench + // with `go run . tree --seed 1234 --dataset std` + // at this commit tree: https://github.com/cosmos/iavl-bench/blob/3a6a1ec0a8cbec305e46239454113687da18240d/iavl-v0/main.go#L136 + opts.Until = 100 + opts.UntilHash = "0101e1d6f3158dcb7221acd7ed36ce19f2ef26847ffea7ce69232e362539e5cf" + treeOpts := TreeOptions{CheckpointInterval: 10, HeightFilter: 1, StateStorage: true, EvictionDepth: 8} + + testStart := time.Now() + multiTree := NewMultiTree(tmpDir, treeOpts) + itrs, ok := opts.Iterator.(*bench.ChangesetIterators) + require.True(t, ok) + for _, sk := range itrs.StoreKeys() { + require.NoError(t, multiTree.MountTree(sk)) + } + leaves := testTreeBuild(t, multiTree, opts) + treeDuration := time.Since(testStart) + fmt.Printf("mean leaves/s: %s\n", humanize.Comma(int64(float64(leaves)/treeDuration.Seconds()))) + + require.NoError(t, multiTree.Close()) +} + +func TestTree_Build_Load(t *testing.T) { + // build the initial version of the tree with periodic checkpoints + //tmpDir := t.TempDir() + tmpDir := "/tmp/iavl-v2-test" + opts := testutil.NewTreeBuildOptions().With10_000() + multiTree := NewMultiTree(tmpDir, TreeOptions{CheckpointInterval: 4000, HeightFilter: 0, StateStorage: false}) + itrs, ok := opts.Iterator.(*bench.ChangesetIterators) + require.True(t, ok) + for _, sk := range itrs.StoreKeys() { + require.NoError(t, multiTree.MountTree(sk)) + } + t.Log("building initial tree to version 10,000") + testTreeBuild(t, multiTree, opts) + + t.Log("snapshot tree at version 10,000") + // take a snapshot at version 10,000 + require.NoError(t, multiTree.SnapshotConcurrently()) + require.NoError(t, multiTree.Close()) + + t.Log("import snapshot into new tree") + mt, err := ImportMultiTree(multiTree.pool, 10_000, tmpDir, DefaultTreeOptions()) + require.NoError(t, err) + + t.Log("build tree to version 12,000 and verify hash") + require.NoError(t, 
opts.Iterator.Next()) + require.Equal(t, int64(10_001), opts.Iterator.Version()) + opts.Until = 12_000 + opts.UntilHash = "3a037f8dd67a5e1a9ef83a53b81c619c9ac0233abee6f34a400fb9b9dfbb4f8d" + testTreeBuild(t, mt, opts) + require.NoError(t, mt.Close()) + + t.Log("export the tree at version 12,000 and import it into a sql db in pre-order") + traverseOrder := PreOrder + restorePreOrderMt := NewMultiTree(t.TempDir(), TreeOptions{CheckpointInterval: 4000}) + for sk, tree := range multiTree.Trees { + require.NoError(t, restorePreOrderMt.MountTree(sk)) + exporter := tree.Export(traverseOrder) + + restoreTree := restorePreOrderMt.Trees[sk] + _, err := restoreTree.sql.WriteSnapshot(context.Background(), tree.Version(), exporter.Next, SnapshotOptions{WriteCheckpoint: true, TraverseOrder: traverseOrder}) + require.NoError(t, err) + require.NoError(t, restoreTree.LoadSnapshot(tree.Version(), traverseOrder)) + } + require.NoError(t, restorePreOrderMt.Close()) + + t.Log("export the tree at version 12,000 and import it into a sql db in post-order") + traverseOrder = PostOrder + restorePostOrderMt := NewMultiTree(t.TempDir(), TreeOptions{CheckpointInterval: 4000}) + for sk, tree := range multiTree.Trees { + require.NoError(t, restorePostOrderMt.MountTree(sk)) + exporter := tree.Export(traverseOrder) + + restoreTree := restorePostOrderMt.Trees[sk] + _, err := restoreTree.sql.WriteSnapshot(context.Background(), tree.Version(), exporter.Next, SnapshotOptions{WriteCheckpoint: true, TraverseOrder: traverseOrder}) + require.NoError(t, err) + require.NoError(t, restoreTree.LoadSnapshot(tree.Version(), traverseOrder)) + } + require.Equal(t, restorePostOrderMt.Hash(), restorePreOrderMt.Hash()) + + t.Log("build tree to version 20,000 and verify hash") + require.NoError(t, opts.Iterator.Next()) + require.Equal(t, int64(12_001), opts.Iterator.Version()) + opts.Until = 20_000 + opts.UntilHash = "25907b193c697903218d92fa70a87ef6cdd6fa5b9162d955a4d70a9d5d2c4824" + testTreeBuild(t, 
restorePostOrderMt, opts) + require.NoError(t, restorePostOrderMt.Close()) +} + +// pre-requisites for the 2 tests below: +// $ go run ./cmd gen tree --db /tmp/iavl-v2 --limit 1 --type osmo-like-many +// $ go run ./cmd snapshot --db /tmp/iavl-v2 --version 1 +// mkdir -p /tmp/osmo-like-many/v2 && go run ./cmd gen emit --start 2 --limit 5000 --type osmo-like-many --out /tmp/osmo-like-many/v2 +func TestOsmoLike_HotStart(t *testing.T) { + tmpDir := "/tmp/iavl-v2" + // logDir := "/tmp/osmo-like-many-v2" + logDir := "/Users/mattk/src/scratch/osmo-like-many/v2" + pool := NewNodePool() + multiTree, err := ImportMultiTree(pool, 1, tmpDir, TreeOptions{HeightFilter: 0, StateStorage: false}) + require.NoError(t, err) + require.NotNil(t, multiTree) + opts := testutil.CompactedChangelogs(logDir) + opts.SampleRate = 250_000 + + opts.Until = 1_000 + opts.UntilHash = "557663181d9ab97882ecfc6538e3b4cfe31cd805222fae905c4b4f4403ca5cda" + + testTreeBuild(t, multiTree, opts) +} + +func TestOsmoLike_ColdStart(t *testing.T) { + tmpDir := "/tmp/iavl-v2" + + treeOpts := DefaultTreeOptions() + treeOpts.CheckpointInterval = -1 + treeOpts.CheckpointMemory = 1.5 * 1024 * 1024 * 1024 + treeOpts.StateStorage = false + treeOpts.HeightFilter = 1 + treeOpts.EvictionDepth = 16 + treeOpts.MetricsProxy = newPrometheusMetricsProxy() + multiTree := NewMultiTree(tmpDir, treeOpts) + require.NoError(t, multiTree.MountTrees()) + require.NoError(t, multiTree.LoadVersion(1)) + // require.NoError(t, multiTree.WarmLeaves()) + + // logDir := "/tmp/osmo-like-many-v2" + opts := testutil.CompactedChangelogs("/Users/mattk/src/scratch/osmo-like-many/v2") + opts.SampleRate = 250_000 + + opts.Until = 1_000 + opts.UntilHash = "557663181d9ab97882ecfc6538e3b4cfe31cd805222fae905c4b4f4403ca5cda" + + testTreeBuild(t, multiTree, opts) +} + +func TestTree_Import(t *testing.T) { + tmpDir := "/Users/mattk/src/scratch/sqlite/height-zero" + + pool := NewNodePool() + sql, err := NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + 
require.NoError(t, err) + + root, err := sql.ImportSnapshotFromTable(1, PreOrder, true) + require.NoError(t, err) + require.NotNil(t, root) +} + +func TestTree_Rehash(t *testing.T) { + pool := NewNodePool() + sql, err := NewSqliteDb(pool, SqliteDbOptions{Path: "/Users/mattk/src/scratch/sqlite/height-zero"}) + require.NoError(t, err) + tree := NewTree(sql, pool, TreeOptions{}) + require.NoError(t, tree.LoadVersion(1)) + + savedHash := make([]byte, 32) + n := copy(savedHash, tree.root.hash) + require.Equal(t, 32, n) + var step func(node *Node) + step = func(node *Node) { + if node.isLeaf() { + return + } + node.hash = nil + step(node.left(tree)) + step(node.right(tree)) + node._hash() + } + step(tree.root) + require.Equal(t, savedHash, tree.root.hash) +} + +func TestTreeSanity(t *testing.T) { + cases := []struct { + name string + treeFn func() *Tree + hashFn func(*Tree) []byte + }{ + { + name: "sqlite", + treeFn: func() *Tree { + pool := NewNodePool() + sql, err := NewInMemorySqliteDb(pool) + require.NoError(t, err) + return NewTree(sql, pool, TreeOptions{}) + }, + hashFn: func(tree *Tree) []byte { + hash, _, err := tree.SaveVersion() + require.NoError(t, err) + return hash + }, + }, + { + name: "no db", + treeFn: func() *Tree { + pool := NewNodePool() + return NewTree(nil, pool, TreeOptions{}) + }, + hashFn: func(tree *Tree) []byte { + rehashTree(tree.root) + tree.version++ + return tree.root.hash + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + tree := tc.treeFn() + opts := testutil.NewTreeBuildOptions() + itr := opts.Iterator + var err error + for ; itr.Valid(); err = itr.Next() { + if itr.Version() > 150 { + break + } + require.NoError(t, err) + nodes := itr.Nodes() + for ; nodes.Valid(); err = nodes.Next() { + require.NoError(t, err) + node := nodes.GetNode() + if node.Delete { + _, _, err := tree.Remove(node.Key) + require.NoError(t, err) + } else { + _, err := tree.Set(node.Key, node.Value) + require.NoError(t, err) + } + } 
+ switch itr.Version() { + case 1: + h := tc.hashFn(tree) + require.Equal(t, "48c3113b8ba523d3d539d8aea6fce28814e5688340ba7334935c1248b6c11c7a", hex.EncodeToString(h)) + require.Equal(t, int64(104938), tree.root.size) + fmt.Printf("version=%d, hash=%x size=%d\n", itr.Version(), h, tree.root.size) + case 150: + h := tc.hashFn(tree) + require.Equal(t, "04c42dd1cec683cbbd4974027e4b003b848e389a33d03d7a9105183e6d108dd9", hex.EncodeToString(h)) + require.Equal(t, int64(105030), tree.root.size) + fmt.Printf("version=%d, hash=%x size=%d\n", itr.Version(), h, tree.root.size) + } + } + }) + } +} + +func Test_EmptyTree(t *testing.T) { + pool := NewNodePool() + sql, err := NewInMemorySqliteDb(pool) + require.NoError(t, err) + tree := NewTree(sql, pool, TreeOptions{}) + + _, err = tree.Set([]byte("foo"), []byte("bar")) + require.NoError(t, err) + _, err = tree.Set([]byte("baz"), []byte("qux")) + require.NoError(t, err) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + _, _, err = tree.Remove([]byte("foo")) + require.NoError(t, err) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + _, _, err = tree.Remove([]byte("baz")) + require.NoError(t, err) + hash, version, err := tree.SaveVersion() + require.NoError(t, err) + + require.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(sha256.New().Sum(nil))) + require.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + + err = tree.LoadVersion(version) + require.NoError(t, err) +} + +func Test_Replay_Tmp(t *testing.T) { + pool := NewNodePool() + sql, err := NewSqliteDb(pool, SqliteDbOptions{Path: "/Users/mattk/src/scratch/icahost"}) + require.NoError(t, err) + tree := NewTree(sql, pool, TreeOptions{StateStorage: true}) + err = tree.LoadVersion(13946707) + require.NoError(t, err) +} + +func Test_Replay(t *testing.T) { + unsafeBytesToStr := func(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) + } + const 
versions = int64(1_000) + gen := bench.ChangesetGenerator{ + StoreKey: "replay", + Seed: 1, + KeyMean: 20, + KeyStdDev: 3, + ValueMean: 20, + ValueStdDev: 3, + InitialSize: 20, + FinalSize: 500, + Versions: versions, + ChangePerVersion: 10, + DeleteFraction: 0.2, + } + itr, err := gen.Iterator() + require.NoError(t, err) + + pool := NewNodePool() + tmpDir := t.TempDir() + sql, err := NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + require.NoError(t, err) + tree := NewTree(sql, pool, TreeOptions{StateStorage: true, CheckpointInterval: 100}) + + // we must buffer all sets/deletes and order them first for replay to work properly. + // store v1 and v2 already do this via cachekv write buffering. + // from cachekv a nil value is treated as a deletion; it is a domain requirement of the SDK that nil values are disallowed + // since from the perspective of the cachekv they are indistinguishable from a deletion. + + ingest := func(start, last int64) { + for ; itr.Valid(); err = itr.Next() { + if itr.Version() > last { + break + } + require.NoError(t, err) + changeset := itr.Nodes() + cache := make(map[string]*api.Node) + for ; changeset.Valid(); err = changeset.Next() { + require.NoError(t, err) + node := changeset.GetNode() + if itr.Version() < start { + continue + } + if !node.Delete { + // merge multiple sets into one set + cache[unsafeBytesToStr(node.Key)] = node + } else { + cache[unsafeBytesToStr(node.Key)] = nil + } + } + keys := make([]string, 0, len(cache)) + for k := range cache { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + node := cache[k] + if node == nil { + _, _, err := tree.Remove([]byte(k)) + require.NoError(t, err) + } else { + _, err := tree.Set([]byte(k), node.Value) + require.NoError(t, err) + } + } + + if len(cache) > 0 { + _, v, err := tree.SaveVersion() + fmt.Printf("version=%d, hash=%x\n", v, tree.Hash()) + require.NoError(t, err) + } + } + + require.NoError(t, tree.Close()) + } + + ingest(1, 150) + + sql, err 
= NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + require.NoError(t, err) + tree = NewTree(sql, pool, TreeOptions{StateStorage: true}) + err = tree.LoadVersion(140) + require.NoError(t, err) + itr, err = gen.Iterator() + require.NoError(t, err) + ingest(141, 170) + + sql, err = NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + require.NoError(t, err) + tree = NewTree(sql, pool, TreeOptions{StateStorage: true, CheckpointInterval: 100}) + err = tree.LoadVersion(170) + require.NoError(t, err) + itr, err = gen.Iterator() + require.NoError(t, err) + ingest(171, 250) + + //sql, err = NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + //require.NoError(t, err) + //tree = NewTree(sql, pool, TreeOptions{StateStorage: true}) + //require.NoError(t, err) + //require.NoError(t, tree.Close()) + // + //sql, err = NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir}) + //require.NoError(t, err) + //tree = NewTree(sql, pool, TreeOptions{StateStorage: true}) + //err = tree.LoadVersion(5) + //require.NoError(t, err) + // + //tree = NewTree(sql, pool, TreeOptions{StateStorage: true}) + //err = tree.LoadVersion(555) + //require.NoError(t, err) + // + //tree = NewTree(sql, pool, TreeOptions{StateStorage: true}) + //err = tree.LoadVersion(1000) + //require.NoError(t, err) +} + +func Test_Prune_Logic(t *testing.T) { + const versions = int64(1_000) + gen := bench.ChangesetGenerator{ + StoreKey: "replay", + Seed: 1, + KeyMean: 20, + KeyStdDev: 3, + ValueMean: 20, + ValueStdDev: 3, + InitialSize: 20, + FinalSize: 500, + Versions: versions, + ChangePerVersion: 10, + DeleteFraction: 0.2, + } + itr, err := gen.Iterator() + require.NoError(t, err) + + pool := NewNodePool() + // tmpDir := "/tmp/prune-logic" + tmpDir := t.TempDir() + sql, err := NewSqliteDb(pool, SqliteDbOptions{Path: tmpDir, ShardTrees: false}) + require.NoError(t, err) + tree := NewTree(sql, pool, TreeOptions{StateStorage: true, CheckpointInterval: 100}) + + for ; itr.Valid(); err = itr.Next() { + require.NoError(t, err) + 
changeset := itr.Nodes() + for ; changeset.Valid(); err = changeset.Next() { + require.NoError(t, err) + node := changeset.GetNode() + if node.Delete { + _, _, err := tree.Remove(node.Key) + require.NoError(t, err) + } else { + _, err := tree.Set(node.Key, node.Value) + require.NoError(t, err) + } + } + _, version, err := tree.SaveVersion() + // fmt.Printf("version=%d, hash=%x\n", version, tree.Hash()) + switch version { /* prune at several points — presumably chosen to straddle the CheckpointInterval=100 boundaries (below one, exactly at one, between, and at a later one); NOTE(review): confirm intent */ + case 30: + require.NoError(t, tree.DeleteVersionsTo(20)) + case 100: + require.NoError(t, tree.DeleteVersionsTo(100)) + case 150: + require.NoError(t, tree.DeleteVersionsTo(140)) + case 650: + require.NoError(t, tree.DeleteVersionsTo(650)) + } + require.NoError(t, err) + } +} + +func Test_Prune_Performance(t *testing.T) { /* NOTE(review): depends on pre-existing fixtures at /tmp/iavl-v2 and a hard-coded developer home path below; not runnable in CI as-is */ + tmpDir := "/tmp/iavl-v2" + + multiTree := NewMultiTree(tmpDir, TreeOptions{CheckpointInterval: 50, StateStorage: false}) + require.NoError(t, multiTree.MountTrees()) + require.NoError(t, multiTree.LoadVersion(1)) + require.NoError(t, multiTree.WarmLeaves()) + + // logDir := "/tmp/osmo-like-many-v2" + opts := testutil.CompactedChangelogs("/Users/mattk/src/scratch/osmo-like-many/v2") + opts.SampleRate = 250_000 + + opts.Until = 1_000 + opts.UntilHash = "557663181d9ab97882ecfc6538e3b4cfe31cd805222fae905c4b4f4403ca5cda" + + itr := opts.Iterator + var ( + err error + cnt int64 + version int64 + since = time.Now() + itrStart = time.Now() + lastPrune = 1 + ) + report := func() { /* logs throughput and per-tree write metrics accumulated since the last report, then zeroes the write counters */ + dur := time.Since(since) + + var ( + workingBytes uint64 + workingSize int64 + writeLeaves int64 + writeTime time.Duration + ) + for _, tr := range multiTree.Trees { + m := tr.sql.metrics + workingBytes += tr.workingBytes + workingSize += tr.workingSize + writeLeaves += m.WriteLeaves + writeTime += m.WriteTime + m.WriteDurations = nil + m.WriteLeaves = 0 + m.WriteTime = 0 + } + fmt.Printf("leaves=%s time=%s last=%s μ=%s version=%d work-bytes=%s work-size=%s %s\n", + humanize.Comma(cnt), + dur.Round(time.Millisecond), +
humanize.Comma(int64(float64(opts.SampleRate)/time.Since(since).Seconds())), + humanize.Comma(int64(float64(cnt)/time.Since(itrStart).Seconds())), + version, + humanize.Bytes(workingBytes), + humanize.Comma(workingSize), + MemUsage()) + + if writeTime > 0 { /* guard: the rate/average divisions below require non-zero writeTime */ + fmt.Printf("writes: cnt=%s wr/s=%s dur/wr=%s dur=%s\n", + humanize.Comma(writeLeaves), + humanize.Comma(int64(float64(writeLeaves)/writeTime.Seconds())), + time.Duration(int64(writeTime)/writeLeaves), + writeTime.Round(time.Millisecond), + ) + } + + if err := multiTree.QueryReport(0); err != nil { + t.Fatalf("query report err %v", err) + } + + fmt.Println() + + since = time.Now() + } + + for ; itr.Valid(); err = itr.Next() { + require.NoError(t, err) + changeset := itr.Nodes() + for ; changeset.Valid(); err = changeset.Next() { + cnt++ + require.NoError(t, err) + node := changeset.GetNode() + key := node.Key + + tree, ok := multiTree.Trees[node.StoreKey] + require.True(t, ok) + + if !node.Delete { + _, err = tree.Set(key, node.Value) + require.NoError(t, err) + } else { + _, _, err := tree.Remove(key) + require.NoError(t, err) + } + + if cnt%opts.SampleRate == 0 { + report() + } + } + + _, version, err = multiTree.SaveVersionConcurrently() + require.NoError(t, err) + + require.NoError(t, err) /* NOTE(review): duplicate of the check above — harmless but redundant */ + if version == opts.Until { + break + } + + lastPrune++ + // trigger two prunes close together in order to test the receipt of a prune signal before a previous prune has completed + if lastPrune == 80 || lastPrune == 85 { + pruneTo := version - 1 + t.Logf("prune to version %d", pruneTo) + for _, tree := range multiTree.Trees { + require.NoError(t, tree.DeleteVersionsTo(pruneTo)) + } + t.Log("prune signals sent") + if lastPrune == 85 { + lastPrune = 0 + } + } + } +} + +var _ metrics.Proxy = &prometheusMetricsProxy{} /* compile-time assertion that prometheusMetricsProxy implements metrics.Proxy */ + +type prometheusMetricsProxy struct { /* bridges iavl working-set metrics to Prometheus gauges */ + workingSize prometheus.Gauge + workingBytes prometheus.Gauge +} + +func newPrometheusMetricsProxy() *prometheusMetricsProxy { /* registers the two gauges and serves them on :2112/metrics from a background goroutine */ + p := &prometheusMetricsProxy{} +
p.workingSize = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "iavl_working_size", + Help: "working size", + }) + p.workingBytes = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "iavl_working_bytes", + Help: "working bytes", + }) + http.Handle("/metrics", promhttp.Handler()) + go func() { /* NOTE(review): this goroutine has no shutdown path; acceptable for a test helper, panics if the port is taken */ + err := http.ListenAndServe(":2112", nil) + if err != nil { + panic(err) + } + }() + return p +} + +func (p *prometheusMetricsProxy) IncrCounter(_ float32, _ ...string) { +} + +func (p *prometheusMetricsProxy) SetGauge(val float32, keys ...string) { + k := keys[1] /* NOTE(review): assumes the metric name is at keys[1], so len(keys) >= 2 — panics otherwise; confirm emitter contract */ + switch k { + case "working_size": + p.workingSize.Set(float64(val)) + case "working_bytes": + p.workingBytes.Set(float64(val)) + } +} + +func (p *prometheusMetricsProxy) MeasureSince(_ time.Time, _ ...string) {} diff --git a/v2/visualize.go b/v2/visualize.go new file mode 100644 index 000000000..6ef973166 --- /dev/null +++ b/v2/visualize.go @@ -0,0 +1,45 @@ +package iavl + +import ( + "fmt" + + "github.com/emicklei/dot" +) + +func writeDotGraph(root *Node, lastGraph *dot.Graph) *dot.Graph { /* renders the tree rooted at root as a directed graphviz graph; nodes and edges absent from lastGraph are colored red to highlight what changed since the previous snapshot */ + graph := dot.NewGraph(dot.Directed) + + var traverse func(node *Node) dot.Node + var i int /* NOTE(review): i is incremented but never read — presumably a debug leftover */ + traverse = func(node *Node) dot.Node { + if node == nil { + return dot.Node{} + } + i++ + nodeKey := fmt.Sprintf("%s-%d", node.key, node.subtreeHeight) + nodeLabel := fmt.Sprintf("%s - %d", string(node.key), node.subtreeHeight) + n := graph.Node(nodeKey).Label(nodeLabel) + if _, found := lastGraph.FindNodeById(nodeKey); !found { + n.Attr("color", "red") + } + if node.isLeaf() { + return n + } + leftNode := traverse(node.leftNode) + rightNode := traverse(node.rightNode) + + leftEdge := n.Edge(leftNode, "l") + rightEdge := n.Edge(rightNode, "r") + if edges := lastGraph.FindEdges(n, leftNode); len(edges) == 0 { + leftEdge.Attr("color", "red") + } + if edges := lastGraph.FindEdges(n, rightNode); len(edges) == 0 { + rightEdge.Attr("color", "red") + } + + return n + } + + traverse(root) + return graph +}