diff --git a/.github/.repo b/.github/.repo new file mode 100644 index 0000000000..4638b1400b --- /dev/null +++ b/.github/.repo @@ -0,0 +1 @@ +github.com/akash-network/node diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 0fffe23c06..b31f4a73de 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -182,6 +182,10 @@ jobs: network-upgrade-names: runs-on: upgrade-tester steps: + - name: Cleanup build folder + run: | + sudo rm -rf ./* || true + sudo rm -rf ./.??* || true - uses: actions/checkout@v4 - run: git fetch --prune --unshallow - name: Detect required Go version @@ -209,6 +213,10 @@ jobs: network-upgrade: runs-on: upgrade-tester steps: + - name: Cleanup build folder + run: | + sudo rm -rf ./* || true + sudo rm -rf ./.??* || true - uses: actions/checkout@v4 - run: git fetch --prune --unshallow - name: Detect required Go version @@ -237,6 +245,7 @@ jobs: name: validators-logs path: | .cache/run/upgrade/validators/logs/*.log + include-hidden-files: true dispatch-release: runs-on: ubuntu-latest diff --git a/.goreleaser-docker.yaml b/.goreleaser-docker.yaml index 1bc587bd7d..d9da6089a2 100644 --- a/.goreleaser-docker.yaml +++ b/.goreleaser-docker.yaml @@ -1,6 +1,7 @@ --- version: 2 project_name: node +dist: ./.cache/goreleaser/docker env: - GO111MODULE=on - CGO_ENABLED=1 diff --git a/.goreleaser-test-bins.yaml b/.goreleaser-test-bins.yaml new file mode 100644 index 0000000000..66517ad157 --- /dev/null +++ b/.goreleaser-test-bins.yaml @@ -0,0 +1,102 @@ +--- +project_name: node +version: 2 +dist: ./.cache/goreleaser/test-bins +env: + - GO111MODULE=on + - CGO_ENABLED=1 +builds: + - id: akash-darwin-amd64 + binary: akash + main: ./cmd/akash + goarch: + - amd64 + goos: + - darwin + env: + - CC=o64-clang + - CXX=o64-clang++ + flags: + - "-mod={{ .Env.MOD }}" + - "-tags={{ .Env.BUILD_TAGS }}" + - -trimpath + ldflags: + - "{{ .Env.BUILD_VARS }}" + - "{{ .Env.STRIP_FLAGS }}" + - "-linkmode={{ .Env.LINKMODE }}" + - id: 
akash-darwin-arm64 + binary: akash + main: ./cmd/akash + goarch: + - arm64 + goos: + - darwin + env: + - CC=oa64-clang + - CXX=oa64-clang++ + flags: + - "-mod={{ .Env.MOD }}" + - "-tags={{ .Env.BUILD_TAGS }}" + - -trimpath + ldflags: + - "{{ .Env.BUILD_VARS }}" + - "{{ .Env.STRIP_FLAGS }}" + - "-linkmode={{ .Env.LINKMODE }}" + - id: akash-linux-amd64 + binary: akash + main: ./cmd/akash + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + goarch: + - amd64 + goos: + - linux + flags: + - "-mod={{ .Env.MOD }}" + - "-tags={{ .Env.BUILD_TAGS }}" + - -trimpath + ldflags: + - "{{ .Env.BUILD_VARS }}" + - "{{ .Env.STRIP_FLAGS }}" + - "-linkmode={{ .Env.LINKMODE }}" + - -extldflags "-lc -lrt -lpthread --static" + - id: akash-linux-arm64 + binary: akash + main: ./cmd/akash + goarch: + - arm64 + goos: + - linux + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + flags: + - "-mod={{ .Env.MOD }}" + - "-tags={{ .Env.BUILD_TAGS }}" + - -trimpath + ldflags: + - "{{ .Env.BUILD_VARS }}" + - "{{ .Env.STRIP_FLAGS }}" + - "-linkmode={{ .Env.LINKMODE }}" + - -extldflags "-lc -lrt -lpthread --static" +universal_binaries: + - id: akash-darwin-universal + ids: + - akash-darwin-amd64 + - akash-darwin-arm64 + replace: true + name_template: "akash" + +archives: + - id: wo/version + builds: + - akash-darwin-universal + - akash-linux-amd64 + - akash-linux-arm64 + name_template: "akash_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: false + formats: + - zip + files: + - none* diff --git a/.goreleaser.yaml b/.goreleaser.yaml index c31d3c6e0d..3bcad0fc85 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -115,7 +115,8 @@ archives: - akash-windows-amd64 name_template: "akash_{{ .Version }}_{{ .Os }}_{{ .Arch }}" wrap_in_directory: false - format: zip + formats: + - zip files: - none* - id: wo/version @@ -126,7 +127,8 @@ archives: - akash-windows-amd64 name_template: "akash_{{ .Os }}_{{ .Arch }}" wrap_in_directory: false - format: zip + formats: + - zip files: - none* 
diff --git a/app/app.go b/app/app.go index 0fdd05dc75..ba688185d6 100644 --- a/app/app.go +++ b/app/app.go @@ -8,6 +8,7 @@ import ( "path/filepath" "time" + "github.com/cosmos/cosmos-sdk/x/authz" "github.com/cosmos/cosmos-sdk/x/feegrant" feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module" @@ -113,6 +114,10 @@ var ( allowedReceivingModAcc = map[string]bool{} ) +type ModulesStoreKeys map[string]*sdk.KVStoreKey +type ModulesTransientKeys map[string]*sdk.TransientStoreKey +type ModulesMemoryKeys map[string]*sdk.MemoryStoreKey + // AkashApp extends ABCI application type AkashApp struct { *bam.BaseApp @@ -123,9 +128,9 @@ type AkashApp struct { invCheckPeriod uint - keys map[string]*sdk.KVStoreKey - tkeys map[string]*sdk.TransientStoreKey - memkeys map[string]*sdk.MemoryStoreKey + skeys ModulesStoreKeys + tkeys ModulesTransientKeys + memkeys ModulesMemoryKeys // simulation manager sm *module.SimulationManager @@ -145,9 +150,6 @@ func NewApp( appOpts servertypes.AppOptions, options ...func(*bam.BaseApp), ) *AkashApp { - // find out the genesis time, to be used later in inflation calculation - // genesisTime := getGenesisTime(appOpts, homePath) - // TODO: Remove cdc in favor of appCodec once all modules are migrated. 
encodingConfig := MakeEncodingConfig() appCodec := encodingConfig.Marshaler @@ -159,9 +161,9 @@ func NewApp( bapp.SetVersion(version.Version) bapp.SetInterfaceRegistry(interfaceRegistry) - keys := kvStoreKeys() - tkeys := transientStoreKeys() - memkeys := memStoreKeys() + skeys := modulesStoreKeys() + tkeys := modulesTransientKeys() + memkeys := modulesMemoryKeys() app := &AkashApp{ BaseApp: bapp, @@ -169,13 +171,13 @@ func NewApp( appCodec: appCodec, interfaceRegistry: interfaceRegistry, invCheckPeriod: invCheckPeriod, - keys: keys, + skeys: skeys, tkeys: tkeys, memkeys: memkeys, } app.Configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) - app.Keepers.Cosmos.Params = initParamsKeeper(appCodec, cdc, app.keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) + app.Keepers.Cosmos.Params = initParamsKeeper(appCodec, cdc, app.skeys[paramstypes.ModuleName], tkeys[paramstypes.ModuleName]) // set the BaseApp's parameter store bapp.SetParamStore(app.Keepers.Cosmos.Params.Subspace(bam.Paramspace).WithKeyTable(paramskeeper.ConsensusParamsKeyTable())) @@ -183,8 +185,8 @@ func NewApp( // add capability keeper and ScopeToModule for ibc module app.Keepers.Cosmos.Cap = capabilitykeeper.NewKeeper( appCodec, - app.keys[capabilitytypes.StoreKey], - app.memkeys[capabilitytypes.MemStoreKey], + app.skeys[capabilitytypes.ModuleName], + app.memkeys[capabilitytypes.ModuleName], ) scopedIBCKeeper := app.Keepers.Cosmos.Cap.ScopeToModule(ibchost.ModuleName) @@ -196,24 +198,24 @@ func NewApp( app.Keepers.Cosmos.Acct = authkeeper.NewAccountKeeper( appCodec, - app.keys[authtypes.StoreKey], + app.skeys[authtypes.ModuleName], app.GetSubspace(authtypes.ModuleName), authtypes.ProtoBaseAccount, MacPerms(), ) // add authz keeper - app.Keepers.Cosmos.Authz = authzkeeper.NewKeeper(app.keys[authzkeeper.StoreKey], appCodec, app.MsgServiceRouter()) + app.Keepers.Cosmos.Authz = authzkeeper.NewKeeper(app.skeys[authz.ModuleName], appCodec, 
app.MsgServiceRouter()) app.Keepers.Cosmos.FeeGrant = feegrantkeeper.NewKeeper( appCodec, - keys[feegrant.StoreKey], + skeys[feegrant.ModuleName], app.Keepers.Cosmos.Acct, ) app.Keepers.Cosmos.Bank = bankkeeper.NewBaseKeeper( appCodec, - app.keys[banktypes.StoreKey], + app.skeys[banktypes.ModuleName], app.Keepers.Cosmos.Acct, app.GetSubspace(banktypes.ModuleName), app.BlockedAddrs(), @@ -224,7 +226,7 @@ func NewApp( { skeeper := stakingkeeper.NewKeeper( appCodec, - app.keys[stakingtypes.StoreKey], + app.skeys[stakingtypes.ModuleName], app.Keepers.Cosmos.Acct, app.Keepers.Cosmos.Bank, app.GetSubspace(stakingtypes.ModuleName), @@ -234,7 +236,7 @@ func NewApp( app.Keepers.Cosmos.Mint = mintkeeper.NewKeeper( appCodec, - app.keys[minttypes.StoreKey], + app.skeys[minttypes.ModuleName], app.GetSubspace(minttypes.ModuleName), app.Keepers.Cosmos.Staking, app.Keepers.Cosmos.Acct, @@ -244,7 +246,7 @@ func NewApp( app.Keepers.Cosmos.Distr = distrkeeper.NewKeeper( appCodec, - app.keys[distrtypes.StoreKey], + app.skeys[distrtypes.ModuleName], app.GetSubspace(distrtypes.ModuleName), app.Keepers.Cosmos.Acct, app.Keepers.Cosmos.Bank, @@ -255,7 +257,7 @@ func NewApp( app.Keepers.Cosmos.Slashing = slashingkeeper.NewKeeper( appCodec, - app.keys[slashingtypes.StoreKey], + app.skeys[slashingtypes.ModuleName], app.Keepers.Cosmos.Staking, app.GetSubspace(slashingtypes.ModuleName), ) @@ -278,7 +280,7 @@ func NewApp( app.Keepers.Cosmos.Upgrade = upgradekeeper.NewKeeper( skipUpgradeHeights, - app.keys[upgradetypes.StoreKey], + app.skeys[upgradetypes.ModuleName], appCodec, homePath, app.BaseApp, @@ -287,7 +289,7 @@ func NewApp( // register IBC Keeper app.Keepers.Cosmos.IBC = ibckeeper.NewKeeper( appCodec, - app.keys[ibchost.StoreKey], + app.skeys[ibchost.ModuleName], app.GetSubspace(ibchost.ModuleName), app.Keepers.Cosmos.Staking, app.Keepers.Cosmos.Upgrade, @@ -316,7 +318,7 @@ func NewApp( app.Keepers.Cosmos.Gov = govkeeper.NewKeeper( appCodec, - app.keys[govtypes.StoreKey], + 
app.skeys[govtypes.ModuleName], app.GetSubspace(govtypes.ModuleName), app.Keepers.Cosmos.Acct, app.Keepers.Cosmos.Bank, @@ -327,7 +329,7 @@ func NewApp( // register Transfer Keepers app.Keepers.Cosmos.Transfer = ibctransferkeeper.NewKeeper( appCodec, - app.keys[ibctransfertypes.StoreKey], + app.skeys[ibctransfertypes.ModuleName], app.GetSubspace(ibctransfertypes.ModuleName), app.Keepers.Cosmos.IBC.ChannelKeeper, app.Keepers.Cosmos.IBC.ChannelKeeper, @@ -349,7 +351,7 @@ func NewApp( // create evidence keeper with evidence router evidenceKeeper := evidencekeeper.NewKeeper( appCodec, - app.keys[evidencetypes.StoreKey], + app.skeys[evidencetypes.ModuleName], app.Keepers.Cosmos.Staking, app.Keepers.Cosmos.Slashing, ) @@ -411,6 +413,13 @@ func NewApp( app.MM.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) app.MM.RegisterServices(app.Configurator) + utypes.IterateMigrations(func(module string, version uint64, initfn utypes.NewMigrationFn) { + migrator := initfn(utypes.NewMigrator(app.appCodec, app.skeys[module])) + if err := app.Configurator.RegisterMigration(module, version, migrator.GetHandler()); err != nil { + panic(err) + } + }) + // add test gRPC service for testing gRPC queries in isolation testdata.RegisterQueryServer(app.GRPCQueryRouter(), testdata.QueryImpl{}) @@ -440,7 +449,7 @@ func NewApp( app.sm.RegisterStoreDecoders() // initialize stores - app.MountKVStores(keys) + app.MountKVStores(app.skeys.Keys()) app.MountTransientStores(tkeys) app.MountMemoryStores(memkeys) @@ -577,13 +586,13 @@ func (app *AkashApp) InterfaceRegistry() codectypes.InterfaceRegistry { } // GetKey returns the KVStoreKey for the provided store key. -func (app *AkashApp) GetKey(storeKey string) *sdk.KVStoreKey { - return app.keys[storeKey] +func (app *AkashApp) GetKey(module string) *sdk.KVStoreKey { + return app.skeys[module] } // GetTKey returns the TransientStoreKey for the provided store key. 
-func (app *AkashApp) GetTKey(storeKey string) *sdk.TransientStoreKey { - return app.tkeys[storeKey] +func (app *AkashApp) GetTKey(module string) *sdk.TransientStoreKey { + return app.tkeys[module] } // GetSubspace returns a param subspace for a given module name. diff --git a/app/app_configure.go b/app/app_configure.go index c2f615de80..4646c214c9 100644 --- a/app/app_configure.go +++ b/app/app_configure.go @@ -67,21 +67,6 @@ func akashModuleBasics() []module.AppModuleBasic { } } -func akashKVStoreKeys() []string { - return []string{ - take.StoreKey, - escrow.StoreKey, - deployment.StoreKey, - market.StoreKey, - provider.StoreKey, - audit.StoreKey, - cert.StoreKey, - inflation.StoreKey, - astaking.StoreKey, - agov.StoreKey, - } -} - func akashSubspaces(k paramskeeper.Keeper) paramskeeper.Keeper { k.Subspace(deployment.ModuleName) k.Subspace(market.ModuleName) @@ -89,19 +74,20 @@ func akashSubspaces(k paramskeeper.Keeper) paramskeeper.Keeper { k.Subspace(astaking.ModuleName) k.Subspace(agov.ModuleName) k.Subspace(take.ModuleName) + return k } func (app *AkashApp) setAkashKeepers() { app.Keepers.Akash.Take = tkeeper.NewKeeper( app.appCodec, - app.keys[take.StoreKey], + app.skeys[take.ModuleName], app.GetSubspace(take.ModuleName), ) app.Keepers.Akash.Escrow = ekeeper.NewKeeper( app.appCodec, - app.keys[escrow.StoreKey], + app.skeys[escrow.ModuleName], app.Keepers.Cosmos.Bank, app.Keepers.Akash.Take, app.Keepers.Cosmos.Distr, @@ -110,14 +96,14 @@ func (app *AkashApp) setAkashKeepers() { app.Keepers.Akash.Deployment = deployment.NewKeeper( app.appCodec, - app.keys[deployment.StoreKey], + app.skeys[deployment.ModuleName], app.GetSubspace(deployment.ModuleName), app.Keepers.Akash.Escrow, ) app.Keepers.Akash.Market = market.NewKeeper( app.appCodec, - app.keys[market.StoreKey], + app.skeys[market.ModuleName], app.GetSubspace(market.ModuleName), app.Keepers.Akash.Escrow, ) @@ -132,34 +118,34 @@ func (app *AkashApp) setAkashKeepers() { app.Keepers.Akash.Provider = 
provider.NewKeeper( app.appCodec, - app.keys[provider.StoreKey], + app.skeys[provider.ModuleName], ) app.Keepers.Akash.Audit = akeeper.NewKeeper( app.appCodec, - app.keys[audit.StoreKey], + app.skeys[audit.ModuleName], ) app.Keepers.Akash.Cert = ckeeper.NewKeeper( app.appCodec, - app.keys[cert.StoreKey], + app.skeys[cert.ModuleName], ) app.Keepers.Akash.Inflation = ikeeper.NewKeeper( app.appCodec, - app.keys[inflation.StoreKey], + app.skeys[inflation.ModuleName], app.GetSubspace(inflation.ModuleName), ) app.Keepers.Akash.Staking = astakingkeeper.NewKeeper( app.appCodec, - app.keys[astaking.StoreKey], + app.skeys[astaking.ModuleName], app.GetSubspace(astaking.ModuleName), ) app.Keepers.Akash.Gov = agovkeeper.NewKeeper( app.appCodec, - app.keys[agov.StoreKey], + app.skeys[agov.ModuleName], app.GetSubspace(agov.ModuleName), ) } diff --git a/app/config.go b/app/config.go index 9748de61c0..4ead6c6641 100644 --- a/app/config.go +++ b/app/config.go @@ -3,11 +3,13 @@ package app import ( simparams "github.com/cosmos/cosmos-sdk/simapp/params" "github.com/cosmos/cosmos-sdk/std" + "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/x/auth" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/auth/vesting" + "github.com/cosmos/cosmos-sdk/x/authz" authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module" "github.com/cosmos/cosmos-sdk/x/bank" @@ -44,6 +46,16 @@ import ( ibchost "github.com/cosmos/ibc-go/v4/modules/core/24-host" appparams "github.com/akash-network/node/app/params" + "github.com/akash-network/node/x/audit" + "github.com/akash-network/node/x/cert" + "github.com/akash-network/node/x/deployment" + "github.com/akash-network/node/x/escrow" + agov "github.com/akash-network/node/x/gov" + "github.com/akash-network/node/x/inflation" + "github.com/akash-network/node/x/market" + 
"github.com/akash-network/node/x/provider" + astaking "github.com/akash-network/node/x/staking" + "github.com/akash-network/node/x/take" ) var mbasics = module.NewBasicManager( @@ -102,34 +114,55 @@ func MakeEncodingConfig() simparams.EncodingConfig { return encodingConfig } -func kvStoreKeys() map[string]*sdk.KVStoreKey { - return sdk.NewKVStoreKeys( - append([]string{ - authtypes.StoreKey, - feegrant.StoreKey, - authzkeeper.StoreKey, - banktypes.StoreKey, - stakingtypes.StoreKey, - minttypes.StoreKey, - distrtypes.StoreKey, - slashingtypes.StoreKey, - govtypes.StoreKey, - paramstypes.StoreKey, - ibchost.StoreKey, - upgradetypes.StoreKey, - evidencetypes.StoreKey, - ibctransfertypes.StoreKey, - capabilitytypes.StoreKey, - }, - akashKVStoreKeys()..., - )..., - ) +func (m ModulesStoreKeys) Keys() map[string]*sdk.KVStoreKey { + res := make(map[string]*sdk.KVStoreKey) + + for _, key := range m { + res[key.Name()] = key + } + + return res +} + +func modulesStoreKeys() ModulesStoreKeys { + return ModulesStoreKeys{ + authtypes.ModuleName: types.NewKVStoreKey(authtypes.StoreKey), + feegrant.ModuleName: types.NewKVStoreKey(feegrant.StoreKey), + authz.ModuleName: types.NewKVStoreKey(authzkeeper.StoreKey), + banktypes.ModuleName: types.NewKVStoreKey(banktypes.StoreKey), + stakingtypes.ModuleName: types.NewKVStoreKey(stakingtypes.StoreKey), + minttypes.ModuleName: types.NewKVStoreKey(minttypes.StoreKey), + distrtypes.ModuleName: types.NewKVStoreKey(distrtypes.StoreKey), + slashingtypes.ModuleName: types.NewKVStoreKey(slashingtypes.StoreKey), + govtypes.ModuleName: types.NewKVStoreKey(govtypes.StoreKey), + paramstypes.ModuleName: types.NewKVStoreKey(paramstypes.StoreKey), + ibchost.ModuleName: types.NewKVStoreKey(ibchost.StoreKey), + upgradetypes.ModuleName: types.NewKVStoreKey(upgradetypes.StoreKey), + evidencetypes.ModuleName: types.NewKVStoreKey(evidencetypes.StoreKey), + ibctransfertypes.ModuleName: types.NewKVStoreKey(ibctransfertypes.StoreKey), + 
capabilitytypes.ModuleName: types.NewKVStoreKey(capabilitytypes.StoreKey), + // akash modules + take.ModuleName: types.NewKVStoreKey(take.StoreKey), + escrow.ModuleName: types.NewKVStoreKey(escrow.StoreKey), + deployment.ModuleName: types.NewKVStoreKey(deployment.StoreKey), + market.ModuleName: types.NewKVStoreKey(market.StoreKey), + provider.ModuleName: types.NewKVStoreKey(provider.StoreKey), + audit.ModuleName: types.NewKVStoreKey(audit.StoreKey), + cert.ModuleName: types.NewKVStoreKey(cert.StoreKey), + inflation.ModuleName: types.NewKVStoreKey(inflation.StoreKey), + astaking.ModuleName: types.NewKVStoreKey(astaking.StoreKey), + agov.ModuleName: types.NewKVStoreKey(agov.StoreKey), + } } -func transientStoreKeys() map[string]*sdk.TransientStoreKey { - return sdk.NewTransientStoreKeys(paramstypes.TStoreKey) +func modulesTransientKeys() ModulesTransientKeys { + return ModulesTransientKeys{ + paramstypes.ModuleName: sdk.NewTransientStoreKey(paramstypes.TStoreKey), + } } -func memStoreKeys() map[string]*sdk.MemoryStoreKey { - return sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) +func modulesMemoryKeys() ModulesMemoryKeys { + return ModulesMemoryKeys{ + capabilitytypes.ModuleName: types.NewMemoryStoreKey(capabilitytypes.MemStoreKey), + } } diff --git a/app/export.go b/app/export.go index e86f20b398..23763ea8f5 100644 --- a/app/export.go +++ b/app/export.go @@ -165,7 +165,7 @@ func (app *AkashApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs // Iterate through validators by power descending, reset bond heights, and // update bond intra-tx counters. 
- store := ctx.KVStore(app.keys[stakingtypes.StoreKey]) + store := ctx.KVStore(app.skeys[stakingtypes.ModuleName]) iter := sdk.KVStoreReversePrefixIterator(store, stakingtypes.ValidatorsKey) counter := int16(0) diff --git a/app/sim_test.go b/app/sim_test.go index 53e8617f2b..4141ef269e 100644 --- a/app/sim_test.go +++ b/app/sim_test.go @@ -7,6 +7,8 @@ import ( "os" "testing" + "github.com/cosmos/cosmos-sdk/x/authz" + authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" @@ -158,24 +160,27 @@ func TestAppImportExport(t *testing.T) { fmt.Printf("comparing stores...\n") storeKeysPrefixes := []StoreKeysPrefixes{ - {app.keys[authtypes.StoreKey], newApp.keys[authtypes.StoreKey], [][]byte{}}, + {app.skeys[authtypes.ModuleName], newApp.skeys[authtypes.ModuleName], [][]byte{}}, { - app.keys[stakingtypes.StoreKey], newApp.keys[stakingtypes.StoreKey], + app.skeys[stakingtypes.ModuleName], newApp.skeys[stakingtypes.ModuleName], [][]byte{ stakingtypes.UnbondingQueueKey, stakingtypes.RedelegationQueueKey, stakingtypes.ValidatorQueueKey, stakingtypes.HistoricalInfoKey, }, }, // ordering may change but it doesn't matter - {app.keys[slashingtypes.StoreKey], newApp.keys[slashingtypes.StoreKey], [][]byte{}}, - {app.keys[minttypes.StoreKey], newApp.keys[minttypes.StoreKey], [][]byte{}}, - {app.keys[distrtypes.StoreKey], newApp.keys[distrtypes.StoreKey], [][]byte{}}, - {app.keys[banktypes.StoreKey], newApp.keys[banktypes.StoreKey], [][]byte{banktypes.BalancesPrefix}}, - {app.keys[paramtypes.StoreKey], newApp.keys[paramtypes.StoreKey], [][]byte{}}, - {app.keys[govtypes.StoreKey], newApp.keys[govtypes.StoreKey], [][]byte{}}, - {app.keys[evidencetypes.StoreKey], newApp.keys[evidencetypes.StoreKey], [][]byte{}}, - {app.keys[capabilitytypes.StoreKey], newApp.keys[capabilitytypes.StoreKey], [][]byte{}}, - {app.keys[ibchost.StoreKey], 
newApp.keys[ibchost.StoreKey], [][]byte{}}, - {app.keys[ibctransfertypes.StoreKey], newApp.keys[ibctransfertypes.StoreKey], [][]byte{}}, + {app.skeys[slashingtypes.ModuleName], newApp.skeys[slashingtypes.ModuleName], [][]byte{}}, + {app.skeys[minttypes.ModuleName], newApp.skeys[minttypes.ModuleName], [][]byte{}}, + {app.skeys[distrtypes.ModuleName], newApp.skeys[distrtypes.ModuleName], [][]byte{}}, + {app.skeys[banktypes.ModuleName], newApp.skeys[banktypes.ModuleName], [][]byte{banktypes.BalancesPrefix}}, + {app.skeys[paramtypes.ModuleName], newApp.skeys[paramtypes.ModuleName], [][]byte{}}, + {app.skeys[govtypes.ModuleName], newApp.skeys[govtypes.ModuleName], [][]byte{}}, + {app.skeys[evidencetypes.ModuleName], newApp.skeys[evidencetypes.ModuleName], [][]byte{}}, + {app.skeys[capabilitytypes.ModuleName], newApp.skeys[capabilitytypes.ModuleName], [][]byte{}}, + {app.skeys[ibchost.ModuleName], newApp.skeys[ibchost.ModuleName], [][]byte{}}, + {app.skeys[ibctransfertypes.ModuleName], newApp.skeys[ibctransfertypes.ModuleName], [][]byte{}}, + {app.skeys[authz.ModuleName], newApp.skeys[authz.ModuleName], [][]byte{ + authzkeeper.GranteeKey, + }}, } for _, skp := range storeKeysPrefixes { diff --git a/go.mod b/go.mod index c6c98b89a3..5a858f1611 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.0 toolchain go1.23.6 require ( - github.com/akash-network/akash-api v0.0.73 + github.com/akash-network/akash-api v0.0.75 github.com/blang/semver/v4 v4.0.0 github.com/boz/go-lifecycle v0.1.1 github.com/cosmos/cosmos-sdk v0.45.16 @@ -52,7 +52,7 @@ retract ( replace ( // use cosmos fork of keyring github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 - github.com/cosmos/cosmos-sdk => github.com/akash-network/cosmos-sdk v0.45.16-akash.1 + github.com/cosmos/cosmos-sdk => github.com/akash-network/cosmos-sdk v0.45.16-akash.2 // use akash version of cosmos ledger api github.com/cosmos/ledger-cosmos-go => github.com/akash-network/ledger-go/cosmos v0.14.4 @@ -67,6 +67,7 @@ 
replace ( // To be replaced by cosmos/gogoproto in future versions github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // use cometBFT system fork of tendermint with akash patches + github.com/tendermint/tendermint => github.com/akash-network/cometbft v0.34.27-akash.2 github.com/zondax/hid => github.com/troian/hid v0.13.2 diff --git a/go.sum b/go.sum index 63b8ef9eb7..a796b7f453 100644 --- a/go.sum +++ b/go.sum @@ -76,12 +76,12 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/akash-network/akash-api v0.0.73 h1:Wm0NgVgHbuLTAAa9aIRxan1zGwSb+i9/1uWp65vJmXc= -github.com/akash-network/akash-api v0.0.73/go.mod h1:5JMjLPHvWOyyamLz8bJoy6QHqz4I46ANAQpEIIUY1bM= +github.com/akash-network/akash-api v0.0.75 h1:h9RZemWa7JqMGYb3nVRhRgP4xZnACIy0yN7de60JLyg= +github.com/akash-network/akash-api v0.0.75/go.mod h1:pvoHHEQbt63+U+HUSTjssZ1nUJ8sJuWtHCu6ztaXcqo= github.com/akash-network/cometbft v0.34.27-akash.2 h1:2hKEcX+cIv/OLAJ82gBWdkZlVWn+8JUYs4GrDoPAOhU= github.com/akash-network/cometbft v0.34.27-akash.2/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw= -github.com/akash-network/cosmos-sdk v0.45.16-akash.1 h1:mAvN01zEMK8MMhtbk39/DsjGwczc2zI5kQvuubY5cxU= -github.com/akash-network/cosmos-sdk v0.45.16-akash.1/go.mod h1:NTnk/GuQdFyfk/iGFxDAgQH9fwcbRW/hREap6qaPg48= +github.com/akash-network/cosmos-sdk v0.45.16-akash.2 h1:irEGS3wenYS4lFgjHVdT0F3z22Z3KkNrJd8VHVhhSdo= +github.com/akash-network/cosmos-sdk v0.45.16-akash.2/go.mod h1:NTnk/GuQdFyfk/iGFxDAgQH9fwcbRW/hREap6qaPg48= github.com/akash-network/ledger-go v0.14.3 h1:LCEFkTfgGA2xFMN2CtiKvXKE7dh0QSM77PJHCpSkaAo= github.com/akash-network/ledger-go 
v0.14.3/go.mod h1:NfsjfFvno9Kaq6mfpsKz4sqjnAVVEsVsnBJfKB4ueAs= github.com/akash-network/ledger-go/cosmos v0.14.4 h1:h3WiXmoKKs9wkj1LHcJ12cLjXXg6nG1fp+UQ5+wu/+o= diff --git a/make/lint.mk b/make/lint.mk index f485a2f818..92c83efd31 100644 --- a/make/lint.mk +++ b/make/lint.mk @@ -6,7 +6,7 @@ SUBLINTERS = unused \ ineffassign \ unparam \ staticcheck \ - exportloopref \ + copyloopvar \ prealloc # TODO: ^ gochecknoglobals diff --git a/make/releasing.mk b/make/releasing.mk index d78fe56b6e..51486d1ab0 100644 --- a/make/releasing.mk +++ b/make/releasing.mk @@ -6,6 +6,8 @@ GORELEASER_RELEASE ?= false GORELEASER_MOUNT_CONFIG ?= false GORELEASER_SKIP := $(subst $(COMMA),$(SPACE),$(GORELEASER_SKIP)) +GORELEASER_MOD_MOUNT ?= $(shell cat $(AKASH_ROOT)/.github/.repo | tr -d '\n') + RELEASE_DOCKER_IMAGE ?= ghcr.io/akash-network/node ifneq ($(GORELEASER_RELEASE),true) @@ -50,6 +52,30 @@ install: image-minikube: eval $$(minikube docker-env) && docker-image +.PHONY: test-bins +test-bins: + docker run \ + --rm \ + -e STABLE=$(IS_STABLE) \ + -e MOD="$(GOMOD)" \ + -e BUILD_TAGS="$(BUILD_TAGS)" \ + -e BUILD_VARS="$(GORELEASER_BUILD_VARS)" \ + -e STRIP_FLAGS="$(GORELEASER_STRIP_FLAGS)" \ + -e LINKMODE="$(GO_LINKMODE)" \ + -e DOCKER_IMAGE=$(RELEASE_DOCKER_IMAGE) \ + -e GOPATH=/go \ + -e GOTOOLCHAIN="$(GOTOOLCHAIN)" \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $(GOPATH):/go \ + -v $(AKASH_ROOT):/go/src/$(GORELEASER_MOD_MOUNT) \ + -w /go/src/$(GORELEASER_MOD_MOUNT) \ + $(GORELEASER_IMAGE) \ + -f .goreleaser-test-bins.yaml \ + --verbose=$(GORELEASER_VERBOSE) \ + --clean \ + --skip=publish,validate \ + --snapshot + .PHONY: docker-image docker-image: docker run \ diff --git a/make/test-upgrade.mk b/make/test-upgrade.mk index 3f964a1a8f..cc52e5237e 100644 --- a/make/test-upgrade.mk +++ b/make/test-upgrade.mk @@ -12,6 +12,7 @@ export AKASH_GAS = auto export AKASH_STATESYNC_ENABLE = false export AKASH_LOG_COLOR = true +TEST_CONFIG ?= test-config.json KEY_OPTS := 
--keyring-backend=$(AKASH_KEYRING_BACKEND) KEY_NAME ?= validator UPGRADE_TO ?= $(shell $(ROOT_DIR)/script/upgrades.sh upgrade-from-release $(RELEASE_TAG)) @@ -19,6 +20,9 @@ UPGRADE_FROM := $(shell cat $(ROOT_DIR)/meta.json | jq -r --arg name GENESIS_BINARY_VERSION := $(shell cat $(ROOT_DIR)/meta.json | jq -r --arg name $(UPGRADE_TO) '.upgrades[$$name].from_binary' | tr -d '\n') UPGRADE_BINARY_VERSION ?= local +REMOTE_TEST_WORKDIR ?= ~/go/src/github.com/akash-network/node +REMOTE_TEST_HOST ?= + $(AKASH_INIT): $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --gbv=$(GENESIS_BINARY_VERSION) --ufrom=$(UPGRADE_FROM) --uto=$(UPGRADE_TO) --config="$(PWD)/config.json" init touch $@ @@ -31,17 +35,27 @@ genesis: $(GENESIS_DEST) .PHONY: test test: $(COSMOVISOR) init - $(GO_TEST) -run "^\QTestUpgrade\E$$" -tags e2e.upgrade -timeout 60m -v -args \ + $(GO_TEST) -run "^\QTestUpgrade\E$$" -tags e2e.upgrade -timeout 180m -v -args \ -cosmovisor=$(COSMOVISOR) \ -workdir=$(AP_RUN_DIR)/validators \ - -config=test-config.json \ + -config=$(TEST_CONFIG) \ -upgrade-name=$(UPGRADE_TO) \ -upgrade-version="$(UPGRADE_BINARY_VERSION)" \ -test-cases=test-cases.json .PHONY: test-reset test-reset: - $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --config="$(PWD)/config.json" clean + $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --config="$(PWD)/config.json" --uto=$(UPGRADE_TO) clean + $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --config="$(PWD)/config.json" --uto=$(UPGRADE_TO) --gbv=$(GENESIS_BINARY_VERSION) bins + $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --config="$(PWD)/config.json" --uto=$(UPGRADE_TO) keys + + +.PHONY: bins +bins: +ifneq ($(findstring build,$(SKIP)),build) +bins: + $(ROOT_DIR)/script/upgrades.sh --workdir=$(AP_RUN_DIR) --config="$(PWD)/config.json" --uto=$(UPGRADE_TO) bins +endif .PHONY: clean clean: diff --git a/meta.json b/meta.json index 8eebba44f9..90555e71dc 100644 --- a/meta.json +++ b/meta.json @@ -34,6 +34,11 @@ 
"skipped": false, "from_binary": "v0.34.1", "from_version": "v0.34.0" + }, + "v0.38.0": { + "skipped": false, + "from_binary": "v0.36.3-rc9", + "from_version": "v0.36.0" } } } diff --git a/script/semver.sh b/script/semver.sh index abdecaa33f..768cec2518 100755 --- a/script/semver.sh +++ b/script/semver.sh @@ -1,256 +1,131 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 set -o errexit -o nounset -o pipefail -SEMVER_REGEX="^[v|V]?(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -SEMVER_REGEX_LEGACY="^[v|V]?(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\.0|[1-9][0-9]*)?(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" +source "$SCRIPT_DIR/semver_funcs.sh" PROG=semver -PROG_VERSION=2.1.0 +PROG_VERSION="3.4.0" USAGE="\ Usage: - $PROG bump (major|minor|patch|release|prerel |build ) + $PROG bump major + $PROG bump minor + $PROG bump patch + $PROG bump prerel|prerelease [] + $PROG bump build + $PROG bump release + $PROG get major + $PROG get minor + $PROG get patch + $PROG get prerel|prerelease + $PROG get build + $PROG get release $PROG compare - $PROG get (major|minor|patch|release|prerel|build) + $PROG diff + $PROG validate $PROG --help $PROG --version -Arguments: - A version must match the following regex pattern: - \"${SEMVER_REGEX}\". - In english, the version must match X.Y.Z(-PRERELEASE)(+BUILD) - where X, Y and Z are positive integers, PRERELEASE is an optional - string composed of alphanumeric characters and hyphens and - BUILD is also an optional string composed of alphanumeric - characters and hyphens. - See definition. - String that must be composed of alphanumeric characters and hyphens. - String that must be composed of alphanumeric characters and hyphens. -Options: - -v, --version Print the version of this tool. - -h, --help Print this help message. 
-Commands: - bump Bump by one of major, minor, patch, prerel, build - or a forced potentially conflicting version. The bumped version is - shown to stdout. - compare Compare with , output to stdout the - following values: -1 if is newer, 0 if equal, 1 if - older. - get Extract given part of , where part is one of major, minor, - patch, prerel, build. - validate Check version string is valid" - - -function error { - echo -e "$1" >&2 - exit 1 -} - -function usage-help { - error "$USAGE" -} - -function usage-version { - echo -e "${PROG}: $PROG_VERSION" - exit 0 -} - -function validate-version { - local version=$1 - if [[ "$version" =~ $SEMVER_REGEX ]]; then - # if a second argument is passed, store the result in var named by $2 - if [[ "$#" -eq "2" ]]; then - local major=${BASH_REMATCH[1]} - local minor=${BASH_REMATCH[2]} - local patch=${BASH_REMATCH[3]} - local prere=${BASH_REMATCH[4]} - local build=${BASH_REMATCH[6]} - eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" - else - echo "$version" - fi - elif [[ "$version" =~ $SEMVER_REGEX_LEGACY ]]; then - # if a second argument is passed, store the result in var named by $2 - if [[ "$#" -eq "2" ]]; then - local major=${BASH_REMATCH[1]} - local minor=${BASH_REMATCH[2]} - local patch=0 - local prere=${BASH_REMATCH[4]} - local build=${BASH_REMATCH[6]} - eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" - else - echo "$version" - fi - else - error "version $version does not match the semver scheme 'X.Y.Z(-PRERELEASE)(+BUILD)'. See help for more information." - fi -} - -function compare-version { - validate-version "$1" V - validate-version "$2" V_ - - # MAJOR, MINOR and PATCH should compare numerically - for i in 0 1 2; do - local diff=$((${V[$i]} - ${V_[$i]})) - if [[ ${diff} -lt 0 ]]; then - echo -1; - return 0 - elif [[ ${diff} -gt 0 ]]; then - echo 1; - return 0 - fi - done - # PREREL should compare with the ASCII order. 
- if [[ -z "${V[3]}" ]] && [[ -n "${V_[3]}" ]]; then - echo -1; - return 0; - elif [[ -n "${V[3]}" ]] && [[ -z "${V_[3]}" ]]; then - echo 1; - return 0; - elif [[ -n "${V[3]}" ]] && [[ -n "${V_[3]}" ]]; then - if [[ "${V[3]}" > "${V_[3]}" ]]; then - echo 1; - return 0; - elif [[ "${V[3]}" < "${V_[3]}" ]]; then - echo -1; - return 0; - fi - fi - - echo 0 -} - -function command-bump { - local new; - local version; - local sub_version; - local command; - - case $# in - 2) - case $1 in - major | minor | patch | release) - command=$1; - version=$2 ;; - *) - usage-help ;; - esac ;; - 3) - case $1 in - prerel | build) - command=$1; - sub_version=$2 version=$3 ;; - *) - usage-help ;; - esac ;; - *) - usage-help ;; - esac - - validate-version "$version" parts - # shellcheck disable=SC2154 - local major="${parts[0]}" - local minor="${parts[1]}" - local patch="${parts[2]}" - local prere="${parts[3]}" - local build="${parts[4]}" +Arguments: + A version must match the following regular expression: + \"${SEMVER_REGEX}\" + In English: + -- The version must match X.Y.Z[-PRERELEASE][+BUILD] + where X, Y and Z are non-negative integers. + -- PRERELEASE is a dot separated sequence of non-negative integers and/or + identifiers composed of alphanumeric characters and hyphens (with + at least one non-digit). Numeric identifiers must not have leading + zeros. A hyphen (\"-\") introduces this optional part. + -- BUILD is a dot separated sequence of identifiers composed of alphanumeric + characters and hyphens. A plus (\"+\") introduces this optional part. - case "$command" in - major) - new="$((major + 1)).0.0" ;; - minor) - new="${major}.$((minor + 1)).0" ;; - patch) - new="${major}.${minor}.$((patch + 1))" ;; - release) - new="${major}.${minor}.${patch}" ;; - prerel) - new=$(validate-version "${major}.${minor}.${patch}-${sub_version}") ;; - build) - new=$(validate-version "${major}.${minor}.${patch}${prere}+${sub_version}") ;; - *) - usage-help ;; - esac + See definition. 
- echo "$new" - exit 0 -} + A string as defined by PRERELEASE above. Or, it can be a PRERELEASE + prototype string followed by a dot. -function command-compare { - local v; - local v_; + A string as defined by BUILD above. - case $# in - 2) - v=$(validate-version "$1"); - v_=$(validate-version "$2") ;; - *) - usage-help ;; - esac +Options: + -v, --version Print the version of this tool. + -h, --help Print this help message. - compare-version "$v" "$v_" - exit 0 +Commands: + bump Bump by one of major, minor, patch; zeroing or removing + subsequent parts. \"bump prerel\" (or its synonym \"bump prerelease\") + sets the PRERELEASE part and removes any BUILD part. A trailing dot + in the argument introduces an incrementing numeric field + which is added or bumped. If no argument is provided, an + incrementing numeric field is introduced/bumped. \"bump build\" sets + the BUILD part. \"bump release\" removes any PRERELEASE or BUILD parts. + The bumped version is written to stdout. + + get Extract given part of , where part is one of major, minor, + patch, prerel (alternatively: prerelease), build, or release. + + compare Compare with , output to stdout the + following values: -1 if is newer, 0 if equal, 1 if + older. The BUILD part is not used in comparisons. + + diff Compare with , output to stdout the + difference between two versions by the release type (MAJOR, MINOR, + PATCH, PRERELEASE, BUILD). + + validate Validate if follows the SEMVER pattern (see + definition). Print 'valid' to stdout if the version is valid, otherwise + print 'invalid'. 
+ +See also: + https://semver.org -- Semantic Versioning 2.0.0" + +function usage_help { + error "$USAGE" } -# shellcheck disable=SC2034 -function command-get { - local part version - - if [[ "$#" -ne "2" ]] || [[ -z "$1" ]] || [[ -z "$2" ]]; then - usage-help - fi - - part="$1" - version="$2" - - validate-version "$version" parts - local major="${parts[0]}" - local minor="${parts[1]}" - local patch="${parts[2]}" - local prerel="${parts[3]:1}" - local build="${parts[4]:1}" - - case "$part" in - "major-minor") - echo "$major.$minor" - ;; - major | minor | patch | release | prerel | build) - echo "${!part}" ;; - *) - usage-help ;; - esac - - exit 0 +function usage_version { + echo -e "${PROG}: $PROG_VERSION" + exit 0 } case $# in - 0) - echo "Unknown command: $*"; - usage-help ;; + 0) + echo "Unknown command: $*" + usage_help + ;; esac case $1 in - --help | -h) - echo -e "$USAGE"; - exit 0 ;; - --version | -v) - usage-version ;; - bump) - shift; - command-bump "$@" ;; - get) - shift; - command-get "$@" ;; - compare) - shift; - command-compare "$@" ;; - validate) - shift; - validate-version "$@" V ;; - *) - echo "Unknown arguments: $*"; - usage-help ;; + --help | -h) + echo -e "$USAGE" + exit 0 + ;; + --version | -v) usage_version ;; + bump) + shift + command_bump "$@" + ;; + get) + shift + command_get "$@" + ;; + compare) + shift + command_compare "$@" + ;; + diff) + shift + command_diff "$@" + ;; + validate) + shift + command_validate "$@" + ;; + *) + echo "Unknown arguments: $*" + usage_help + ;; esac diff --git a/script/semver_funcs.sh b/script/semver_funcs.sh new file mode 100644 index 0000000000..bd4ece5a22 --- /dev/null +++ b/script/semver_funcs.sh @@ -0,0 +1,417 @@ +#!/usr/bin/env bash + +NAT='0|[1-9][0-9]*' +ALPHANUM='[0-9]*[A-Za-z-][0-9A-Za-z-]*' +IDENT="$NAT|$ALPHANUM" +FIELD='[0-9A-Za-z-]+' + +SEMVER_REGEX_STR="\ +[vV]?\ +($NAT)\\.($NAT)\\.($NAT)\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +SEMVER_REGEX_LEGACY="\ +[vV]?\ 
+($NAT)\\.($NAT)(\\.($NAT))?\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +SEMVER_REGEX="^$SEMVER_REGEX_STR" + +function error { + echo -e "$1" >&2 + exit 1 +} + +# normalize the "part" keywords to a canonical string. At present, +# only "prerelease" is normalized to "prerel". + +function normalize_part { + if [ "$1" == "prerelease" ]; then + echo "prerel" + else + echo "$1" + fi +} + +function validate_version { + local version=$1 + if [[ "$version" =~ $SEMVER_REGEX ]]; then + # if a second argument is passed, store the result in var named by $2 + if [ "$#" -eq "2" ]; then + local major=${BASH_REMATCH[1]} + local minor=${BASH_REMATCH[2]} + local patch=${BASH_REMATCH[3]} + local prere=${BASH_REMATCH[4]} + local build=${BASH_REMATCH[8]} + eval "$2=(\"$major\" \"$minor\" \"$patch\" \"$prere\" \"$build\")" + else + echo "$version" + fi + elif [[ "$version" =~ $SEMVER_REGEX_LEGACY ]]; then + # if a second argument is passed, store the result in var named by $2 + if [[ "$#" -eq "2" ]]; then + local major=${BASH_REMATCH[1]} + local minor=${BASH_REMATCH[2]} + local patch=0 + local prere=${BASH_REMATCH[4]} + local build=${BASH_REMATCH[6]} + eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" + else + echo "$version" + fi + else + error "version $version does not match the semver scheme 'X.Y.Z(-PRERELEASE)(+BUILD)'. See help for more information." + fi +} + +function is_nat { + [[ "$1" =~ ^($NAT)$ ]] +} + +function is_null { + [ -z "$1" ] +} + +function order_nat { + [ "$1" -lt "$2" ] && { + echo -1 + return + } + [ "$1" -gt "$2" ] && { + echo 1 + return + } + echo 0 +} + +function order_string { + [[ $1 < $2 ]] && { + echo -1 + return + } + [[ $1 > $2 ]] && { + echo 1 + return + } + echo 0 +} + +# given two (named) arrays containing NAT and/or ALPHANUM fields, compare them +# one by one according to semver 2.0.0 spec. Return -1, 0, 1 if left array ($1) +# is less-than, equal, or greater-than the right array ($2). 
The longer array +# is considered greater-than the shorter if the shorter is a prefix of the longer. +# +function compare_fields { + local l="$1[@]" + local r="$2[@]" + local leftfield=("${!l}") + local rightfield=("${!r}") + local left + local right + + local i=$((-1)) + local order=$((0)) + + while true; do + # shellcheck disable=SC2086 + [ $order -ne 0 ] && { + echo "$order" + return + } + + : $((i++)) + left="${leftfield[$i]}" + right="${rightfield[$i]}" + + is_null "$left" && is_null "$right" && { + echo 0 + return + } + is_null "$left" && { + echo -1 + return + } + is_null "$right" && { + echo 1 + return + } + + is_nat "$left" && is_nat "$right" && { + order=$(order_nat "$left" "$right") + continue + } + is_nat "$left" && { + echo -1 + return + } + is_nat "$right" && { + echo 1 + return + } + { + order=$(order_string "$left" "$right") + continue + } + done +} + +# shellcheck disable=SC2206 # checked by "validate"; ok to expand prerel id's into array +function compare_version { + local order + validate_version "$1" V + validate_version "$2" V_ + + # compare major, minor, patch + + local left=("${V[0]}" "${V[1]}" "${V[2]}") + local right=("${V_[0]}" "${V_[1]}" "${V_[2]}") + + order=$(compare_fields left right) + [ "$order" -ne 0 ] && { + echo "$order" + return + } + + # compare pre-release ids when M.m.p are equal + + local prerel="${V[3]:1}" + local prerel_="${V_[3]:1}" + local left=(${prerel//./ }) + local right=(${prerel_//./ }) + + # if left and right have no pre-release part, then left equals right + # if only one of left/right has pre-release part, that one is less than simple M.m.p + + [ -z "$prerel" ] && [ -z "$prerel_" ] && { + echo 0 + return + } + [ -z "$prerel" ] && { + echo 1 + return + } + [ -z "$prerel_" ] && { + echo -1 + return + } + + # otherwise, compare the pre-release id's + + compare_fields left right +} + +# render_prerel -- return a prerel field with a trailing numeric string +# usage: render_prerel numeric [prefix-string] +# +function 
render_prerel { + if [ -z "$2" ]; then + echo "${1}" + else + echo "${2}${1}" + fi +} + +# extract_prerel -- extract prefix and trailing numeric portions of a pre-release part +# usage: extract_prerel prerel prerel_parts +# The prefix and trailing numeric parts are returned in "prerel_parts". +# +PREFIX_ALPHANUM='[.0-9A-Za-z-]*[.A-Za-z-]' +DIGITS='[0-9][0-9]*' +EXTRACT_REGEX="^(${PREFIX_ALPHANUM})*(${DIGITS})$" + +function extract_prerel { + local prefix + local numeric + + if [[ "$1" =~ $EXTRACT_REGEX ]]; then # found prefix and trailing numeric parts + prefix="${BASH_REMATCH[1]}" + numeric="${BASH_REMATCH[2]}" + else # no numeric part + prefix="${1}" + numeric= + fi + + eval "$2=(\"$prefix\" \"$numeric\")" +} + +# bump_prerel -- return the new pre-release part based on previous pre-release part +# and prototype for bump +# usage: bump_prerel proto previous +# +function bump_prerel { + local proto + local prev_prefix + local prev_numeric + + # case one: no trailing dot in prototype => simply replace previous with proto + if [[ ! 
("$1" =~ \.$) ]]; then + echo "$1" + return + fi + + proto="${1%.}" # discard trailing dot marker from prototype + + extract_prerel "${2#-}" prerel_parts # extract parts of previous pre-release + # shellcheck disable=SC2154 + prev_prefix="${prerel_parts[0]}" + prev_numeric="${prerel_parts[1]}" + + # case two: bump or append numeric to previous pre-release part + if [ "$proto" == "+" ]; then # dummy "+" indicates no prototype argument provided + if [ -n "$prev_numeric" ]; then + : $((++prev_numeric)) # previous pre-release is already numbered, bump it + render_prerel "$prev_numeric" "$prev_prefix" + else + render_prerel 1 "$prev_prefix" # append starting number + fi + return + fi + + # case three: set, bump, or append using prototype prefix + if [ "$prev_prefix" != "$proto" ]; then + render_prerel 1 "$proto" # proto not same pre-release; set and start at '1' + elif [ -n "$prev_numeric" ]; then + : $((++prev_numeric)) # pre-release is numbered; bump it + render_prerel "$prev_numeric" "$prev_prefix" + else + render_prerel 1 "$prev_prefix" # start pre-release at number '1' + fi +} + +function command_bump { + local new + local version + local sub_version + local command + + command="$(normalize_part "$1")" + + case $# in + 2) case "$command" in + major | minor | patch | prerel | release) + sub_version="+." 
+ version=$2 + ;; + *) usage_help ;; + esac ;; + 3) case "$command" in + prerel | build) sub_version=$2 version=$3 ;; + *) usage_help ;; + esac ;; + *) usage_help ;; + esac + + validate_version "$version" parts + # shellcheck disable=SC2154 + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prere="${parts[3]}" + local build="${parts[4]}" + + case "$command" in + major) new="$((major + 1)).0.0" ;; + minor) new="${major}.$((minor + 1)).0" ;; + patch) new="${major}.${minor}.$((patch + 1))" ;; + release) new="${major}.${minor}.${patch}" ;; + prerel) new=$(validate_version "${major}.${minor}.${patch}-$(bump_prerel "$sub_version" "$prere")") ;; + build) new=$(validate_version "${major}.${minor}.${patch}${prere}+${sub_version}") ;; + *) usage_help ;; + esac + + echo "$new" + exit 0 +} + +function command_compare { + local v + local v_ + + case $# in + 2) + v=$(validate_version "$1") + v_=$(validate_version "$2") + ;; + *) usage_help ;; + esac + + set +u # need unset array element to evaluate to null + compare_version "$v" "$v_" + exit 0 +} + +function command_diff { + validate_version "$1" v1_parts + # shellcheck disable=SC2154 + local v1_major="${v1_parts[0]}" + local v1_minor="${v1_parts[1]}" + local v1_patch="${v1_parts[2]}" + local v1_prere="${v1_parts[3]}" + local v1_build="${v1_parts[4]}" + + validate_version "$2" v2_parts + # shellcheck disable=SC2154 + local v2_major="${v2_parts[0]}" + local v2_minor="${v2_parts[1]}" + local v2_patch="${v2_parts[2]}" + local v2_prere="${v2_parts[3]}" + local v2_build="${v2_parts[4]}" + + if [ "${v1_major}" != "${v2_major}" ]; then + echo "major" + elif [ "${v1_minor}" != "${v2_minor}" ]; then + echo "minor" + elif [ "${v1_patch}" != "${v2_patch}" ]; then + echo "patch" + elif [ "${v1_prere}" != "${v2_prere}" ]; then + echo "prerelease" + elif [ "${v1_build}" != "${v2_build}" ]; then + echo "build" + fi +} + +# shellcheck disable=SC2034 +function command_get { + local part version + + if [[ 
"$#" -ne "2" ]] || [[ -z "$1" ]] || [[ -z "$2" ]]; then + usage_help + exit 0 + fi + + part="$1" + version="$2" + + validate_version "$version" parts + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prerel="${parts[3]:1}" + local build="${parts[4]:1}" + local release="${major}.${minor}.${patch}" + + part="$(normalize_part "$part")" + + case "$part" in + major | minor | patch | release | prerel | build) echo "${!part}" ;; + *) usage_help ;; + esac + + exit 0 +} + +function command_validate { + if [[ "$#" -ne "1" ]]; then + usage_help + fi + + if [[ "$1" =~ $SEMVER_REGEX ]]; then + echo "valid" + else + echo "invalid" + fi + + exit 0 +} diff --git a/script/upgrades.sh b/script/upgrades.sh index 8a2e9d6838..bbf48af475 100755 --- a/script/upgrades.sh +++ b/script/upgrades.sh @@ -38,361 +38,517 @@ short_opts=h long_opts=help/workdir:/ufrom:/uto:/gbv:/config: # those who take an arg END with : while getopts ":$short_opts-:" o; do - case $o in - :) - echo >&2 "option -$OPTARG needs an argument" - continue - ;; - '?') - echo >&2 "bad option -$OPTARG" - continue - ;; - -) - o=${OPTARG%%=*} - OPTARG=${OPTARG#"$o"} - lo=/$long_opts/ - case $lo in - *"/$o"[!/:]*"/$o"[!/:]*) - echo >&2 "ambiguous option --$o" - continue - ;; - *"/$o"[:/]*) - ;; - *) - o=$o${lo#*"/$o"}; - o=${o%%[/:]*} - ;; - esac - - case $lo in - *"/$o/"*) - OPTARG= - ;; - *"/$o:/"*) - case $OPTARG in - '='*) - OPTARG=${OPTARG#=} - ;; - *) - eval "OPTARG=\$$OPTIND" - if [ "$OPTIND" -le "$#" ] && [ "$OPTARG" != -- ]; then - OPTIND=$((OPTIND + 1)) - else - echo >&2 "option --$o needs an argument" - continue - fi - ;; - esac - ;; - *) echo >&2 "unknown option --$o"; continue;; - esac - esac - case "$o" in - workdir) - WORKDIR=$OPTARG - ;; - ufrom) - UPGRADE_FROM=$OPTARG - ;; - uto) - UPGRADE_TO=$OPTARG - ;; - gbv) - GENESIS_BINARY_VERSION=$OPTARG - ;; - config) - CONFIG_FILE=$OPTARG - ;; - esac + case $o in + :) + echo >&2 "option -$OPTARG needs an argument" + continue + 
;; + '?') + echo >&2 "bad option -$OPTARG" + continue + ;; + -) + o=${OPTARG%%=*} + OPTARG=${OPTARG#"$o"} + lo=/$long_opts/ + case $lo in + *"/$o"[!/:]*"/$o"[!/:]*) + echo >&2 "ambiguous option --$o" + continue + ;; + *"/$o"[:/]*) ;; + + *) + o=$o${lo#*"/$o"} + o=${o%%[/:]*} + ;; + esac + + case $lo in + *"/$o/"*) + OPTARG= + ;; + *"/$o:/"*) + case $OPTARG in + '='*) + OPTARG=${OPTARG#=} + ;; + *) + eval "OPTARG=\$$OPTIND" + if [ "$OPTIND" -le "$#" ] && [ "$OPTARG" != -- ]; then + OPTIND=$((OPTIND + 1)) + else + echo >&2 "option --$o needs an argument" + continue + fi + ;; + esac + ;; + *) + echo >&2 "unknown option --$o" + continue + ;; + esac + ;; + esac + case "$o" in + workdir) + WORKDIR=$OPTARG + ;; + ufrom) + UPGRADE_FROM=$OPTARG + ;; + uto) + UPGRADE_TO=$OPTARG + ;; + gbv) + GENESIS_BINARY_VERSION=$OPTARG + ;; + config) + CONFIG_FILE=$OPTARG + ;; + esac done shift "$((OPTIND - 1))" GENESIS_ORIG=${UTEST_GENESIS_ORIGIN:=https://github.com/akash-network/testnetify/releases/download/${UPGRADE_FROM}/genesis.json.tar.lz4} +pushd() { + command pushd "$@" >/dev/null +} + +popd() { + command popd >/dev/null +} + function content_type() { - case "$1" in - *.tar.cz*) - tar_cmd="tar -xJ -" - ;; - *.tar.gz*) - tar_cmd="tar xzf -" - ;; - *.tar.lz4*) - tar_cmd="lz4 -d | tar xf -" - ;; - *.tar.zst*) - tar_cmd="zstd -cd | tar xf -" - ;; - *) - tar_cmd="tar xf -" - ;; - esac - - echo "$tar_cmd" + case "$1" in + *.tar.cz*) + tar_cmd="tar -xJ -" + ;; + *.tar.gz*) + tar_cmd="tar xzf -" + ;; + *.tar.lz4*) + tar_cmd="lz4 -d | tar xf -" + ;; + *.tar.zst*) + tar_cmd="zstd -cd | tar xf -" + ;; + *) + tar_cmd="tar xf -" + ;; + esac + + echo "$tar_cmd" } function content_size() { - local size_in_bytes - - size_in_bytes=$(wget "$1" --spider --server-response -O - 2>&1 | grep "Content-Length" | awk '{print $2}' | tr -d '\n') - err=$? 
- case "$size_in_bytes" in - # Value cannot be started with `0`, and must be integer - [1-9]*[0-9]) - echo "$size_in_bytes" - ;; - esac - - return "$err" + local size_in_bytes + + size_in_bytes=$(wget "$1" --spider --server-response -O - 2>&1 | grep "Content-Length" | awk '{print $2}' | tr -d '\n') + err=$? + case "$size_in_bytes" in + # Value cannot be started with `0`, and must be integer + [1-9]*[0-9]) + echo "$size_in_bytes" + ;; + esac + + return "$err" } function content_name() { - name=$(wget "$1" --spider --server-response -O - 2>&1 | grep "Content-Disposition:" | tail -1 | awk -F"filename=" '{print $2}') - # shellcheck disable=SC2181 - if [[ "$name" == "" ]]; then - echo "$1" - else - echo "$name" - fi + name=$(wget "$1" --spider --server-response -O - 2>&1 | grep "Content-Disposition:" | tail -1 | awk -F"filename=" '{print $2}') + # shellcheck disable=SC2181 + if [[ "$name" == "" ]]; then + echo "$1" + else + echo "$name" + fi } uname_arch() { - arch=$(uname -m) - case $arch in - x86_64) arch="amd64" ;; - x86) arch="386" ;; - i686) arch="386" ;; - i386) arch="386" ;; - aarch64) arch="arm64" ;; - armv5*) arch="armv5" ;; - armv6*) arch="armv6" ;; - armv7*) arch="armv7" ;; - esac - echo "${arch}" + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo "${arch}" } untar() { - tarball=$1 - case "${tarball}" in - *.tar.gz | *.tgz) tar -xzf "${tarball}" ;; - *.tar) tar -xf "${tarball}" ;; - *.zip) unzip "${tarball}" ;; - *) - log_err "untar unknown archive format for ${tarball}" - return 1 - ;; - esac + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar -xzf "${tarball}" ;; + *.tar) tar -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} + +function build_bins() { + local genesis_bin + 
local upgrade_bin + + genesis_bin=$1 + upgrade_bin=$2 + + ARCH=$GOARCH OS=$GOOS "$ROOT_DIR"/install.sh -b "$genesis_bin" "$GENESIS_BINARY_VERSION" + + make -sC "$ROOT_DIR" test-bins + + local archive + archive="akash" + + if [[ $GOOS == "darwin" ]]; then + archive="${archive}_darwin_all" + else + archive="${archive}_linux_$(uname_arch)" + fi + + unzip -o "${AKASH_DEVCACHE}/goreleaser/test-bins/${archive}.zip" -d "$upgrade_bin" + chmod +x "$genesis_bin/akash" + chmod +x "$upgrade_bin/akash" } function init() { - if [[ -z "${WORKDIR}" ]]; then - echo "workdir is not set" - echo -e "$USAGE"; - exit 1 - fi - - local config - config=$(cat "$CONFIG_FILE") - - local cnt=0 - local validators_dir=${WORKDIR}/validators - - mkdir -p "${WORKDIR}/validators/logs" - - for val in $(jq -c '.validators[]' <<<"$config"); do - local valdir=$validators_dir/.akash${cnt} - local cosmovisor_dir=$valdir/cosmovisor - local genesis_bin=$cosmovisor_dir/genesis/bin - local upgrade_bin=$cosmovisor_dir/upgrades/$UPGRADE_TO/bin - - local AKASH=$genesis_bin/akash - - mkdir -p "$genesis_bin" - mkdir -p "$upgrade_bin" - - if [[ $cnt -eq 0 ]]; then - "$ROOT_DIR"/install.sh -b "$genesis_bin" "$GENESIS_BINARY_VERSION" - - AKASH=$upgrade_bin/akash make -sC "$ROOT_DIR" akash - else - cp "$validators_dir/.akash0/cosmovisor/genesis/bin/akash" "$genesis_bin/akash" - cp "$validators_dir/.akash0/cosmovisor/upgrades/$UPGRADE_TO/bin/akash" "$upgrade_bin/akash" - fi - - $AKASH init --home "$valdir" "$(jq -rc '.moniker' <<<"$val")" > /dev/null 2>&1 - - if [[ $cnt -eq 0 ]]; then - pushd "$(pwd)" - cd "$valdir/config" - - if [[ "${GENESIS_ORIG}" =~ ^https?:\/\/.* ]]; then - echo "Downloading genesis from $GENESIS_ORIG" - wget -qO - "$GENESIS_ORIG" | lz4 - -d | tar xf - -C "$valdir/config" - - pv_args="-petrafb -i 5" - sz=$(content_size "$GENESIS_ORIG") - # shellcheck disable=SC2181 - if [ $? 
-eq 0 ]; then - if [[ -n $sz ]]; then - pv_args+=" -s $sz" - fi - - tar_cmd=$(content_type "$(content_name "$GENESIS_ORIG")") - - # shellcheck disable=SC2086 - (wget -nv -O - "$GENESIS_ORIG" | pv $pv_args | eval " $tar_cmd") 2>&1 | stdbuf -o0 tr '\r' '\n' - else - echo "unable to download genesis" - fi - else - echo "Unpacking genesis from $GENESIS_ORIG" - tar_cmd=$(content_type "$GENESIS_ORIG") - # shellcheck disable=SC2086 - (pv -petrafb -i 5 "$GENESIS_ORIG" | eval "$tar_cmd") 2>&1 | stdbuf -o0 tr '\r' '\n' - fi - - popd - - jq -c '.mnemonics[]' <<<"$config" | while read -r mnemonic; do - jq -c '.keys[]' <<<"$mnemonic" | while read -r key; do - jq -rc '.phrase' <<<"$mnemonic" | $AKASH --home="$valdir" --keyring-backend=test keys add "$(jq -rc '.name' <<<"$key")" --recover --index "$(jq -rc '.index' <<<"$key")" - done - done - else - cp -r "$validators_dir/.akash0/config/genesis.json" "$valdir/config/genesis.json" - fi - - jq -r '.keys.priv' <<< "$val" > "$valdir/config/priv_validator_key.json" - jq -r '.keys.node' <<< "$val" > "$valdir/config/priv_validator_key.json" - - ((cnt++)) || true - done + if [[ -z "${WORKDIR}" ]]; then + echo "workdir is not set" + echo -e "$USAGE" + exit 1 + fi + + local config + config=$(cat "$CONFIG_FILE") + + local cnt=0 + local validators_dir=${WORKDIR}/validators + + mkdir -p "${WORKDIR}/validators/logs" + + for val in $(jq -c '.validators[]' <<<"$config"); do + local valdir + local cosmovisor_dir + local genesis_bin + local upgrade_bin + local AKASH + + valdir=$validators_dir/.akash${cnt} + cosmovisor_dir=$valdir/cosmovisor + genesis_bin=$cosmovisor_dir/genesis/bin + upgrade_bin=$cosmovisor_dir/upgrades/$UPGRADE_TO/bin + + mkdir -p "$genesis_bin" + mkdir -p "$upgrade_bin" + + if [[ $cnt -eq 0 ]]; then + build_bins "$genesis_bin" "$upgrade_bin" + else + cp "$validators_dir/.akash0/cosmovisor/genesis/bin/akash" "$genesis_bin/akash" + cp "$validators_dir/.akash0/cosmovisor/upgrades/$UPGRADE_TO/bin/akash" "$upgrade_bin/akash" + fi + + 
AKASH=$genesis_bin/akash + + $AKASH init --home "$valdir" "$(jq -rc '.moniker' <<<"$val")" >/dev/null 2>&1 + + if [[ $cnt -eq 0 ]]; then + pushd "$(pwd)" + cd "$valdir/config" + + if [[ "${GENESIS_ORIG}" =~ ^https?:\/\/.* ]]; then + echo "Downloading genesis from $GENESIS_ORIG" + + pv_args="-petrafb -i 5" + sz=$(content_size "$GENESIS_ORIG") + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + if [[ -n $sz ]]; then + pv_args+=" -s $sz" + fi + + tar_cmd=$(content_type "$(content_name "$GENESIS_ORIG")") + + # shellcheck disable=SC2086 + wget -nv -O - "$GENESIS_ORIG" | pv $pv_args | eval "$tar_cmd" + else + echo "unable to download genesis" + fi + else + echo "Unpacking genesis from $GENESIS_ORIG" + tar_cmd=$(content_type "$GENESIS_ORIG") + # shellcheck disable=SC2086 + (pv -petrafb -i 5 "$GENESIS_ORIG" | eval "$tar_cmd") 2>&1 | stdbuf -o0 tr '\r' '\n' + fi + + popd + + jq -c '.mnemonics[]' <<<"$config" | while read -r mnemonic; do + jq -c '.keys[]' <<<"$mnemonic" | while read -r key; do + jq -rc '.phrase' <<<"$mnemonic" | $AKASH --home="$valdir" --keyring-backend=test keys add "$(jq -rc '.name' <<<"$key")" --recover --index "$(jq -rc '.index' <<<"$key")" + done + done + + cat >"$valdir/.envrc" <"$valdir/config/priv_validator_key.json" + jq -r '.keys.node' <<<"$val" >"$valdir/config/node_key.json" + + ((cnt++)) || true + done } function clean() { - if [[ -z "${WORKDIR}" ]]; then - echo "workdir is not set" - echo -e "$USAGE"; - exit 1 - fi + if [[ -z "${WORKDIR}" ]]; then + echo "workdir is not set" + echo -e "$USAGE" + exit 1 + fi - local config - config=$(cat "$CONFIG_FILE") + local config + config=$(cat "$CONFIG_FILE") - local cnt=0 - local validators_dir=${WORKDIR}/validators + local cnt=0 + local validators_dir=${WORKDIR}/validators - for val in $(jq -c '.validators[]' <<<"$config"); do - local valdir=$validators_dir/.akash${cnt} - local cosmovisor_dir=$valdir/cosmovisor + for val in $(jq -c '.validators[]' <<<"$config"); do + local 
valdir=$validators_dir/.akash${cnt} + local cosmovisor_dir=$valdir/cosmovisor - rm -rf "$validators_dir/logs/.akash${cnt}-stderr.log" - rm -rf "$validators_dir/logs/.akash${cnt}-stdout.log" + rm -rf "$validators_dir/logs/.akash${cnt}-stderr.log" + rm -rf "$validators_dir/logs/.akash${cnt}-stdout.log" - rm -rf "$valdir"/data/* - rm -rf "$cosmovisor_dir/current" - rm -rf "$cosmovisor_dir/upgrades/${UPGRADE_TO}/upgrade-info.json" + rm -rf "$valdir"/data/* + rm -rf "$cosmovisor_dir/current" + rm -rf "$cosmovisor_dir/upgrades/${UPGRADE_TO}/upgrade-info.json" + rm -rf "$cosmovisor_dir/upgrades/${UPGRADE_TO}/bin/akash" - echo '{"height":"0","round": 0,"step": 0}' > "$valdir/data/priv_validator_state.json" + echo '{"height":"0","round": 0,"step": 0}' | jq > "$valdir/data/priv_validator_state.json" + + ((cnt++)) || true + done +} + +function import_keys() { + if [[ -z "${WORKDIR}" ]]; then + echo "workdir is not set" + echo -e "$USAGE" + exit 1 + fi + + local config + local validators_dir + local cosmovisor_dir + local genesis_bin + local validators_dir + + config=$(cat "$CONFIG_FILE") + + validators_dir=${WORKDIR}/validators + valdir=$validators_dir/.akash0 + cosmovisor_dir=$valdir/cosmovisor + genesis_bin=$cosmovisor_dir/genesis/bin + + # upgrades may upgrade keys format so reset them as well + rm -rf "$valdir"/keyring-test + + local AKASH + AKASH=$genesis_bin/akash + + jq -c '.mnemonics[]' <<<"$config" | while read -r mnemonic; do + jq -c '.keys[]' <<<"$mnemonic" | while read -r key; do + jq -rc '.phrase' <<<"$mnemonic" | $AKASH --home="$valdir" --keyring-backend=test keys add "$(jq -rc '.name' <<<"$key")" --recover --index "$(jq -rc '.index' <<<"$key")" + done + done +} - ((cnt++)) || true - done +function bins() { + if [[ -z "${WORKDIR}" ]]; then + echo "workdir is not set" + echo -e "$USAGE" + exit 1 + fi + + local config + config=$(cat "$CONFIG_FILE") + + local cnt=0 + local validators_dir=${WORKDIR}/validators + + for val in $(jq -c '.validators[]' <<<"$config"); do 
+ local valdir + local cosmovisor_dir + local genesis_bin + local upgrade_bin + + valdir=$validators_dir/.akash${cnt} + cosmovisor_dir=$valdir/cosmovisor + genesis_bin=$cosmovisor_dir/genesis/bin + upgrade_bin=$cosmovisor_dir/upgrades/$UPGRADE_TO/bin + + mkdir -p "$genesis_bin" + mkdir -p "$upgrade_bin" + + if [[ $cnt -eq 0 ]]; then + build_bins "$genesis_bin" "$upgrade_bin" + else + cp "$validators_dir/.akash0/cosmovisor/genesis/bin/akash" "$genesis_bin/akash" + cp "$validators_dir/.akash0/cosmovisor/upgrades/$UPGRADE_TO/bin/akash" "$upgrade_bin/akash" + fi + + ((cnt++)) || true + done } case "$1" in -init) - shift - init - ;; -clean) - shift - clean - ;; -upgrade-from-release) - shift - upgrades_dir=${ROOT_DIR}/upgrades/software - upgrade_name=$(find "${upgrades_dir}" -mindepth 1 -maxdepth 1 -type d | awk -F/ '{print $NF}' | sort -r | head -n 1) - - # shellcheck disable=SC2086 - $semver validate $upgrade_name - echo -e "$upgrade_name" - exit 0 - - ;; -test-required) - shift - curr_ref=$1 - - upgrades_dir=${ROOT_DIR}/upgrades/software - upgrade_name=$(find "${upgrades_dir}" -mindepth 1 -maxdepth 1 -type d | awk -F/ '{print $NF}' | sort -r | head -n 1) - - # shellcheck disable=SC2086 - $semver validate $upgrade_name - - # current git reference is matching upgrade name. 
looks like release has been cut - # so lets run the last test - if [[ "$curr_ref" == "$upgrade_name" ]]; then - echo -e "$upgrade_name" - exit 0 - fi - - cnt=0 - - retracted_versions=$(go mod edit --json | jq -cr .Retract) - - while :; do - cnt=$((cnt+1)) - if [[ $cnt -gt 100 ]];then - echoerr "unable to determine tag to test upgrade" - exit 1 - fi - - # shellcheck disable=SC2086 - if git show-ref --tags $upgrade_name >/dev/null 2>&1; then - is_retracted=false - for retracted in $(jq -c '.[]' <<<"$retracted_versions"); do - vLow=$(jq -rc '.Low' <<<"$retracted") - vHigh=$(jq -rc '.High' <<<"$retracted") - tagsAreEqual=$($semver compare $vLow $vHigh) - - isTagInHigh=$($semver compare $upgrade_name $vHigh) - if [[ $isTagInHigh -le 0 ]]; then - if [[ $isTagInHigh -eq 0 ]]; then - is_retracted=true - break - elif [[ $tagsAreEqual -ne 0 ]]; then - isTagInLow=$($semver compare $upgrade_name $vLow) - if [[ $isTagInLow -ge 0 ]]; then - upgrade_name=$vHigh - is_retracted=true - break - fi - fi - fi - done - - if [[ $is_retracted == "true" ]]; then - upgrade_name=v$($semver bump patch $upgrade_name) - else - upgrade_name="" - break - fi - else - break - fi - done - - echo -n "$upgrade_name" - - exit 0 - ;; ---help | -h) - echo -e "$USAGE"; - exit 0 - ;; -*) - echo "unknown command $1" - echo -e "$USAGE"; - exit 1 - ;; + init) + shift + init + ;; + bins) + shift + bins + ;; + keys) + shift + import_keys + ;; + clean) + shift + clean + ;; + upgrade-from-release) + shift + upgrades_dir=${ROOT_DIR}/upgrades/software + upgrade_name=$(find "${upgrades_dir}" -mindepth 1 -maxdepth 1 -type d | awk -F/ '{print $NF}' | sort -r | head -n 1) + + # shellcheck disable=SC2086 + res=$($semver validate $upgrade_name) + if [[ "$res" == "valid" ]]; then + echo -e "$upgrade_name" + exit 0 + else + exit 1 + fi + + ;; + test-required) + shift + curr_ref=$1 + + upgrades_dir=${ROOT_DIR}/upgrades/software + upgrade_name=$(find "${upgrades_dir}" -mindepth 1 -maxdepth 1 -type d | awk -F/ '{print $NF}' | 
sort -r | head -n 1) + + # shellcheck disable=SC2086 + is_valid=$($semver validate $upgrade_name) + if [[ $is_valid != "valid" ]]; then + echoerr "upgrade name \"$upgrade_name\" does not comply with semver spec" + exit 1 + fi + + # current git reference is matching upgrade name. looks like release has been cut + # so lets run the last test + if [[ "$curr_ref" == "$upgrade_name" ]]; then + echo -e "$upgrade_name" + exit 0 + fi + + cnt=0 + + retracted_versions=$(go mod edit --json | jq -cr .Retract) + + while :; do + cnt=$((cnt + 1)) + if [[ $cnt -gt 100 ]]; then + echoerr "unable to determine tag to test upgrade" + exit 1 + fi + + # shellcheck disable=SC2086 + if git show-ref --tags $upgrade_name >/dev/null 2>&1; then + is_retracted=false + for retracted in $(jq -c '.[]' <<<"$retracted_versions"); do + vLow=$(jq -rc '.Low' <<<"$retracted") + vHigh=$(jq -rc '.High' <<<"$retracted") + tagsAreEqual=$($semver compare $vLow $vHigh) + + isTagInHigh=$($semver compare $upgrade_name $vHigh) + if [[ $isTagInHigh -le 0 ]]; then + if [[ $isTagInHigh -eq 0 ]]; then + is_retracted=true + break + elif [[ $tagsAreEqual -ne 0 ]]; then + isTagInLow=$($semver compare $upgrade_name $vLow) + if [[ $isTagInLow -ge 0 ]]; then + upgrade_name=$vHigh + is_retracted=true + break + fi + fi + fi + done + + if [[ $is_retracted == "true" ]]; then + upgrade_name=v$($semver bump patch $upgrade_name) + else + upgrade_name="" + break + fi + else + break + fi + done + + echo -n "$upgrade_name" + + exit 0 + ;; + --help | -h) + echo -e "$USAGE" + exit 0 + ;; + *) + echo "unknown command $1" + echo -e "$USAGE" + exit 1 + ;; esac diff --git a/tests/upgrade/test-cases.json b/tests/upgrade/test-cases.json index c7887e704e..3ad729b582 100644 --- a/tests/upgrade/test-cases.json +++ b/tests/upgrade/test-cases.json @@ -1,58 +1,30 @@ { - "v0.36.0": { - }, - "v0.34.0": { + "v0.38.0": { "migrations": { - } - }, - "v0.32.0": { - "migrations": { - "market": { - "from": "4", - "to": "5" - } - } - }, - "v0.30.0": { - 
}, - "v0.28.0": { - "migrations": { - "market": { - "from": "3", - "to": "4" - } - } - }, - "v0.26.0": { - "modules": { - "added": [ + "cert": [ + { + "from": "2", + "to": "3" + } + ], + "market": [ + { + "from": "5", + "to": "6" + } + ], + "deployment": [ + { + "from": "3", + "to": "4" + } + ], + "authz": [ + { + "from": "1", + "to": "2" + } ] - }, - "migrations": { - } - }, - "v0.24.0": { - "modules": { - "added": [ - "agov", - "astaking", - "feegrant", - "take" - ] - }, - "migrations": { - "deployment": { - "from": "2", - "to": "3" - }, - "market": { - "from": "2", - "to": "3" - }, - "transfer": { - "from": "1", - "to": "2" - } } } } diff --git a/tests/upgrade/types/types.go b/tests/upgrade/types/types.go index bf1feff5ba..eef62eaa88 100644 --- a/tests/upgrade/types/types.go +++ b/tests/upgrade/types/types.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" ) type TestParams struct { @@ -12,6 +14,7 @@ type TestParams struct { ChainID string KeyringBackend string From string + FromAddress sdk.AccAddress } type TestWorker interface { diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 01af17d744..507806caf9 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -6,6 +6,7 @@ import ( "bufio" "context" "encoding/json" + "errors" "flag" "fmt" "io" @@ -35,7 +36,7 @@ import ( ) const ( - blockTimeWindow = 7 * time.Second + blockTimeWindow = 20 * time.Minute ) type nodeEvent int @@ -65,7 +66,8 @@ const ( const ( nodeTestStagePreUpgrade nodeTestStage = iota nodeTestStageUpgrade - nodeTestStagePostUpgrade + nodeTestStagePostUpgrade1 + nodeTestStagePostUpgrade2 ) const ( @@ -86,9 +88,10 @@ type publisher interface { var ( nodeTestStageMapStr = map[nodeTestStage]string{ - nodeTestStagePreUpgrade: "preupgrade", - nodeTestStageUpgrade: "upgrade", - nodeTestStagePostUpgrade: "postupgrade", + nodeTestStagePreUpgrade: "preupgrade", + nodeTestStageUpgrade: "upgrade", + 
nodeTestStagePostUpgrade1: "postupgrade1", + nodeTestStagePostUpgrade2: "postupgrade2", } testModuleStatusMapStr = map[testModuleStatus]string{ @@ -161,6 +164,13 @@ type nodeStatus struct { } `json:"SyncInfo"` } +type testMigration struct { + From string `json:"from"` + To string `json:"to"` +} + +type moduleMigrationVersions []testMigration + type testCase struct { Modules struct { Added []string `json:"added"` @@ -170,10 +180,7 @@ type testCase struct { To string `json:"to"` } `json:"renamed"` } `json:"modules"` - Migrations map[string]struct { - From string `json:"from"` - To string `json:"to"` - } `json:"migrations"` + Migrations map[string]moduleMigrationVersions `json:"migrations"` } type testCases map[string]testCase @@ -189,7 +196,7 @@ type validatorParams struct { rpcPort uint16 upgradeName string env []string - pub pubsub.Publisher + bus pubsub.Publisher } type validator struct { @@ -234,12 +241,18 @@ type upgradeTest struct { validators map[string]*validator } +type nodePortsRPC struct { + port uint16 + grpc uint16 +} type nodeInitParams struct { - nodeID string - homedir string - p2pPort uint16 - rpcPort uint16 - pprofPort uint16 + nodeID string + homedir string + rpc nodePortsRPC + p2pPort uint16 + grpcPort uint16 + grpcWebPort uint16 + pprofPort uint16 } var ( @@ -372,23 +385,30 @@ func TestUpgrade(t *testing.T) { fmt.Sprintf("AKASH_FROM=%s", cfg.Work.Key), fmt.Sprintf("AKASH_GAS_PRICES=0.0025uakt"), fmt.Sprintf("AKASH_GAS_ADJUSTMENT=2"), - // auto is failing with rpc error: code = Unknown desc = unknown query path: unknown request - fmt.Sprintf("AKASH_GAS=500000"), + fmt.Sprintf("AKASH_P2P_PEX=false"), + fmt.Sprintf("AKASH_MINIMUM_GAS_PRICES=0.0025uakt"), + fmt.Sprintf("AKASH_GAS=auto"), fmt.Sprintf("AKASH_YES=true"), }, } if cfg.Work.Home == name { + cmdr = valCmd + + output, err := cmdr.execute(ctx, fmt.Sprintf("keys show %s -a", cfg.Work.Key)) + require.NoError(t, err) + + addr, err := sdk.AccAddressFromBech32(strings.Trim(string(output), "\n")) + 
require.NoError(t, err) + + t.Logf("validator address: \"%s\"", addr.String()) + postUpgradeParams.Home = homedir postUpgradeParams.ChainID = cfg.ChainID postUpgradeParams.Node = "tcp://127.0.0.1:26657" postUpgradeParams.KeyringBackend = "test" postUpgradeParams.From = cfg.Work.Key - - cmdr = valCmd - - _, err = cmdr.execute(ctx, fmt.Sprintf("keys show %s -a", cfg.Work.Key)) - require.NoError(t, err) + postUpgradeParams.FromAddress = addr } cmdr.env = append(cmdr.env, fmt.Sprintf("AKASH_OUTPUT=json")) @@ -408,14 +428,21 @@ func TestUpgrade(t *testing.T) { p2pPort := 26656 + uint16(idx*2) initParams[name] = nodeInitParams{ - nodeID: strings.TrimSpace(string(res)), - homedir: homedir, - p2pPort: p2pPort, - rpcPort: p2pPort + 1, - pprofPort: 6060 + uint16(idx), + nodeID: strings.TrimSpace(string(res)), + homedir: homedir, + p2pPort: p2pPort, + rpc: nodePortsRPC{ + port: p2pPort + 1, + grpc: 9092 + uint16(idx*3), + }, + grpcPort: 9090 + uint16(idx*3), + grpcWebPort: 9091 + uint16(idx*3), + pprofPort: 6060 + uint16(idx), } } + listenAddr := "127.0.0.1" + for name, params := range initParams { var unconditionalPeerIDs string var persistentPeers string @@ -426,7 +453,7 @@ func TestUpgrade(t *testing.T) { } unconditionalPeerIDs += params1.nodeID + "," - persistentPeers += fmt.Sprintf("%s@127.0.0.1:%d,", params1.nodeID, params1.p2pPort) + persistentPeers += fmt.Sprintf("%s@%s:%d,", params1.nodeID, listenAddr, params1.p2pPort) } validatorsParams[name] = validatorParams{ @@ -437,30 +464,33 @@ func TestUpgrade(t *testing.T) { cosmovisor: *cosmovisor, isRPC: cfg.Work.Home == name, p2pPort: params.p2pPort, - rpcPort: params.rpcPort, + rpcPort: params.rpc.port, upgradeName: *upgradeName, - pub: bus, + bus: bus, env: []string{ - fmt.Sprintf("DAEMON_NAME=akash"), fmt.Sprintf("DAEMON_HOME=%s", params.homedir), - fmt.Sprintf("DAEMON_RESTART_AFTER_UPGRADE=true"), - fmt.Sprintf("DAEMON_ALLOW_DOWNLOAD_BINARIES=true"), - fmt.Sprintf("DAEMON_RESTART_DELAY=3s"), - 
fmt.Sprintf("COSMOVISOR_COLOR_LOGS=false"), - fmt.Sprintf("UNSAFE_SKIP_BACKUP=true"), fmt.Sprintf("HOME=%s", *workdir), fmt.Sprintf("AKASH_HOME=%s", params.homedir), fmt.Sprintf("AKASH_CHAIN_ID=%s", cfg.ChainID), - fmt.Sprintf("AKASH_KEYRING_BACKEND=test"), - fmt.Sprintf("AKASH_P2P_SEEDS=%s", strings.TrimSuffix(persistentPeers, ",")), fmt.Sprintf("AKASH_P2P_PERSISTENT_PEERS=%s", strings.TrimSuffix(persistentPeers, ",")), fmt.Sprintf("AKASH_P2P_UNCONDITIONAL_PEER_IDS=%s", strings.TrimSuffix(unconditionalPeerIDs, ",")), - fmt.Sprintf("AKASH_P2P_LADDR=tcp://127.0.0.1:%d", params.p2pPort), - fmt.Sprintf("AKASH_RPC_LADDR=tcp://127.0.0.1:%d", params.rpcPort), - fmt.Sprintf("AKASH_RPC_PPROF_LADDR=localhost:%d", params.pprofPort), + fmt.Sprintf("AKASH_P2P_LADDR=tcp://%s:%d", listenAddr, params.p2pPort), + fmt.Sprintf("AKASH_RPC_LADDR=tcp://%s:%d", listenAddr, params.rpc.port), + // fmt.Sprintf("AKASH_RPC_GRPC_LADDR=tcp://%s:%d", listenAddr, params.rpc.grpc), + fmt.Sprintf("AKASH_RPC_PPROF_LADDR=%s:%d", listenAddr, params.pprofPort), + fmt.Sprintf("AKASH_GRPC_ADDRESS=%s:%d", listenAddr, params.grpcPort), + fmt.Sprintf("AKASH_GRPC_WEB_ADDRESS=%s:%d", listenAddr, params.grpcWebPort), + "DAEMON_NAME=akash", + "DAEMON_RESTART_AFTER_UPGRADE=true", + "DAEMON_ALLOW_DOWNLOAD_BINARIES=true", + "DAEMON_RESTART_DELAY=3s", + "COSMOVISOR_COLOR_LOGS=false", + "UNSAFE_SKIP_BACKUP=true", + "AKASH_KEYRING_BACKEND=test", "AKASH_P2P_PEX=true", "AKASH_P2P_ADDR_BOOK_STRICT=false", "AKASH_P2P_ALLOW_DUPLICATE_IP=true", + "AKASH_P2P_SEEDS=", "AKASH_MINIMUM_GAS_PRICES=0.0025uakt", "AKASH_FAST_SYNC=false", "AKASH_LOG_COLOR=false", @@ -468,8 +498,8 @@ func TestUpgrade(t *testing.T) { "AKASH_LOG_FORMAT=plain", "AKASH_STATESYNC_ENABLE=false", "AKASH_TX_INDEX_INDEXER=null", - "AKASH_GRPC_ENABLE=false", - "AKASH_GRPC_WEB_ENABLE=false", + "AKASH_GRPC_ENABLE=true", + "AKASH_GRPC_WEB_ENABLE=true", }, } } @@ -505,14 +535,15 @@ func TestUpgrade(t *testing.T) { }(name) } + t.Logf("waiting for validator(s) to 
complete tasks") err = group.Wait() + t.Logf("all validators finished") assert.NoError(t, err) fail := false for val, vl := range validators { - select { - case errs := <-vl.testErrsCh: + for errs := range vl.testErrsCh { if len(errs) > 0 { for _, msg := range errs { t.Logf("[%s] %s", val, msg) @@ -520,11 +551,11 @@ func TestUpgrade(t *testing.T) { fail = true } - - case <-vl.ctx.Done(): } } + bus.Close() + if fail { t.Fail() } @@ -588,8 +619,10 @@ loop: }) if !result { - l.t.Error("post upgrade test handler failed") + l.t.Error("post upgrade test FAIL") return fmt.Errorf("post-upgrade check failed") + } else { + l.t.Log("post upgrade test PASS") } return nil @@ -709,7 +742,18 @@ func (l *upgradeTest) submitUpgradeProposal() error { return err } - cmdRes, err = l.cmdr.execute(l.ctx, "query gov proposals") + // give it two blocks to make sure proposal has been commited + tmctx, cancel := context.WithTimeout(l.ctx, 12*time.Second) + defer cancel() + + <-tmctx.Done() + + if !errors.Is(tmctx.Err(), context.DeadlineExceeded) { + l.t.Logf("error waiting for deadline\n") + return tmctx.Err() + } + + cmdRes, err = l.cmdr.execute(l.ctx, "query gov proposals --status=voting_period") if err != nil { l.t.Logf("executing cmd failed: %s\n", string(cmdRes)) return err @@ -798,7 +842,7 @@ func (l *validator) run() error { cmd := exec.CommandContext(l.ctx, l.params.cosmovisor, "run", "start", fmt.Sprintf("--home=%s", l.params.homedir)) - cmd.Stdout = io.MultiWriter(lStdout, wStdout) + cmd.Stdout = io.MultiWriter(wStdout, lStdout) cmd.Stderr = io.MultiWriter(lStderr) cmd.Env = l.params.env @@ -809,6 +853,9 @@ func (l *validator) run() error { } l.group.Go(func() error { + defer l.t.Logf("[%s] log scanner finished", l.params.name) + l.t.Logf("[%s] log scanner started", l.params.name) + defer func() { if r := recover(); r != nil { l.t.Fatal(r) @@ -819,6 +866,9 @@ func (l *validator) run() error { }) l.group.Go(func() error { + defer l.t.Logf("[%s] test case watcher finished", 
l.params.name) + l.t.Logf("[%s] test case watcher started", l.params.name) + defer func() { if r := recover(); r != nil { l.t.Fatal(r) @@ -834,6 +884,9 @@ func (l *validator) run() error { }) l.group.Go(func() error { + defer l.t.Logf("[%s] stdout reader finished", l.params.name) + l.t.Logf("[%s] stdout reader started", l.params.name) + defer func() { if r := recover(); r != nil { l.t.Fatal(r) @@ -848,6 +901,9 @@ func (l *validator) run() error { }) l.group.Go(func() error { + defer l.t.Logf("[%s] blocks watchdog finished", l.params.name) + l.t.Logf("[%s] blocks watchdog started", l.params.name) + defer func() { if r := recover(); r != nil { l.t.Fatal(r) @@ -859,18 +915,23 @@ func (l *validator) run() error { return err } + defer sub.Close() + return l.blocksWatchdog(l.ctx, sub) }) // state machine l.group.Go(func() error { + defer l.t.Logf("[%s] state machine finished", l.params.name) + l.t.Logf("[%s] state machine started", l.params.name) + defer func() { if r := recover(); r != nil { l.t.Fatal(r) } }() - return l.stateMachine(l.pubsub) + return l.stateMachine() }) err = cmd.Wait() @@ -883,21 +944,22 @@ func (l *validator) run() error { select { case <-l.upgradeSuccessful: err = nil + l.t.Logf("[%s] all workers finished", l.params.name) default: - l.t.Logf("[%s] cosmovisor finished with error. check %[1]s-stderr.log", l.params.name) + l.t.Logf("[%s] cosmovisor finished with error. 
check %s", l.params.name, lStderr.Name()) } return err } -func (l *validator) stateMachine(bus pubsub.Bus) error { +func (l *validator) stateMachine() error { defer l.cancel() var err error var sub pubsub.Subscriber - sub, err = bus.Subscribe() + sub, err = l.pubsub.Subscribe() if err != nil { return err } @@ -908,7 +970,7 @@ func (l *validator) stateMachine(bus pubsub.Bus) error { wdCtrl := func(ctx context.Context, ctrl watchdogCtrl) { resp := make(chan struct{}, 1) - _ = bus.Publish(wdReq{ + _ = l.pubsub.Publish(wdReq{ event: ctrl, resp: resp, }) @@ -932,7 +994,7 @@ loop: case nodeEventStart: l.t.Logf("[%s][%s]: node started", l.params.name, nodeTestStageMapStr[stage]) if stage == nodeTestStageUpgrade { - stage = nodeTestStagePostUpgrade + stage = nodeTestStagePostUpgrade1 blocksCount = 0 replayDone = false } @@ -956,14 +1018,15 @@ loop: } if stage == nodeTestStagePreUpgrade && blocksCount == 1 { - _ = l.params.pub.Publish(nodePreUpgradeReady{ + _ = l.params.bus.Publish(nodePreUpgradeReady{ name: l.params.name, }) - } else if stage == nodeTestStagePostUpgrade && blocksCount == 10 { + } else if stage == nodeTestStagePostUpgrade1 && blocksCount >= 10 { + stage = nodeTestStagePostUpgrade2 l.t.Logf("[%s][%s]: counted 10 blocks. 
signaling has performed upgrade", l.params.name, nodeTestStageMapStr[stage]) l.upgradeSuccessful <- struct{}{} - _ = l.params.pub.Publish(nodePostUpgradeReady{ + _ = l.params.bus.Publish(nodePostUpgradeReady{ name: l.params.name, }) } @@ -971,11 +1034,13 @@ loop: l.t.Logf("[%s][%s]: node detected upgrade", l.params.name, nodeTestStageMapStr[stage]) stage = nodeTestStageUpgrade wdCtrl(l.ctx, watchdogCtrlPause) + default: } case eventShutdown: l.t.Logf("[%s][%s]: received shutdown signal", l.params.name, nodeTestStageMapStr[stage]) wdCtrl(l.ctx, watchdogCtrlStop) break loop + default: } } } @@ -988,6 +1053,10 @@ loop: } func (l *validator) watchTestCases(subs pubsub.Subscriber) error { + defer func() { + close(l.testErrsCh) + }() + added := make(map[string]testModuleStatus) removed := make(map[string]testModuleStatus) migrations := make(map[string]*moduleMigrationStatus) @@ -1002,11 +1071,8 @@ func (l *validator) watchTestCases(subs pubsub.Subscriber) error { for name, vals := range l.tConfig.Migrations { migrations[name] = &moduleMigrationStatus{ - status: testModuleStatusNotChecked, - expected: moduleMigrationVersions{ - from: vals.From, - to: vals.To, - }, + status: testModuleStatusNotChecked, + expected: vals, } } @@ -1041,9 +1107,12 @@ loop: m := migrations[ctx.name] m.status = testModuleStatusChecked - m.actual.to = ctx.to - m.actual.from = ctx.from + m.actual = append(m.actual, testMigration{ + From: ctx.from, + To: ctx.to, + }) } + default: } } } @@ -1069,16 +1138,22 @@ loop: switch module.status { case testModuleStatusChecked: if !module.expected.compare(module.actual) { - merr := fmt.Sprintf("migration for module (%s) finished with mismatched versions:\n"+ - "\texpected:\n"+ - "\t\tfrom: %s\n"+ - "\t\tto: %s\n"+ - "\tactual:\n"+ - "\t\tfrom: %s\n"+ - "\t\tto: %s", - name, - module.expected.from, module.expected.to, - module.actual.from, module.actual.to) + merr := "migration for module (%s) finished with mismatched versions:\n" + merr += "\texpected:\n" + + 
for _, m := range module.expected { + merr += fmt.Sprintf( + "\t\t- from: %s\n"+ + "\t\t to: %s\n", m.From, m.To) + } + + merr += "\tactual:\n" + + for _, m := range module.actual { + merr += fmt.Sprintf( + "\t\t- from: %s\n"+ + "\t\t to: %s\n", m.From, m.To) + } errs = append(errs, merr) } @@ -1101,15 +1176,24 @@ func (l *validator) blocksWatchdog(ctx context.Context, sub pubsub.Subscriber) e defer func() { if err != nil { - l.t.Logf("blocksWatchdog finished with error: %s", err.Error()) + l.t.Logf("[%s] %s", l.params.name, err.Error()) + } else { + l.t.Logf("[%s] blocksWatchdog finished", l.params.name) } }() + // first few blocks may take a while to produce. + // give a dog generous timeout on them + + blockWindow := 20 * time.Minute + + blocksTm := time.NewTicker(blockWindow) + blocksTm.Stop() + + blocks := 0 + loop: for { - blocksTm := time.NewTicker(blockTimeWindow) - blocksTm.Stop() - select { case <-ctx.Done(): break loop @@ -1125,11 +1209,21 @@ loop: case watchdogCtrlStart: fallthrough case watchdogCtrlBlock: - blocksTm.Reset(blockTimeWindow) + blocks++ + + if blocks > 3 { + blockWindow = blockTimeWindow + } + + blocksTm.Reset(blockWindow) case watchdogCtrlPause: blocksTm.Stop() + blocks = 0 + blockWindow = 20 * time.Minute case watchdogCtrlStop: + blocks = 0 blocksTm.Stop() + blockWindow = 20 * time.Minute break loop } } @@ -1155,7 +1249,7 @@ func (l *validator) scanner(stdout io.Reader, p publisher) error { return err } - rModuleMigration, err := regexp.Compile(`^` + migratingModule + `(\w+) from version (\d+) to version (\d+)$`) + rModuleMigration, err := regexp.Compile(`^` + migratingModule + `(\w+) from version (\d+) to version (\d+).*`) if err != nil { return err } @@ -1202,11 +1296,6 @@ scan: return nil } -type moduleMigrationVersions struct { - from string - to string -} - type moduleMigrationStatus struct { status testModuleStatus expected moduleMigrationVersions @@ -1214,5 +1303,15 @@ type moduleMigrationStatus struct { } func (v 
moduleMigrationVersions) compare(to moduleMigrationVersions) bool { - return (v.from == to.from) && (v.to == v.to) + if len(v) != len(to) { + return false + } + + for i := range v { + if (v[i].From != to[i].From) || (v[i].To != to[i].To) { + return false + } + } + + return true } diff --git a/testutil/state/suite.go b/testutil/state/suite.go index e80f70b605..7af5316d51 100644 --- a/testutil/state/suite.go +++ b/testutil/state/suite.go @@ -90,24 +90,24 @@ func SetupTestSuiteWithKeepers(t testing.TB, keepers Keepers) *TestSuite { app := app.Setup(false) if keepers.Audit == nil { - keepers.Audit = akeeper.NewKeeper(atypes.ModuleCdc, app.GetKey(atypes.StoreKey)) + keepers.Audit = akeeper.NewKeeper(atypes.ModuleCdc, app.GetKey(atypes.ModuleName)) } if keepers.Take == nil { - keepers.Take = tkeeper.NewKeeper(ttypes.ModuleCdc, app.GetKey(ttypes.StoreKey), app.GetSubspace(ttypes.ModuleName)) + keepers.Take = tkeeper.NewKeeper(ttypes.ModuleCdc, app.GetKey(ttypes.ModuleName), app.GetSubspace(ttypes.ModuleName)) } if keepers.Escrow == nil { - keepers.Escrow = ekeeper.NewKeeper(etypes.ModuleCdc, app.GetKey(etypes.StoreKey), keepers.Bank, keepers.Take, keepers.Distr, keepers.Authz) + keepers.Escrow = ekeeper.NewKeeper(etypes.ModuleCdc, app.GetKey(etypes.ModuleName), keepers.Bank, keepers.Take, keepers.Distr, keepers.Authz) } if keepers.Market == nil { - keepers.Market = mkeeper.NewKeeper(mtypes.ModuleCdc, app.GetKey(mtypes.StoreKey), app.GetSubspace(mtypes.ModuleName), keepers.Escrow) + keepers.Market = mkeeper.NewKeeper(mtypes.ModuleCdc, app.GetKey(mtypes.ModuleName), app.GetSubspace(mtypes.ModuleName), keepers.Escrow) } if keepers.Deployment == nil { - keepers.Deployment = dkeeper.NewKeeper(dtypes.ModuleCdc, app.GetKey(dtypes.StoreKey), app.GetSubspace(dtypes.ModuleName), keepers.Escrow) + keepers.Deployment = dkeeper.NewKeeper(dtypes.ModuleCdc, app.GetKey(dtypes.ModuleName), app.GetSubspace(dtypes.ModuleName), keepers.Escrow) } if keepers.Provider == nil { - 
keepers.Provider = pkeeper.NewKeeper(ptypes.ModuleCdc, app.GetKey(ptypes.StoreKey)) + keepers.Provider = pkeeper.NewKeeper(ptypes.ModuleCdc, app.GetKey(ptypes.ModuleName)) } hook := mhooks.New(keepers.Deployment, keepers.Market) diff --git a/upgrades/CHANGELOG.md b/upgrades/CHANGELOG.md index c81ba3850b..05aa52edd1 100644 --- a/upgrades/CHANGELOG.md +++ b/upgrades/CHANGELOG.md @@ -8,14 +8,15 @@ |:----------:|--------:| | audit | 2 | | cert | 2 | -| deployment | 3 | +| deployment | 4 | | escrow | 2 | | agov | 1 | | inflation | 1 | -| market | 5 | +| market | 6 | | provider | 2 | | astaking | 1 | -| take | 1 | +| take | 2 | +| authz | 2 | #### Upgrades @@ -43,6 +44,16 @@ Goal of the upgrade here Add new upgrades after this line based on the template above ----- +##### v0.38.0 + +Upgrade x/stores keys to improve read performance of certain modules as described in [AEP-61](https://github.com/akash-network/AEP/blob/main/AEPS/AEP-61.md) + +- Migrations + - cert `2 -> 3` + - deployment `3 -> 4` + - market `5 -> 6` + - authz `1 -> 2` + ##### v0.36.0 1. Init Feegrant Keeper reference for `NewDeductFeeDecorator`. 
Fixes issue with feegrant enabled but not actually working due to uninitialized reference in Ante config diff --git a/upgrades/software/v0.32.0/market.go b/upgrades/software/v0.32.0/market.go index 66d8f92d21..d5056951b3 100644 --- a/upgrades/software/v0.32.0/market.go +++ b/upgrades/software/v0.32.0/market.go @@ -32,7 +32,7 @@ func (m marketMigrations) handler(ctx sdk.Context) error { var val types.Lease m.Codec().MustUnmarshal(iter.Value(), &val) - store.Delete(v1beta4.SecondaryLeaseKeyByProvider(val.LeaseID)) + store.Delete(v1beta4.SecondaryLeaseKeyByProviderLegacy(val.LeaseID)) } return nil diff --git a/upgrades/software/v0.38.0/authz.go b/upgrades/software/v0.38.0/authz.go new file mode 100644 index 0000000000..3156b26e0a --- /dev/null +++ b/upgrades/software/v0.38.0/authz.go @@ -0,0 +1,42 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkmodule "github.com/cosmos/cosmos-sdk/types/module" + + utypes "github.com/akash-network/node/upgrades/types" + + "github.com/cosmos/cosmos-sdk/x/authz/keeper" +) + +type authzMigrations struct { + utypes.Migrator +} + +func newAuthzMigration(m utypes.Migrator) utypes.Migration { + return authzMigrations{Migrator: m} +} + +func (m authzMigrations) GetHandler() sdkmodule.MigrationHandler { + return m.handler +} + +// handler migrates authz from version 1 to 2. 
+func (m authzMigrations) handler(ctx sdk.Context) error { + store := ctx.KVStore(m.StoreKey()) + + iter := sdk.KVStorePrefixIterator(store, keeper.GrantKey) + + defer func() { + _ = iter.Close() + }() + + for ; iter.Valid(); iter.Next() { + granter, grantee := keeper.AddressesFromGrantStoreKey(iter.Key()) + keeper.IncGranteeGrants(store, grantee, granter) + } + + return nil +} diff --git a/upgrades/software/v0.38.0/cert.go b/upgrades/software/v0.38.0/cert.go new file mode 100644 index 0000000000..c0f655c868 --- /dev/null +++ b/upgrades/software/v0.38.0/cert.go @@ -0,0 +1,57 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkmodule "github.com/cosmos/cosmos-sdk/types/module" + + types "github.com/akash-network/akash-api/go/node/cert/v1beta3" + + utypes "github.com/akash-network/node/upgrades/types" + "github.com/akash-network/node/x/cert/keeper" +) + +type certMigrations struct { + utypes.Migrator +} + +func newCertMigration(m utypes.Migrator) utypes.Migration { + return certMigrations{Migrator: m} +} + +func (m certMigrations) GetHandler() sdkmodule.MigrationHandler { + return m.handler +} + +// handler migrates x/cert from version 2 to 3. +func (m certMigrations) handler(ctx sdk.Context) error { + store := ctx.KVStore(m.StoreKey()) + + iter := sdk.KVStorePrefixIterator(store, types.PrefixCertificateID()) + + defer func() { + _ = iter.Close() + }() + + var total int + + for ; iter.Valid(); iter.Next() { + id, err := keeper.ParseCertIDLegacy(types.PrefixCertificateID(), iter.Key()) + if err != nil { + return err + } + + key := keeper.CertificateKey(id) + store.Delete(iter.Key()) + store.Set(key, iter.Value()) + + total++ + } + + ctx.Logger().Info(fmt.Sprintf("[upgrade %s]: updated x/cert store keys. 
total=%d", UpgradeName, total)) + + return nil +} diff --git a/upgrades/software/v0.38.0/deployment.go b/upgrades/software/v0.38.0/deployment.go new file mode 100644 index 0000000000..b34ec76ed3 --- /dev/null +++ b/upgrades/software/v0.38.0/deployment.go @@ -0,0 +1,130 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkmodule "github.com/cosmos/cosmos-sdk/types/module" + + dtypesbeta "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + + utypes "github.com/akash-network/node/upgrades/types" + "github.com/akash-network/node/x/deployment/keeper" +) + +type deploymentMigrations struct { + utypes.Migrator +} + +func newDeploymentMigration(m utypes.Migrator) utypes.Migration { + return deploymentMigrations{Migrator: m} +} + +func (m deploymentMigrations) GetHandler() sdkmodule.MigrationHandler { + return m.handler +} + +// handler migrates deployment from version 2 to 3. +func (m deploymentMigrations) handler(ctx sdk.Context) error { + store := ctx.KVStore(m.StoreKey()) + diter := sdk.KVStorePrefixIterator(store, dtypesbeta.DeploymentPrefix()) + + defer func() { + _ = diter.Close() + }() + + var deploymentsTotal uint64 + var deploymentsActive uint64 + var deploymentsClosed uint64 + + for ; diter.Valid(); diter.Next() { + var val dtypesbeta.Deployment + m.Codec().MustUnmarshal(diter.Value(), &val) + + switch val.State { + case dtypesbeta.DeploymentActive: + deploymentsActive++ + case dtypesbeta.DeploymentClosed: + deploymentsClosed++ + default: + return fmt.Errorf("[upgrade %s]: unknown deployment state %d", UpgradeName, val.State) + } + + key, err := keeper.DeploymentKey(keeper.DeploymentStateToPrefix(val.State), val.DeploymentID) + if err != nil { + return err + } + + data, err := m.Codec().Marshal(&val) + if err != nil { + return err + } + + store.Delete(keeper.DeploymentKeyLegacy(val.DeploymentID)) + store.Set(key, data) + + deploymentsTotal++ + } + + giter := 
sdk.KVStorePrefixIterator(store, dtypesbeta.GroupPrefix()) + + defer func() { + _ = giter.Close() + }() + + var groupsTotal uint64 + var groupsOpen uint64 + var groupsPaused uint64 + var groupsInsufficientFunds uint64 + var groupsClosed uint64 + + for ; giter.Valid(); giter.Next() { + var val dtypesbeta.Group + m.Codec().MustUnmarshal(giter.Value(), &val) + + switch val.State { + case dtypesbeta.GroupOpen: + groupsOpen++ + case dtypesbeta.GroupPaused: + groupsPaused++ + case dtypesbeta.GroupInsufficientFunds: + groupsInsufficientFunds++ + case dtypesbeta.GroupClosed: + groupsClosed++ + default: + return fmt.Errorf("[upgrade %s]: unknown deployment group state %d", UpgradeName, val.State) + } + + key, err := keeper.GroupKey(keeper.GroupStateToPrefix(val.State), val.GroupID) + if err != nil { + return err + } + + data, err := m.Codec().Marshal(&val) + if err != nil { + return err + } + + store.Delete(keeper.GroupKeyLegacy(val.GroupID)) + store.Set(key, data) + + groupsTotal++ + } + + ctx.Logger().Info(fmt.Sprintf("[upgrade %s]: updated x/deployment store keys:"+ + "\n\tdeployments total: %d"+ + "\n\tdeployments active: %d"+ + "\n\tdeployments closed: %d"+ + "\n\tgroups total: %d"+ + "\n\tgroups open: %d"+ + "\n\tgroups paused: %d"+ + "\n\tgroups insufficient funds: %d"+ + "\n\tgroups closed: %d", + UpgradeName, + deploymentsTotal, deploymentsActive, deploymentsClosed, + groupsTotal, groupsOpen, groupsPaused, groupsInsufficientFunds, groupsClosed)) + + return nil +} diff --git a/upgrades/software/v0.38.0/init.go b/upgrades/software/v0.38.0/init.go new file mode 100644 index 0000000000..fe6fa0a5d4 --- /dev/null +++ b/upgrades/software/v0.38.0/init.go @@ -0,0 +1,21 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + ctypesbeta "github.com/akash-network/akash-api/go/node/cert/v1beta3" + dv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + mtypesbeta "github.com/akash-network/akash-api/go/node/market/v1beta4" + + 
"github.com/cosmos/cosmos-sdk/x/authz" + + utypes "github.com/akash-network/node/upgrades/types" +) + +func init() { + utypes.RegisterUpgrade(UpgradeName, initUpgrade) + utypes.RegisterMigration(ctypesbeta.ModuleName, 2, newCertMigration) + utypes.RegisterMigration(mtypesbeta.ModuleName, 5, newMarketMigration) + utypes.RegisterMigration(dv1beta3.ModuleName, 3, newDeploymentMigration) + utypes.RegisterMigration(authz.ModuleName, 1, newAuthzMigration) +} diff --git a/upgrades/software/v0.38.0/market.go b/upgrades/software/v0.38.0/market.go new file mode 100644 index 0000000000..fc21538268 --- /dev/null +++ b/upgrades/software/v0.38.0/market.go @@ -0,0 +1,198 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkmodule "github.com/cosmos/cosmos-sdk/types/module" + + mtypesbeta "github.com/akash-network/akash-api/go/node/market/v1beta4" + + utypes "github.com/akash-network/node/upgrades/types" + "github.com/akash-network/node/x/market/keeper/keys/v1beta4" +) + +type marketMigrations struct { + utypes.Migrator +} + +func newMarketMigration(m utypes.Migrator) utypes.Migration { + return marketMigrations{Migrator: m} +} + +func (m marketMigrations) GetHandler() sdkmodule.MigrationHandler { + return m.handler +} + +// handler migrates market from version 5 to 6. 
+func (m marketMigrations) handler(ctx sdk.Context) error { + store := ctx.KVStore(m.StoreKey()) + oiter := sdk.KVStorePrefixIterator(store, mtypesbeta.OrderPrefix()) + defer func() { + _ = oiter.Close() + }() + + var ordersTotal uint64 + var ordersOpen uint64 + var ordersActive uint64 + var ordersClosed uint64 + for ; oiter.Valid(); oiter.Next() { + var val mtypesbeta.Order + m.Codec().MustUnmarshal(oiter.Value(), &val) + + var state []byte + switch val.State { + case mtypesbeta.OrderOpen: + state = v1beta4.OrderStateOpenPrefix + ordersOpen++ + case mtypesbeta.OrderActive: + state = v1beta4.OrderStateActivePrefix + ordersActive++ + case mtypesbeta.OrderClosed: + state = v1beta4.OrderStateClosedPrefix + ordersClosed++ + default: + panic(fmt.Sprintf("unknown order state %d", val.State)) + } + + ordersTotal++ + store.Delete(v1beta4.OrderKeyLegacy(val.OrderID)) + + key, err := v1beta4.OrderKey(state, val.OrderID) + if err != nil { + return err + } + + data, err := m.Codec().Marshal(&val) + if err != nil { + return err + } + + store.Set(key, data) + } + + biter := sdk.KVStorePrefixIterator(store, mtypesbeta.BidPrefix()) + defer func() { + _ = biter.Close() + }() + + var bidsTotal uint64 + var bidsOpen uint64 + var bidsActive uint64 + var bidsLost uint64 + var bidsClosed uint64 + + for ; biter.Valid(); biter.Next() { + var val mtypesbeta.Bid + m.Codec().MustUnmarshal(biter.Value(), &val) + + switch val.State { + case mtypesbeta.BidOpen: + bidsOpen++ + case mtypesbeta.BidActive: + bidsActive++ + case mtypesbeta.BidLost: + bidsLost++ + case mtypesbeta.BidClosed: + bidsClosed++ + default: + panic(fmt.Sprintf("unknown order state %d", val.State)) + } + + bidsTotal++ + store.Delete(v1beta4.BidKeyLegacy(val.BidID)) + + data, err := m.Codec().Marshal(&val) + if err != nil { + return err + } + + state := v1beta4.BidStateToPrefix(val.State) + key, err := v1beta4.BidKey(state, val.BidID) + if err != nil { + return err + } + + revKey, err := v1beta4.BidStateReverseKey(val.State, 
val.BidID) + if err != nil { + return err + } + + store.Set(key, data) + if len(revKey) > 0 { + store.Set(revKey, data) + } + } + + liter := sdk.KVStorePrefixIterator(store, mtypesbeta.LeasePrefix()) + defer func() { + _ = liter.Close() + }() + + var leasesTotal uint64 + var leasesActive uint64 + var leasesInsufficientFunds uint64 + var leasesClosed uint64 + + for ; liter.Valid(); liter.Next() { + var val mtypesbeta.Lease + m.Codec().MustUnmarshal(liter.Value(), &val) + + switch val.State { + case mtypesbeta.LeaseActive: + leasesActive++ + case mtypesbeta.LeaseInsufficientFunds: + leasesInsufficientFunds++ + case mtypesbeta.LeaseClosed: + leasesClosed++ + default: + panic(fmt.Sprintf("unknown order state %d", val.State)) + } + + leasesTotal++ + store.Delete(v1beta4.LeaseKeyLegacy(val.LeaseID)) + + data, err := m.Codec().Marshal(&val) + if err != nil { + return err + } + + state := v1beta4.LeaseStateToPrefix(val.State) + key, err := v1beta4.LeaseKey(state, val.LeaseID) + if err != nil { + return err + } + + revKey, err := v1beta4.LeaseStateReverseKey(val.State, val.LeaseID) + if err != nil { + return err + } + + store.Set(key, data) + if len(revKey) > 0 { + store.Set(revKey, data) + } + } + ctx.Logger().Info(fmt.Sprintf("[upgrade %s]: updated x/market store keys:"+ + "\n\torders total: %d"+ + "\n\torders open: %d"+ + "\n\torders active: %d"+ + "\n\torders closed: %d"+ + "\n\tbids total: %d"+ + "\n\tbids open: %d"+ + "\n\tbids active: %d"+ + "\n\tbids lost: %d"+ + "\n\tbids closed: %d"+ + "\n\tleases total: %d"+ + "\n\tleases active: %d"+ + "\n\tleases insufficient funds: %d"+ + "\n\tleases closed: %d", + UpgradeName, + ordersTotal, ordersOpen, ordersActive, ordersClosed, + bidsTotal, bidsOpen, bidsActive, bidsLost, bidsClosed, + leasesTotal, leasesActive, leasesInsufficientFunds, leasesClosed)) + + return nil +} diff --git a/upgrades/software/v0.38.0/upgrade.go b/upgrades/software/v0.38.0/upgrade.go new file mode 100644 index 0000000000..539a60e28d --- /dev/null +++ 
b/upgrades/software/v0.38.0/upgrade.go @@ -0,0 +1,47 @@ +// Package v0_38_0 +// nolint revive +package v0_38_0 + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/log" + + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + apptypes "github.com/akash-network/node/app/types" + utypes "github.com/akash-network/node/upgrades/types" +) + +const ( + UpgradeName = "v0.38.0" +) + +type upgrade struct { + *apptypes.App + log log.Logger +} + +var _ utypes.IUpgrade = (*upgrade)(nil) + +func initUpgrade(log log.Logger, app *apptypes.App) (utypes.IUpgrade, error) { + up := &upgrade{ + App: app, + log: log.With("module", fmt.Sprintf("upgrade/%s", UpgradeName)), + } + + return up, nil +} + +func (up *upgrade) StoreLoader() *storetypes.StoreUpgrades { + return &storetypes.StoreUpgrades{} +} + +func (up *upgrade) UpgradeHandler() upgradetypes.UpgradeHandler { + return func(ctx sdk.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + return up.MM.RunMigrations(ctx, up.Configurator, fromVM) + } +} diff --git a/upgrades/types/types.go b/upgrades/types/types.go index fcfe53ea9c..889ed77804 100644 --- a/upgrades/types/types.go +++ b/upgrades/types/types.go @@ -21,18 +21,7 @@ var ( // actual consensus versions is set when migrations are // registered. 
- currentConsensusVersions = map[string]uint64{ - "audit": 1, - "cert": 1, - "deployment": 1, - "escrow": 1, - "market": 1, - "provider": 1, - "inflation": 1, - "agov": 1, - "astaking": 1, - "take": 1, - } + // currentConsensusVersions = map[string]uint64{} ) type UpgradeInitFn func(log.Logger, *apptypes.App) (IUpgrade, error) @@ -62,6 +51,28 @@ type Migrator interface { Codec() codec.BinaryCodec } +type migrator struct { + cdc codec.BinaryCodec + skey sdk.StoreKey +} + +var _ Migrator = (*migrator)(nil) + +func NewMigrator(cdc codec.BinaryCodec, skey sdk.StoreKey) Migrator { + return &migrator{ + cdc: cdc, + skey: skey, + } +} + +func (m *migrator) Codec() codec.BinaryCodec { + return m.cdc +} + +func (m *migrator) StoreKey() sdk.StoreKey { + return m.skey +} + type Migration interface { GetHandler() sdkmodule.MigrationHandler } @@ -100,28 +111,15 @@ func RegisterMigration(module string, version uint64, initFn NewMigrationFn) { } migrations[module][version] = initFn - if val := currentConsensusVersions[module]; val <= version+1 { - currentConsensusVersions[module] = version + 1 - } + // if val := currentConsensusVersions[module]; val <= version+1 { + // currentConsensusVersions[module] = version + 1 + // } } -func ModuleVersion(module string) uint64 { - ver, exists := currentConsensusVersions[module] - if !exists { - panic(fmt.Sprintf("requested consensus version for non existing module (%s)", module)) - } - - return ver -} - -func ModuleMigrations(module string, migrator Migrator, fn func(string, uint64, sdkmodule.MigrationHandler)) { - moduleMigrations, exists := migrations[module] - if !exists { - return - } - - for version, initFn := range moduleMigrations { - migration := initFn(migrator) - fn(module, version, migration.GetHandler()) +func IterateMigrations(fn func(module string, version uint64, initfn NewMigrationFn)) { + for module, migrations := range migrations { + for version, handler := range migrations { + fn(module, version, handler) + } } } diff 
--git a/upgrades/upgrades.go b/upgrades/upgrades.go index 4595db5ae4..3c7a556677 100644 --- a/upgrades/upgrades.go +++ b/upgrades/upgrades.go @@ -2,21 +2,5 @@ package upgrades import ( // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.36.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.34.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.32.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.30.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.28.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.26.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.24.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.20.0" - // nolint: revive - _ "github.com/akash-network/node/upgrades/software/v0.15.0" + _ "github.com/akash-network/node/upgrades/software/v0.38.0" ) diff --git a/x/audit/keeper/grpc_query_test.go b/x/audit/keeper/grpc_query_test.go index fb0425b99d..0903dd3603 100644 --- a/x/audit/keeper/grpc_query_test.go +++ b/x/audit/keeper/grpc_query_test.go @@ -89,7 +89,6 @@ func TestGRPCQueryProvider(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -150,7 +149,6 @@ func TestGRPCQueryProviders(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) diff --git a/x/audit/module.go b/x/audit/module.go index 22b600be04..a935a74aea 100644 --- a/x/audit/module.go +++ b/x/audit/module.go @@ -6,25 +6,25 @@ import ( "fmt" "math/rand" - "github.com/cosmos/cosmos-sdk/client" - sdk "github.com/cosmos/cosmos-sdk/types" - sim "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/spf13/cobra" + "github.com/gogo/protobuf/grpc" 
"github.com/gorilla/mux" "github.com/grpc-ecosystem/grpc-gateway/runtime" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - "github.com/gogo/protobuf/grpc" + sim "github.com/cosmos/cosmos-sdk/types/simulation" v1beta1types "github.com/akash-network/akash-api/go/node/audit/v1beta1" v1beta2types "github.com/akash-network/akash-api/go/node/audit/v1beta2" types "github.com/akash-network/akash-api/go/node/audit/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/audit/client/cli" "github.com/akash-network/node/x/audit/client/rest" "github.com/akash-network/node/x/audit/handler" @@ -162,12 +162,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), handler.NewMsgServerImpl(am.keeper)) querier := keeper.Querier{Keeper: am.keeper} types.RegisterQueryServer(cfg.QueryServer(), querier) - - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // RegisterQueryService registers a GRPC query service to respond to the @@ -203,7 +197,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 2 } // ____________________________________________________________________________ diff --git a/x/cert/client/cli/grpc_rest_test.go b/x/cert/client/cli/grpc_rest_test.go index c63e22eec8..088472e199 100644 --- a/x/cert/client/cli/grpc_rest_test.go +++ b/x/cert/client/cli/grpc_rest_test.go @@ -109,7 +109,6 @@ func (s 
*GRPCRestTestSuite) TestGetCertificates() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) diff --git a/x/cert/keeper/grpc_query_test.go b/x/cert/keeper/grpc_query_test.go index 560cb11722..3e500308b1 100644 --- a/x/cert/keeper/grpc_query_test.go +++ b/x/cert/keeper/grpc_query_test.go @@ -227,7 +227,6 @@ func TestCertGRPCQueryCertificates(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) diff --git a/x/cert/keeper/keeper.go b/x/cert/keeper/keeper.go index 52aefb2ece..164f662c33 100644 --- a/x/cert/keeper/keeper.go +++ b/x/cert/keeper/keeper.go @@ -120,7 +120,7 @@ func (k keeper) GetCertificateByID(ctx sdk.Context, id types.CertID) (types.Cert // WithCertificates iterates all certificates func (k keeper) WithCertificates(ctx sdk.Context, fn func(id types.CertID, certificate types.CertificateResponse) bool) { store := ctx.KVStore(k.skey) - iter := store.Iterator(nil, nil) + iter := sdk.KVStorePrefixIterator(store, types.PrefixCertificateID()) defer func() { _ = iter.Close() @@ -210,7 +210,7 @@ func (k keeper) mustUnmarshal(key, val []byte) types.CertificateResponse { func (k keeper) unmarshalIterator(key, val []byte) (types.CertificateResponse, error) { id, err := ParseCertID(types.PrefixCertificateID(), key) if err != nil { - panic(err) + return types.CertificateResponse{}, err } item := types.CertificateResponse{ diff --git a/x/cert/keeper/key.go b/x/cert/keeper/key.go index 0e45f71638..66c32ed3b7 100644 --- a/x/cert/keeper/key.go +++ b/x/cert/keeper/key.go @@ -6,20 +6,35 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/address" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/cosmos-sdk/types/kv" types "github.com/akash-network/akash-api/go/node/cert/v1beta3" ) +const ( + maxSerialLength = 8 +) + 
// CertificateKey creates a store key of the format: // prefix_bytes | owner_address_len (1 byte) | owner_address_bytes | serial_bytes func CertificateKey(id types.CertID) []byte { + addr, err := address.LengthPrefix(id.Owner.Bytes()) + if err != nil { + panic(err) + } + + serial, err := serialPrefix(id.Serial.Bytes()) + if err != nil { + panic(err) + } + buf := bytes.NewBuffer(types.PrefixCertificateID()) - if _, err := buf.Write(address.MustLengthPrefix(id.Owner.Bytes())); err != nil { + if _, err := buf.Write(addr); err != nil { panic(err) } - if _, err := buf.Write(id.Serial.Bytes()); err != nil { + if _, err := buf.Write(serial); err != nil { panic(err) } @@ -27,14 +42,22 @@ func CertificateKey(id types.CertID) []byte { } func CertificatePrefix(id sdk.Address) []byte { + addr, err := address.LengthPrefix(id.Bytes()) + if err != nil { + panic(err) + } + buf := bytes.NewBuffer(types.PrefixCertificateID()) - if _, err := buf.Write(address.MustLengthPrefix(id.Bytes())); err != nil { + if _, err := buf.Write(addr); err != nil { panic(err) } return buf.Bytes() } +// ParseCertID parse certificate key into id +// format <0x01> + func ParseCertID(prefix []byte, from []byte) (types.CertID, error) { res := types.CertID{ Serial: *big.NewInt(0), @@ -58,7 +81,57 @@ func ParseCertID(prefix []byte, from []byte) (types.CertID, error) { return res, err } - // todo add length prefix + from = from[addrLen:] + kv.AssertKeyAtLeastLength(from, 1) + serialLen := from[0] + + from = from[1:] + kv.AssertKeyLength(from, int(serialLen)) + + res.Owner = sdk.AccAddress(addr) + res.Serial.SetBytes(from) + + return res, nil +} + +// CertificateKeyLegacy creates a store key of the format: +// prefix_bytes | owner_address_len (1 byte) | owner_address_bytes | serial_bytes +func CertificateKeyLegacy(id types.CertID) []byte { + buf := bytes.NewBuffer(types.PrefixCertificateID()) + if _, err := buf.Write(address.MustLengthPrefix(id.Owner.Bytes())); err != nil { + panic(err) + } + + if _, err := 
buf.Write(id.Serial.Bytes()); err != nil {
+ panic(err)
+ }
+
+ return buf.Bytes()
+}
+
+func ParseCertIDLegacy(prefix []byte, from []byte) (types.CertID, error) {
+ res := types.CertID{
+ Serial: *big.NewInt(0),
+ }
+
+ kv.AssertKeyAtLeastLength(from, len(prefix))
+
+ // skip prefix if set
+ from = from[len(prefix):]
+
+ kv.AssertKeyAtLeastLength(from, 1)
+
+ addrLen := from[0]
+ from = from[1:]
+
+ kv.AssertKeyAtLeastLength(from, int(addrLen))
+
+ addr := from[:addrLen]
+ err := sdk.VerifyAddressFormat(addr)
+ if err != nil {
+ return res, err
+ }
+ from = from[addrLen:]
serial := from
@@ -68,3 +141,26 @@ func ParseCertID(prefix []byte, from []byte) (types.CertID, error) {
return res, nil
}
+
+func serialPrefix(bz []byte) ([]byte, error) {
+ bzLen := len(bz)
+ if bzLen == 0 {
+ return bz, nil
+ }
+
+ if bzLen > maxSerialLength {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownAddress, "serial length should be max %d bytes, got %d", maxSerialLength, bzLen)
+ }
+
+ return append([]byte{byte(bzLen)}, bz...), nil
+}
+
+// nolint: unused
+func mustSerialPrefix(bz []byte) []byte {
+ res, err := serialPrefix(bz)
+ if err != nil {
+ panic(err)
+ }
+
+ return res
+}
diff --git a/x/cert/module.go b/x/cert/module.go
index f731232051..d5748af552 100644
--- a/x/cert/module.go
+++ b/x/cert/module.go
@@ -22,7 +22,6 @@ import (
cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/types/module"
- utypes "github.com/akash-network/node/upgrades/types"
"github.com/akash-network/node/x/cert/client/cli"
"github.com/akash-network/node/x/cert/handler"
"github.com/akash-network/node/x/cert/keeper"
@@ -148,12 +147,6 @@ func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier {
func (am AppModule) RegisterServices(cfg module.Configurator) {
types.RegisterMsgServer(cfg.MsgServer(), handler.NewMsgServerImpl(am.keeper))
types.RegisterQueryServer(cfg.QueryServer(), am.keeper.Querier())
-
- utypes.ModuleMigrations(ModuleName, am.keeper,
func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // BeginBlock performs no-op @@ -182,7 +175,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 3 } // ____________________________________________________________________________ diff --git a/x/deployment/client/cli/grpc_rest_test.go b/x/deployment/client/cli/grpc_rest_test.go index 4a23e0602f..6e4746d13f 100644 --- a/x/deployment/client/cli/grpc_rest_test.go +++ b/x/deployment/client/cli/grpc_rest_test.go @@ -143,7 +143,6 @@ func (s *GRPCRestTestSuite) TestGetDeployments() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -209,7 +208,6 @@ func (s *GRPCRestTestSuite) TestGetDeployment() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -276,7 +274,6 @@ func (s *GRPCRestTestSuite) TestGetGroup() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) diff --git a/x/deployment/genesis.go b/x/deployment/genesis.go index 4e27429dd0..ccd0f5a482 100644 --- a/x/deployment/genesis.go +++ b/x/deployment/genesis.go @@ -38,7 +38,7 @@ func InitGenesis(ctx sdk.Context, kpr keeper.IKeeper, data *types.GenesisState) store := ctx.KVStore(kpr.StoreKey()) for _, record := range data.Deployments { - key := keeper.DeploymentKey(record.Deployment.DeploymentID) + key := keeper.MustDeploymentKey(keeper.DeploymentStateToPrefix(record.Deployment.State), record.Deployment.DeploymentID) store.Set(key, cdc.MustMarshal(&record.Deployment)) @@ -48,7 +48,8 @@ func InitGenesis(ctx 
sdk.Context, kpr keeper.IKeeper, data *types.GenesisState) if !group.ID().DeploymentID().Equals(record.Deployment.ID()) { panic(types.ErrInvalidGroupID) } - gkey := keeper.GroupKey(group.ID()) + + gkey := keeper.MustGroupKey(keeper.GroupStateToPrefix(group.State), group.ID()) store.Set(gkey, cdc.MustMarshal(&group)) } } @@ -63,6 +64,7 @@ func ExportGenesis(ctx sdk.Context, k keeper.IKeeper) *types.GenesisState { var records []types.GenesisDeployment k.WithDeployments(ctx, func(deployment types.Deployment) bool { groups := k.GetGroups(ctx, deployment.ID()) + records = append(records, types.GenesisDeployment{ Deployment: deployment, Groups: groups, diff --git a/x/deployment/keeper/grpc_query.go b/x/deployment/keeper/grpc_query.go index 07e70733ea..8cd5af11b6 100644 --- a/x/deployment/keeper/grpc_query.go +++ b/x/deployment/keeper/grpc_query.go @@ -23,6 +23,13 @@ var _ types.QueryServer = Querier{} // Deployments returns deployments based on filters func (k Querier) Deployments(c context.Context, req *types.QueryDeploymentsRequest) (*types.QueryDeploymentsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + defer func() { + if r := recover(); r != nil { + ctx.Logger().Error(fmt.Sprintf("%v", r)) + } + }() if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -33,52 +40,107 @@ func (k Querier) Deployments(c context.Context, req *types.QueryDeploymentsReque return nil, status.Error(codes.InvalidArgument, "invalid state value") } - var deployments types.DeploymentResponses - ctx := sdk.UnwrapSDKContext(c) + var store prefix.Store - searchPrefix, err := deploymentPrefixFromFilter(req.Filters) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + if req.Pagination == nil { + req.Pagination = &sdkquery.PageRequest{} + } else if req.Pagination != nil && req.Pagination.Offset > 0 && req.Filters.State == "" { + return nil, status.Error(codes.InvalidArgument, "invalid request parameters. 
if offset is set, filter.state must be provided") } - depStore := prefix.NewStore(ctx.KVStore(k.skey), searchPrefix) + if req.Pagination.Limit == 0 { + req.Pagination.Limit = sdkquery.DefaultLimit + } - pageRes, err := sdkquery.FilteredPaginate(depStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { - var deployment types.Deployment + states := make([]types.Deployment_State, 0, 2) + + // setup for case 3 - cross-index search + if req.Filters.State == "" { + // request has pagination key set, determine store prefix + if len(req.Pagination.Key) > 0 { + if len(req.Pagination.Key) < 3 { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + + switch req.Pagination.Key[2] { + case DeploymentStateActivePrefixID: + states = append(states, types.DeploymentActive) + fallthrough + case DeploymentStateClosedPrefixID: + states = append(states, types.DeploymentClosed) + default: + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + } else { + // request does not have pagination set. 
Start from active store + states = append(states, types.DeploymentActive) + states = append(states, types.DeploymentClosed) + } + } else { + states = append(states, stateVal) + } - err := k.cdc.Unmarshal(value, &deployment) + var deployments types.DeploymentResponses + var pageRes *sdkquery.PageResponse + + for _, state := range states { + var searchPrefix []byte + var err error + + req.Filters.State = state.String() + + searchPrefix, err = deploymentPrefixFromFilter(req.Filters) if err != nil { - return false, err + return nil, status.Error(codes.Internal, err.Error()) } - // filter deployments with provided filters - if req.Filters.Accept(deployment, stateVal) { - if accumulate { + store = prefix.NewStore(ctx.KVStore(k.skey), searchPrefix) - account, err := k.ekeeper.GetAccount( - ctx, - types.EscrowAccountForDeployment(deployment.ID()), - ) - if err != nil { - return true, fmt.Errorf("%w: fetching escrow account for DeploymentID=%s", err, deployment.DeploymentID) - } + count := uint64(0) + + pageRes, err = sdkquery.FilteredPaginate(store, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + var deployment types.Deployment - value := types.QueryDeploymentResponse{ - Deployment: deployment, - Groups: k.GetGroups(ctx, deployment.ID()), - EscrowAccount: account, + err := k.cdc.Unmarshal(value, &deployment) + if err != nil { + return false, err + } + + // filter deployments with provided filters + if req.Filters.Accept(deployment, stateVal) { + if accumulate { + account, err := k.ekeeper.GetAccount( + ctx, + types.EscrowAccountForDeployment(deployment.ID()), + ) + if err != nil { + return true, fmt.Errorf("%w: fetching escrow account for DeploymentID=%s", err, deployment.DeploymentID) + } + + value := types.QueryDeploymentResponse{ + Deployment: deployment, + Groups: k.GetGroups(ctx, deployment.ID()), + EscrowAccount: account, + } + + deployments = append(deployments, value) + count++ } - deployments = append(deployments, value) + return 
true, nil } - return true, nil + return false, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return false, nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + req.Pagination.Limit -= count + + if req.Pagination.Limit == 0 { + break + } } return &types.QueryDeploymentsResponse{ diff --git a/x/deployment/keeper/grpc_query_test.go b/x/deployment/keeper/grpc_query_test.go index 08df49cd44..b79e00edca 100644 --- a/x/deployment/keeper/grpc_query_test.go +++ b/x/deployment/keeper/grpc_query_test.go @@ -111,7 +111,6 @@ func TestGRPCQueryDeployment(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -190,7 +189,6 @@ func TestGRPCQueryDeployments(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -457,7 +455,6 @@ func TestGRPCQueryGroup(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) diff --git a/x/deployment/keeper/keeper.go b/x/deployment/keeper/keeper.go index 54858d31a1..3c712cf9b1 100644 --- a/x/deployment/keeper/keeper.go +++ b/x/deployment/keeper/keeper.go @@ -4,6 +4,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/errors" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" @@ -27,8 +28,6 @@ type IKeeper interface { OnLeaseClosed(ctx sdk.Context, id types.GroupID) (types.Group, error) GetParams(ctx sdk.Context) (params types.Params) SetParams(ctx sdk.Context, params types.Params) - updateDeployment(ctx sdk.Context, obj types.Deployment) - NewQuerier() Querier } @@ -71,9 +70,9 @@ func (k Keeper) StoreKey() 
sdk.StoreKey { func (k Keeper) GetDeployment(ctx sdk.Context, id types.DeploymentID) (types.Deployment, bool) { store := ctx.KVStore(k.skey) - key := DeploymentKey(id) + key := k.findDeployment(ctx, id) - if !store.Has(key) { + if len(key) == 0 { return types.Deployment{}, false } @@ -90,9 +89,9 @@ func (k Keeper) GetDeployment(ctx sdk.Context, id types.DeploymentID) (types.Dep func (k Keeper) GetGroup(ctx sdk.Context, id types.GroupID) (types.Group, bool) { store := ctx.KVStore(k.skey) - key := GroupKey(id) + key := k.findGroup(ctx, id) - if !store.Has(key) { + if len(key) == 0 { return types.Group{}, false } @@ -108,19 +107,34 @@ func (k Keeper) GetGroup(ctx sdk.Context, id types.GroupID) (types.Group, bool) // GetGroups returns all groups of a deployment with given DeploymentID from deployment store func (k Keeper) GetGroups(ctx sdk.Context, id types.DeploymentID) []types.Group { store := ctx.KVStore(k.skey) - key := groupsKey(id) + keys := [][]byte{ + MustGroupsKey(GroupStateOpenPrefix, id), + MustGroupsKey(GroupStatePausedPrefix, id), + MustGroupsKey(GroupStateInsufficientFundsPrefix, id), + MustGroupsKey(GroupStateClosedPrefix, id), + } var vals []types.Group - iter := sdk.KVStorePrefixIterator(store, key) + iters := make([]sdk.Iterator, 0, len(keys)) - for ; iter.Valid(); iter.Next() { - var val types.Group - k.cdc.MustUnmarshal(iter.Value(), &val) - vals = append(vals, val) + defer func() { + for _, iter := range iters { + _ = iter.Close() + } + }() + + for _, key := range keys { + iter := sdk.KVStorePrefixIterator(store, key) + iters = append(iters, iter) + + for ; iter.Valid(); iter.Next() { + var val types.Group + k.cdc.MustUnmarshal(iter.Value(), &val) + vals = append(vals, val) + } } - iter.Close() return vals } @@ -128,12 +142,14 @@ func (k Keeper) GetGroups(ctx sdk.Context, id types.DeploymentID) []types.Group func (k Keeper) Create(ctx sdk.Context, deployment types.Deployment, groups []types.Group) error { store := ctx.KVStore(k.skey) - key := 
DeploymentKey(deployment.ID()) + key := k.findDeployment(ctx, deployment.ID()) - if store.Has(key) { + if len(key) != 0 { return types.ErrDeploymentExists } + key = MustDeploymentKey(DeploymentStateToPrefix(deployment.State), deployment.ID()) + store.Set(key, k.cdc.MustMarshal(&deployment)) for idx := range groups { @@ -142,7 +158,12 @@ func (k Keeper) Create(ctx sdk.Context, deployment types.Deployment, groups []ty if !group.ID().DeploymentID().Equals(deployment.ID()) { return types.ErrInvalidGroupID } - gkey := GroupKey(group.ID()) + + gkey, err := GroupKey(GroupStateToPrefix(group.State), group.ID()) + if err != nil { + return errors.Wrap(err, "failed to create group key") + } + store.Set(gkey, k.cdc.MustMarshal(&group)) } @@ -159,71 +180,93 @@ func (k Keeper) Create(ctx sdk.Context, deployment types.Deployment, groups []ty // UpdateDeployment updates deployment details func (k Keeper) UpdateDeployment(ctx sdk.Context, deployment types.Deployment) error { store := ctx.KVStore(k.skey) - key := DeploymentKey(deployment.ID()) - if !store.Has(key) { + key := k.findDeployment(ctx, deployment.ID()) + + if len(key) == 0 { return types.ErrDeploymentNotFound } + key = MustDeploymentKey(DeploymentStateToPrefix(deployment.State), deployment.ID()) + store.Set(key, k.cdc.MustMarshal(&deployment)) + ctx.EventManager().EmitEvent( types.NewEventDeploymentUpdated(deployment.ID(), deployment.Version). 
ToSDKEvent(), ) - store.Set(key, k.cdc.MustMarshal(&deployment)) return nil } -// UpdateDeployment updates deployment details +// CloseDeployment updates deployment details func (k Keeper) CloseDeployment(ctx sdk.Context, deployment types.Deployment) { if deployment.State == types.DeploymentClosed { return } store := ctx.KVStore(k.skey) - key := DeploymentKey(deployment.ID()) + key := k.findDeployment(ctx, deployment.ID()) - if !store.Has(key) { + if len(key) == 0 { return } + store.Delete(key) + deployment.State = types.DeploymentClosed + + key = MustDeploymentKey(DeploymentStateToPrefix(deployment.State), deployment.DeploymentID) + + store.Set(key, k.cdc.MustMarshal(&deployment)) + ctx.EventManager().EmitEvent( types.NewEventDeploymentClosed(deployment.ID()). ToSDKEvent(), ) - store.Set(key, k.cdc.MustMarshal(&deployment)) } // OnCloseGroup provides shutdown API for a Group func (k Keeper) OnCloseGroup(ctx sdk.Context, group types.Group, state types.Group_State) error { store := ctx.KVStore(k.skey) - key := GroupKey(group.ID()) - if !store.Has(key) { + key := k.findGroup(ctx, group.ID()) + if len(key) == 0 { return types.ErrGroupNotFound } + + store.Delete(key) + group.State = state + key, err := GroupKey(GroupStateToPrefix(group.State), group.ID()) + if err != nil { + return errors.Wrap(err, "failed to encode group key") + } + + store.Set(key, k.cdc.MustMarshal(&group)) + ctx.EventManager().EmitEvent( types.NewEventGroupClosed(group.ID()). 
ToSDKEvent(),
)
- store.Set(key, k.cdc.MustMarshal(&group))
return nil
}
// OnPauseGroup provides shutdown API for a Group
func (k Keeper) OnPauseGroup(ctx sdk.Context, group types.Group) error {
store := ctx.KVStore(k.skey)
- key := GroupKey(group.ID())
- if !store.Has(key) {
+ key := k.findGroup(ctx, group.ID())
+ if len(key) == 0 {
return types.ErrGroupNotFound
}
+
+ store.Delete(key)
+ group.State = types.GroupPaused
+ store.Set(MustGroupKey(GroupStateToPrefix(group.State), group.ID()), k.cdc.MustMarshal(&group))
ctx.EventManager().EmitEvent(
types.NewEventGroupPaused(group.ID()).
@@ -237,27 +280,39 @@ func (k Keeper) OnPauseGroup(ctx sdk.Context, group types.Group) error {
// OnStartGroup provides shutdown API for a Group
func (k Keeper) OnStartGroup(ctx sdk.Context, group types.Group) error {
store := ctx.KVStore(k.skey)
- key := GroupKey(group.ID())
- if !store.Has(key) {
+ key := k.findGroup(ctx, group.ID())
+ if len(key) == 0 {
return types.ErrGroupNotFound
}
+
+ store.Delete(key)
+ group.State = types.GroupOpen
+ key, err := GroupKey(GroupStateToPrefix(group.State), group.ID())
+ if err != nil {
+ return errors.Wrap(err, "failed to encode group key")
+ }
+
+ store.Set(key, k.cdc.MustMarshal(&group))
ctx.EventManager().EmitEvent(
types.NewEventGroupStarted(group.ID()).
ToSDKEvent(), ) - store.Set(key, k.cdc.MustMarshal(&group)) return nil } // WithDeployments iterates all deployments in deployment store func (k Keeper) WithDeployments(ctx sdk.Context, fn func(types.Deployment) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, types.DeploymentPrefix()) - defer iter.Close() + iter := sdk.KVStorePrefixIterator(store, DeploymentPrefix) + + defer func() { + _ = iter.Close() + }() + for ; iter.Valid(); iter.Next() { var val types.Deployment k.cdc.MustUnmarshal(iter.Value(), &val) @@ -296,16 +351,43 @@ func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { k.pspace.SetParamSet(ctx, ¶ms) } -func (k Keeper) updateDeployment(ctx sdk.Context, obj types.Deployment) { +func (k Keeper) findDeployment(ctx sdk.Context, id types.DeploymentID) []byte { store := ctx.KVStore(k.skey) - key := DeploymentKey(obj.ID()) - store.Set(key, k.cdc.MustMarshal(&obj)) + + aKey := MustDeploymentKey(DeploymentStateActivePrefix, id) + cKey := MustDeploymentKey(DeploymentStateClosedPrefix, id) + + var key []byte + + if store.Has(aKey) { + key = aKey + } else if store.Has(cKey) { + key = cKey + } + + return key } -// nolint: unused -func (k Keeper) updateGroup(ctx sdk.Context, group types.Group) { +func (k Keeper) findGroup(ctx sdk.Context, id types.GroupID) []byte { store := ctx.KVStore(k.skey) - key := GroupKey(group.ID()) - store.Set(key, k.cdc.MustMarshal(&group)) + oKey := MustGroupKey(GroupStateOpenPrefix, id) + pKey := MustGroupKey(GroupStatePausedPrefix, id) + iKey := MustGroupKey(GroupStateInsufficientFundsPrefix, id) + cKey := MustGroupKey(GroupStateClosedPrefix, id) + + var key []byte + + // nolint: gocritic + if store.Has(oKey) { + key = oKey + } else if store.Has(pKey) { + key = pKey + } else if store.Has(iKey) { + key = iKey + } else if store.Has(cKey) { + key = cKey + } + + return key } diff --git a/x/deployment/keeper/key.go b/x/deployment/keeper/key.go index ef1c6cd3a9..13ba2a9e52 100644 --- 
a/x/deployment/keeper/key.go +++ b/x/deployment/keeper/key.go @@ -4,42 +4,176 @@ import ( "bytes" "encoding/binary" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/address" types "github.com/akash-network/akash-api/go/node/deployment/v1beta3" "github.com/akash-network/akash-api/go/sdkutil" ) -func DeploymentKey(id types.DeploymentID) []byte { - buf := bytes.NewBuffer(types.DeploymentPrefix()) - buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) +const ( + DeploymentStateActivePrefixID = byte(0x01) + DeploymentStateClosedPrefixID = byte(0x02) + GroupStateOpenPrefixID = byte(0x01) + GroupStatePausedPrefixID = byte(0x02) + GroupStateInsufficientFundsPrefixID = byte(0x03) + GroupStateClosedPrefixID = byte(0x04) +) + +var ( + DeploymentPrefix = []byte{0x11, 0x00} + DeploymentStateActivePrefix = []byte{DeploymentStateActivePrefixID} + DeploymentStateClosedPrefix = []byte{DeploymentStateClosedPrefixID} + GroupPrefix = []byte{0x12, 0x00} + GroupStateOpenPrefix = []byte{GroupStateOpenPrefixID} + GroupStatePausedPrefix = []byte{GroupStatePausedPrefixID} + GroupStateInsufficientFundsPrefix = []byte{GroupStateInsufficientFundsPrefixID} + GroupStateClosedPrefix = []byte{GroupStateClosedPrefixID} +) + +func DeploymentKey(statePrefix []byte, id types.DeploymentID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(DeploymentPrefix) + buf.Write(statePrefix) + buf.Write(lenPrefixedOwner) + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func MustDeploymentKey(statePrefix []byte, id types.DeploymentID) []byte { + key, err := DeploymentKey(statePrefix, id) + if err != nil { panic(err) } - return buf.Bytes() + return key } // GroupKey provides prefixed key for a Group's 
marshalled data. -func GroupKey(id types.GroupID) []byte { - buf := bytes.NewBuffer(types.GroupPrefix()) - buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) +func GroupKey(statePrefix []byte, id types.GroupID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(GroupPrefix) + buf.Write(statePrefix) + + buf.Write(lenPrefixedOwner) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { - panic(err) + return nil, err } if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func MustGroupKey(statePrefix []byte, id types.GroupID) []byte { + key, err := GroupKey(statePrefix, id) + if err != nil { panic(err) } - return buf.Bytes() + return key } -// groupsKey provides default store Key for Group data. -func groupsKey(id types.DeploymentID) []byte { - buf := bytes.NewBuffer(types.GroupPrefix()) - buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) +// GroupsKey provides default store Key for Group data. 
+func GroupsKey(statePrefix []byte, id types.DeploymentID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(GroupPrefix) + buf.Write(statePrefix) + + buf.Write(lenPrefixedOwner) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func MustGroupsKey(statePrefix []byte, id types.DeploymentID) []byte { + key, err := GroupsKey(statePrefix, id) + if err != nil { panic(err) } - return buf.Bytes() + return key +} + +func DeploymentStateToPrefix(state types.Deployment_State) []byte { + var idx []byte + + switch state { + case types.DeploymentActive: + idx = DeploymentStateActivePrefix + case types.DeploymentClosed: + idx = DeploymentStateClosedPrefix + } + + return idx +} + +func GroupStateToPrefix(state types.Group_State) []byte { + var idx []byte + switch state { + case types.GroupOpen: + idx = GroupStateOpenPrefix + case types.GroupPaused: + idx = GroupStatePausedPrefix + case types.GroupInsufficientFunds: + idx = GroupStateInsufficientFundsPrefix + case types.GroupClosed: + idx = GroupStateClosedPrefix + } + + return idx +} + +func buildDeploymentPrefix(state types.Deployment_State) []byte { + idx := DeploymentStateToPrefix(state) + + res := make([]byte, 0, len(DeploymentPrefix)+len(idx)) + res = append(res, DeploymentPrefix...) + res = append(res, idx...) + + return res +} + +// nolint: unused +func buildGroupPrefix(state types.Group_State) []byte { + idx := GroupStateToPrefix(state) + + res := make([]byte, 0, len(GroupPrefix)+len(idx)) + res = append(res, GroupPrefix...) + res = append(res, idx...) 
+ + return res } func filterToPrefix(prefix []byte, owner string, dseq uint64, gseq uint32) ([]byte, error) { @@ -49,7 +183,17 @@ func filterToPrefix(prefix []byte, owner string, dseq uint64, gseq uint32) ([]by return buf.Bytes(), nil } - if _, err := buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(owner))); err != nil { + ownerAddr, err := sdk.AccAddressFromBech32(owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(ownerAddr) + if err != nil { + return nil, err + } + + if _, err := buf.Write(lenPrefixedOwner); err != nil { return nil, err } @@ -73,5 +217,44 @@ func filterToPrefix(prefix []byte, owner string, dseq uint64, gseq uint32) ([]by } func deploymentPrefixFromFilter(f types.DeploymentFilters) ([]byte, error) { + return filterToPrefix(buildDeploymentPrefix(types.Deployment_State(types.Deployment_State_value[f.State])), f.Owner, f.DSeq, 0) +} + +func DeploymentKeyLegacy(id types.DeploymentID) []byte { + buf := bytes.NewBuffer(types.DeploymentPrefix()) + buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) + + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + panic(err) + } + + return buf.Bytes() +} + +// GroupKeyLegacy provides prefixed key for a Group's marshalled data. +func GroupKeyLegacy(id types.GroupID) []byte { + buf := bytes.NewBuffer(types.GroupPrefix()) + buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + panic(err) + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + panic(err) + } + return buf.Bytes() +} + +// GroupsKeyLegacy provides default store Key for Group data. 
+func GroupsKeyLegacy(id types.DeploymentID) []byte { + buf := bytes.NewBuffer(types.GroupPrefix()) + buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + panic(err) + } + return buf.Bytes() +} + +// nolint: unused +func deploymentPrefixFromFilterLegacy(f types.DeploymentFilters) ([]byte, error) { return filterToPrefix(types.DeploymentPrefix(), f.Owner, f.DSeq, 0) } diff --git a/x/deployment/module.go b/x/deployment/module.go index 99572c39dd..422cb8f3c5 100644 --- a/x/deployment/module.go +++ b/x/deployment/module.go @@ -25,7 +25,6 @@ import ( v1beta2types "github.com/akash-network/akash-api/go/node/deployment/v1beta2" types "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/deployment/client/cli" "github.com/akash-network/node/x/deployment/client/rest" "github.com/akash-network/node/x/deployment/handler" @@ -156,12 +155,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), handler.NewServer(am.keeper, am.mkeeper, am.ekeeper, am.authzKeeper)) querier := am.keeper.NewQuerier() types.RegisterQueryServer(cfg.QueryServer(), querier) - - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // BeginBlock performs no-op @@ -190,7 +183,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 4 } // AppModuleSimulation implements an application simulation module for the deployment module. 
diff --git a/x/escrow/module.go b/x/escrow/module.go index ca05c438ac..8a2486e014 100644 --- a/x/escrow/module.go +++ b/x/escrow/module.go @@ -24,7 +24,6 @@ import ( v1beta2types "github.com/akash-network/akash-api/go/node/escrow/v1beta2" types "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/escrow/client/cli" "github.com/akash-network/node/x/escrow/client/rest" "github.com/akash-network/node/x/escrow/keeper" @@ -158,12 +157,6 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd func (am AppModule) RegisterServices(cfg module.Configurator) { querier := keeper.NewQuerier(am.keeper) types.RegisterQueryServer(cfg.QueryServer(), querier) - - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // RegisterQueryService registers a GRPC query service to respond to the @@ -199,7 +192,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 2 } // ____________________________________________________________________________ diff --git a/x/gov/module.go b/x/gov/module.go index c1717ab24a..f2d87fb1fa 100644 --- a/x/gov/module.go +++ b/x/gov/module.go @@ -20,7 +20,6 @@ import ( types "github.com/akash-network/akash-api/go/node/gov/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/gov/keeper" "github.com/akash-network/node/x/gov/simulation" ) @@ -84,11 +83,6 @@ func (AppModuleBasic) GetTxCmd() *cobra.Command { return nil } -// GetQueryClient returns a new query client for this module -// func (AppModuleBasic) GetQueryClient(clientCtx client.Context) 
types.QueryClient { -// return nil -// } - // AppModule implements an application module for the provider module. type AppModule struct { AppModuleBasic @@ -127,12 +121,7 @@ func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { } // RegisterServices registers the module's services -func (am AppModule) RegisterServices(cfg module.Configurator) { - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) +func (am AppModule) RegisterServices(_ module.Configurator) { } // BeginBlock performs no-op @@ -161,7 +150,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 1 } // ____________________________________________________________________________ diff --git a/x/inflation/module.go b/x/inflation/module.go index fd90a2d6c4..ae4414cf47 100644 --- a/x/inflation/module.go +++ b/x/inflation/module.go @@ -20,7 +20,6 @@ import ( types "github.com/akash-network/akash-api/go/node/inflation/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/inflation/keeper" "github.com/akash-network/node/x/inflation/simulation" ) @@ -120,12 +119,7 @@ func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { } // RegisterServices registers the module's services -func (am AppModule) RegisterServices(cfg module.Configurator) { - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) +func (am AppModule) RegisterServices(_ module.Configurator) { } // BeginBlock performs no-op @@ -154,7 +148,7 @@ func (am 
AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 1 } // AppModuleSimulation implements an application simulation module for the deployment module. diff --git a/x/market/client/cli/grpc_rest_test.go b/x/market/client/cli/grpc_rest_test.go index 1f412cfe7d..e0ab1b5dab 100644 --- a/x/market/client/cli/grpc_rest_test.go +++ b/x/market/client/cli/grpc_rest_test.go @@ -247,7 +247,6 @@ func (s *GRPCRestTestSuite) TestGetOrders() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -315,7 +314,6 @@ func (s *GRPCRestTestSuite) TestGetOrder() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -386,7 +384,6 @@ func (s *GRPCRestTestSuite) TestGetBids() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -460,7 +457,6 @@ func (s *GRPCRestTestSuite) TestGetBid() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -531,7 +527,6 @@ func (s *GRPCRestTestSuite) TestGetLeases() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -605,7 +600,6 @@ func (s *GRPCRestTestSuite) TestGetLease() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) diff --git a/x/market/genesis.go b/x/market/genesis.go index 3d21144c2f..efe4a2dc9c 100644 --- a/x/market/genesis.go +++ b/x/market/genesis.go @@ -37,7 +37,7 @@ func InitGenesis(ctx sdk.Context, kpr keeper.IKeeper, data *types.GenesisState) cdc := kpr.Codec() for 
_, record := range data.Orders { - key := keys.OrderKey(record.ID()) + key := keys.MustOrderKey(keys.OrderStateToPrefix(record.State), record.ID()) if store.Has(key) { panic(fmt.Errorf("market genesis orders init. order id %s: %w", record.ID(), types.ErrOrderExists)) @@ -47,7 +47,7 @@ func InitGenesis(ctx sdk.Context, kpr keeper.IKeeper, data *types.GenesisState) } for _, record := range data.Bids { - key := keys.BidKey(record.ID()) + key := keys.MustBidKey(keys.BidStateToPrefix(record.State), record.ID()) if store.Has(key) { panic(fmt.Errorf("market genesis bids init. bid id %s: %w", record.ID(), types.ErrBidExists)) @@ -57,7 +57,7 @@ func InitGenesis(ctx sdk.Context, kpr keeper.IKeeper, data *types.GenesisState) } for _, record := range data.Leases { - key := keys.LeaseKey(record.ID()) + key := keys.MustLeaseKey(keys.LeaseStateToPrefix(record.State), record.ID()) if store.Has(key) { panic(fmt.Errorf("market genesis leases init. order id %s: lease exists", record.ID())) diff --git a/x/market/handler/server.go b/x/market/handler/server.go index f96defcf1c..e81adabe71 100644 --- a/x/market/handler/server.go +++ b/x/market/handler/server.go @@ -145,7 +145,7 @@ func (ms msgServer) CloseBid(goCtx context.Context, msg *types.MsgCloseBid) (*ty ms.keepers.Market.OnBidClosed(ctx, bid) ms.keepers.Market.OnOrderClosed(ctx, order) - ms.keepers.Escrow.PaymentClose(ctx, + _ = ms.keepers.Escrow.PaymentClose(ctx, dtypes.EscrowAccountForDeployment(lease.ID().DeploymentID()), types.EscrowPaymentForLease(lease.ID())) @@ -220,25 +220,19 @@ func (ms msgServer) CreateLease(goCtx context.Context, msg *types.MsgCreateLease ms.keepers.Market.OnBidMatched(ctx, bid) // close losing bids - var lostbids []types.Bid - ms.keepers.Market.WithBidsForOrder(ctx, msg.BidID.OrderID(), func(bid types.Bid) bool { - if bid.ID().Equals(msg.BidID) { - return false - } - if bid.State != types.BidOpen { - return false + ms.keepers.Market.WithBidsForOrder(ctx, msg.BidID.OrderID(), types.BidOpen, func(bid 
types.Bid) bool { + ms.keepers.Market.OnBidLost(ctx, bid) + + if err = ms.keepers.Escrow.AccountClose(ctx, + types.EscrowAccountForBid(bid.ID())); err != nil { + return true } - lostbids = append(lostbids, bid) return false }) - for _, bid := range lostbids { - ms.keepers.Market.OnBidLost(ctx, bid) - if err := ms.keepers.Escrow.AccountClose(ctx, - types.EscrowAccountForBid(bid.ID())); err != nil { - return &types.MsgCreateLeaseResponse{}, err - } + if err != nil { + return &types.MsgCreateLeaseResponse{}, err } return &types.MsgCreateLeaseResponse{}, nil diff --git a/x/market/keeper/grpc_query.go b/x/market/keeper/grpc_query.go index eeb455acc2..9b22615f1f 100644 --- a/x/market/keeper/grpc_query.go +++ b/x/market/keeper/grpc_query.go @@ -35,38 +35,103 @@ func (k Querier) Orders(c context.Context, req *types.QueryOrdersRequest) (*type return nil, status.Error(codes.InvalidArgument, "invalid state value") } - var orders types.Orders - ctx := sdk.UnwrapSDKContext(c) + // case 1: no filters set, iterating over entire store + // case 2: state only or state plus underlying filters like owner, iterating over state store + // case 3: state not set, underlying filters like owner are set, most complex case - store := ctx.KVStore(k.skey) - searchPrefix, err := keys.OrderPrefixFromFilter(req.Filters) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + var store prefix.Store + + if req.Pagination == nil { + req.Pagination = &sdkquery.PageRequest{} + } else if req.Pagination != nil && req.Pagination.Offset > 0 && req.Filters.State == "" { + return nil, status.Error(codes.InvalidArgument, "invalid request parameters. 
if offset is set, filter.state must be provided") + } + + if req.Pagination.Limit == 0 { + req.Pagination.Limit = sdkquery.DefaultLimit + } + + states := make([]types.Order_State, 0, 3) + + // setup for case 3 - cross-index search + if req.Filters.State == "" { + // request has pagination key set, determine store prefix + if len(req.Pagination.Key) > 0 { + if len(req.Pagination.Key) < 3 { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + + switch req.Pagination.Key[2] { + case keys.OrderStateOpenPrefixID: + states = append(states, types.OrderOpen) + fallthrough + case keys.OrderStateActivePrefixID: + states = append(states, types.OrderActive) + fallthrough + case keys.OrderStateClosedPrefixID: + states = append(states, types.OrderClosed) + default: + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + } else { + // request does not have pagination set. Start from open store + states = append(states, types.OrderOpen) + states = append(states, types.OrderActive) + states = append(states, types.OrderClosed) + } + } else { + states = append(states, stateVal) } - orderStore := prefix.NewStore(store, searchPrefix) + var orders types.Orders + var pageRes *sdkquery.PageResponse + + ctx := sdk.UnwrapSDKContext(c) + + for _, state := range states { + var searchPrefix []byte + var err error - pageRes, err := sdkquery.FilteredPaginate(orderStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { - var order types.Order + req.Filters.State = state.String() - err := k.cdc.Unmarshal(value, &order) + searchPrefix, err = keys.OrderPrefixFromFilter(req.Filters) if err != nil { - return false, err + return nil, status.Error(codes.Internal, err.Error()) } - // filter orders with provided filters - if req.Filters.Accept(order, stateVal) { - if accumulate { - orders = append(orders, order) + count := uint64(0) + + store = prefix.NewStore(ctx.KVStore(k.skey), searchPrefix) + + pageRes, err = 
sdkquery.FilteredPaginate(store, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + var order types.Order + + err := k.cdc.Unmarshal(value, &order) + if err != nil { + return false, err } - return true, nil + // filter orders with provided filters + if req.Filters.Accept(order, stateVal) { + if accumulate { + orders = append(orders, order) + count++ + } + + return true, nil + } + + return false, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return false, nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + req.Pagination.Limit -= count + + if req.Pagination.Limit == 0 { + break + } } return &types.QueryOrdersResponse{ @@ -75,78 +140,127 @@ func (k Querier) Orders(c context.Context, req *types.QueryOrdersRequest) (*type }, nil } -// Order returns order details based on OrderID -func (k Querier) Order(c context.Context, req *types.QueryOrderRequest) (*types.QueryOrderResponse, error) { +// Bids returns bids based on filters +func (k Querier) Bids(c context.Context, req *types.QueryBidsRequest) (*types.QueryBidsResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } - if _, err := sdk.AccAddressFromBech32(req.ID.Owner); err != nil { - return nil, status.Error(codes.InvalidArgument, "invalid owner address") + stateVal := types.Bid_State(types.Bid_State_value[req.Filters.State]) + + if req.Filters.State != "" && stateVal == types.BidStateInvalid { + return nil, status.Error(codes.InvalidArgument, "invalid state value") } - ctx := sdk.UnwrapSDKContext(c) + if req.Pagination == nil { + req.Pagination = &sdkquery.PageRequest{} + } else if req.Pagination != nil && req.Pagination.Offset > 0 && req.Filters.State == "" { + return nil, status.Error(codes.InvalidArgument, "invalid request parameters. 
if offset is set, filter.state must be provided") + } - order, found := k.GetOrder(ctx, req.ID) - if !found { - return nil, types.ErrOrderNotFound + if req.Pagination.Limit == 0 { + req.Pagination.Limit = sdkquery.DefaultLimit } - return &types.QueryOrderResponse{Order: order}, nil -} + reverseSearch := (req.Filters.Owner == "") && (req.Filters.Provider != "") + states := make([]types.Bid_State, 0, 4) -// Bids returns bids based on filters -func (k Querier) Bids(c context.Context, req *types.QueryBidsRequest) (*types.QueryBidsResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "empty request") - } + // setup for case 3 - cross-index search + if req.Filters.State == "" { + // request has pagination key set, determine store prefix + if len(req.Pagination.Key) > 0 { + if len(req.Pagination.Key) < 3 { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } - stateVal := types.Bid_State(types.Bid_State_value[req.Filters.State]) + if reverseSearch && req.Pagination.Key[2] > keys.BidStateActivePrefixID { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } - if req.Filters.State != "" && stateVal == types.BidStateInvalid { - return nil, status.Error(codes.InvalidArgument, "invalid state value") + switch req.Pagination.Key[2] { + case keys.BidStateOpenPrefixID: + states = append(states, types.BidOpen) + fallthrough + case keys.BidStateActivePrefixID: + states = append(states, types.BidActive) + fallthrough + case keys.BidStateLostPrefixID: + states = append(states, types.BidLost) + fallthrough + case keys.BidStateClosedPrefixID: + states = append(states, types.BidClosed) + default: + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + } else { + // request does not have pagination set. 
Start from open store + states = append(states, types.BidOpen, types.BidActive, types.BidLost, types.BidClosed) + } + } else { + states = append(states, stateVal) } var bids []types.QueryBidResponse + var pageRes *sdkquery.PageResponse ctx := sdk.UnwrapSDKContext(c) - store := ctx.KVStore(k.skey) - searchPrefix, err := keys.BidPrefixFromFilter(req.Filters) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } + for _, state := range states { + var searchPrefix []byte + var err error - bidStore := prefix.NewStore(store, searchPrefix) + req.Filters.State = state.String() - pageRes, err := sdkquery.FilteredPaginate(bidStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { - var bid types.Bid + if reverseSearch { + searchPrefix, err = keys.BidReversePrefixFromFilter(req.Filters) + } else { + searchPrefix, err = keys.BidPrefixFromFilter(req.Filters) + } - err := k.cdc.Unmarshal(value, &bid) if err != nil { - return false, err + return nil, status.Error(codes.Internal, err.Error()) } - // filter bids with provided filters - if req.Filters.Accept(bid, stateVal) { - if accumulate { - acct, err := k.ekeeper.GetAccount(ctx, types.EscrowAccountForBid(bid.BidID)) - if err != nil { - return true, err + count := uint64(0) + + bidStore := prefix.NewStore(ctx.KVStore(k.skey), searchPrefix) + pageRes, err = sdkquery.FilteredPaginate(bidStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + var bid types.Bid + + err := k.cdc.Unmarshal(value, &bid) + if err != nil { + return false, err + } + + // filter bids with provided filters + if req.Filters.Accept(bid, stateVal) { + if accumulate { + acct, err := k.ekeeper.GetAccount(ctx, types.EscrowAccountForBid(bid.BidID)) + if err != nil { + return true, err + } + + bids = append(bids, types.QueryBidResponse{ + Bid: bid, + EscrowAccount: acct, + }) + + count++ } - bids = append(bids, types.QueryBidResponse{ - Bid: bid, - EscrowAccount: acct, - }) + 
return true, nil } - return true, nil + return false, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return false, nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + req.Pagination.Limit -= count + + if req.Pagination.Limit == 0 { + break + } } return &types.QueryBidsResponse{ @@ -155,97 +269,129 @@ func (k Querier) Bids(c context.Context, req *types.QueryBidsRequest) (*types.Qu }, nil } -// Bid returns bid details based on BidID -func (k Querier) Bid(c context.Context, req *types.QueryBidRequest) (*types.QueryBidResponse, error) { +// Leases returns leases based on filters +func (k Querier) Leases(c context.Context, req *types.QueryLeasesRequest) (*types.QueryLeasesResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } - if _, err := sdk.AccAddressFromBech32(req.ID.Owner); err != nil { - return nil, status.Error(codes.InvalidArgument, "invalid owner address") - } + stateVal := types.Lease_State(types.Lease_State_value[req.Filters.State]) - if _, err := sdk.AccAddressFromBech32(req.ID.Provider); err != nil { - return nil, status.Error(codes.InvalidArgument, "invalid provider address") + if req.Filters.State != "" && stateVal == types.LeaseStateInvalid { + return nil, status.Error(codes.InvalidArgument, "invalid state value") } - ctx := sdk.UnwrapSDKContext(c) - - bid, found := k.GetBid(ctx, req.ID) - if !found { - return nil, types.ErrBidNotFound + if req.Pagination == nil { + req.Pagination = &sdkquery.PageRequest{} + } else if req.Pagination != nil && req.Pagination.Offset > 0 && req.Filters.State == "" { + return nil, status.Error(codes.InvalidArgument, "invalid request parameters. 
if offset is set, filter.state must be provided") } - acct, err := k.ekeeper.GetAccount(ctx, types.EscrowAccountForBid(bid.ID())) - if err != nil { - return nil, err + if req.Pagination.Limit == 0 { + req.Pagination.Limit = sdkquery.DefaultLimit } - return &types.QueryBidResponse{ - Bid: bid, - EscrowAccount: acct, - }, nil -} + reverseSearch := (req.Filters.Owner == "") && (req.Filters.Provider != "") -// Leases returns leases based on filters -func (k Querier) Leases(c context.Context, req *types.QueryLeasesRequest) (*types.QueryLeasesResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "empty request") - } + states := make([]types.Lease_State, 0, 3) - stateVal := types.Lease_State(types.Lease_State_value[req.Filters.State]) + // setup for case 3 - cross-index search + if req.Filters.State == "" { + // request has pagination key set, determine store prefix + if len(req.Pagination.Key) > 0 { + if len(req.Pagination.Key) < 3 { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } - if req.Filters.State != "" && stateVal == types.LeaseStateInvalid { - return nil, status.Error(codes.InvalidArgument, "invalid state value") + if reverseSearch && req.Pagination.Key[2] > keys.LeaseStateActivePrefixID { + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + + switch req.Pagination.Key[2] { + case keys.LeaseStateActivePrefixID: + states = append(states, types.LeaseActive) + fallthrough + case keys.LeaseStateInsufficientFundsPrefixID: + states = append(states, types.LeaseInsufficientFunds) + fallthrough + case keys.LeaseStateClosedPrefixID: + states = append(states, types.LeaseClosed) + default: + return nil, status.Error(codes.InvalidArgument, "invalid pagination key") + } + } else { + // request does not have pagination set. 
Start from open store + req.Filters.State = types.LeaseActive.String() + states = append(states, types.LeaseActive, types.LeaseInsufficientFunds, types.LeaseClosed) + } + } else { + states = append(states, stateVal) } var leases []types.QueryLeaseResponse + var pageRes *sdkquery.PageResponse ctx := sdk.UnwrapSDKContext(c) - store := ctx.KVStore(k.skey) - searchPrefix, isSecondaryPrefix, err := keys.LeasePrefixFromFilter(req.Filters) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - searchedStore := prefix.NewStore(store, searchPrefix) + for _, state := range states { + var searchPrefix []byte + var err error - pageRes, err := sdkquery.FilteredPaginate(searchedStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { - var lease types.Lease + req.Filters.State = state.String() - if isSecondaryPrefix { - secondaryKey := value - // Load the actual key, from the secondary key - value = store.Get(secondaryKey) + if reverseSearch { + searchPrefix, err = keys.LeaseReversePrefixFromFilter(req.Filters) + } else { + searchPrefix, err = keys.LeasePrefixFromFilter(req.Filters) } - err := k.cdc.Unmarshal(value, &lease) if err != nil { - return false, err + return nil, status.Error(codes.Internal, err.Error()) } - // filter leases with provided filters - if req.Filters.Accept(lease, stateVal) { - if accumulate { - payment, err := k.ekeeper.GetPayment(ctx, - dtypes.EscrowAccountForDeployment(lease.ID().DeploymentID()), - types.EscrowPaymentForLease(lease.ID())) - if err != nil { - return true, err + count := uint64(0) + + searchedStore := prefix.NewStore(ctx.KVStore(k.skey), searchPrefix) + + pageRes, err = sdkquery.FilteredPaginate(searchedStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + var lease types.Lease + + err := k.cdc.Unmarshal(value, &lease) + if err != nil { + return false, err + } + + // filter leases with provided filters + if req.Filters.Accept(lease, stateVal) { + if 
accumulate { + payment, err := k.ekeeper.GetPayment(ctx, + dtypes.EscrowAccountForDeployment(lease.ID().DeploymentID()), + types.EscrowPaymentForLease(lease.ID())) + if err != nil { + return true, err + } + + leases = append(leases, types.QueryLeaseResponse{ + Lease: lease, + EscrowPayment: payment, + }) + + count++ } - leases = append(leases, types.QueryLeaseResponse{ - Lease: lease, - EscrowPayment: payment, - }) + return true, nil } - return true, nil + return false, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return false, nil - }) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + req.Pagination.Limit -= count + + if req.Pagination.Limit == 0 { + break + } } return &types.QueryLeasesResponse{ @@ -254,6 +400,58 @@ func (k Querier) Leases(c context.Context, req *types.QueryLeasesRequest) (*type }, nil } +// Order returns order details based on OrderID +func (k Querier) Order(c context.Context, req *types.QueryOrderRequest) (*types.QueryOrderResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if _, err := sdk.AccAddressFromBech32(req.ID.Owner); err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid owner address") + } + + ctx := sdk.UnwrapSDKContext(c) + + order, found := k.GetOrder(ctx, req.ID) + if !found { + return nil, types.ErrOrderNotFound + } + + return &types.QueryOrderResponse{Order: order}, nil +} + +// Bid returns bid details based on BidID +func (k Querier) Bid(c context.Context, req *types.QueryBidRequest) (*types.QueryBidResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if _, err := sdk.AccAddressFromBech32(req.ID.Owner); err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid owner address") + } + + if _, err := sdk.AccAddressFromBech32(req.ID.Provider); err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid 
provider address") + } + + ctx := sdk.UnwrapSDKContext(c) + + bid, found := k.GetBid(ctx, req.ID) + if !found { + return nil, types.ErrBidNotFound + } + + acct, err := k.ekeeper.GetAccount(ctx, types.EscrowAccountForBid(bid.ID())) + if err != nil { + return nil, err + } + + return &types.QueryBidResponse{ + Bid: bid, + EscrowAccount: acct, + }, nil +} + // Lease returns lease details based on LeaseID func (k Querier) Lease(c context.Context, req *types.QueryLeaseRequest) (*types.QueryLeaseResponse, error) { if req == nil { diff --git a/x/market/keeper/grpc_query_test.go b/x/market/keeper/grpc_query_test.go index b0bfeedd52..676f84865e 100644 --- a/x/market/keeper/grpc_query_test.go +++ b/x/market/keeper/grpc_query_test.go @@ -100,7 +100,6 @@ func TestGRPCQueryOrder(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -170,7 +169,6 @@ func TestGRPCQueryOrders(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -914,7 +912,6 @@ func TestGRPCQueryBid(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -985,7 +982,6 @@ func TestGRPCQueryBids(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -1055,7 +1051,6 @@ func TestGRPCQueryLease(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -1131,7 +1126,6 @@ func TestGRPCQueryLeases(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := 
sdk.WrapSDKContext(suite.ctx) diff --git a/x/market/keeper/keeper.go b/x/market/keeper/keeper.go index 164af4489f..a53c43a7d5 100644 --- a/x/market/keeper/keeper.go +++ b/x/market/keeper/keeper.go @@ -31,12 +31,10 @@ type IKeeper interface { GetOrder(ctx sdk.Context, id types.OrderID) (types.Order, bool) GetBid(ctx sdk.Context, id types.BidID) (types.Bid, bool) GetLease(ctx sdk.Context, id types.LeaseID) (types.Lease, bool) - LeaseForOrder(ctx sdk.Context, oid types.OrderID) (types.Lease, bool) WithOrders(ctx sdk.Context, fn func(types.Order) bool) WithBids(ctx sdk.Context, fn func(types.Bid) bool) WithLeases(ctx sdk.Context, fn func(types.Lease) bool) - WithOrdersForGroup(ctx sdk.Context, id dtypes.GroupID, fn func(types.Order) bool) - WithBidsForOrder(ctx sdk.Context, id types.OrderID, fn func(types.Bid) bool) + WithBidsForOrder(ctx sdk.Context, id types.OrderID, state types.Bid_State, fn func(types.Bid) bool) BidCountForOrder(ctx sdk.Context, id types.OrderID) uint32 GetParams(ctx sdk.Context) (params types.Params) SetParams(ctx sdk.Context, params types.Params) @@ -85,10 +83,17 @@ func (k Keeper) CreateOrder(ctx sdk.Context, gid dtypes.GroupID, spec dtypes.Gro oseq := uint32(1) var err error - k.WithOrdersForGroup(ctx, gid, func(order types.Order) bool { - if err = order.ValidateInactive(); err != nil { - return true - } + k.WithOrdersForGroup(ctx, gid, types.OrderActive, func(order types.Order) bool { + err = types.ErrOrderActive + return true + }) + + k.WithOrdersForGroup(ctx, gid, types.OrderOpen, func(order types.Order) bool { + err = types.ErrOrderActive + return true + }) + + k.WithOrdersForGroup(ctx, gid, types.OrderClosed, func(order types.Order) bool { oseq++ return false }) @@ -97,6 +102,12 @@ func (k Keeper) CreateOrder(ctx sdk.Context, gid dtypes.GroupID, spec dtypes.Gro return types.Order{}, fmt.Errorf("%w: create order: active order exists", err) } + orderID := types.MakeOrderID(gid, oseq) + + if res := k.findOrder(ctx, orderID); len(res) > 0 { + 
return types.Order{}, types.ErrOrderExists + } + order := types.Order{ OrderID: types.MakeOrderID(gid, oseq), Spec: spec, @@ -104,15 +115,12 @@ func (k Keeper) CreateOrder(ctx sdk.Context, gid dtypes.GroupID, spec dtypes.Gro CreatedAt: ctx.BlockHeight(), } - key := keys.OrderKey(order.ID()) - - if store.Has(key) { - return types.Order{}, types.ErrOrderExists - } + key := keys.MustOrderKey(keys.OrderStateOpenPrefix, order.ID()) store.Set(key, k.cdc.MustMarshal(&order)) ctx.Logger().Info("created order", "order", order.ID()) + ctx.EventManager().EmitEvent( types.NewEventOrderCreated(order.ID()). ToSDKEvent(), @@ -124,21 +132,30 @@ func (k Keeper) CreateOrder(ctx sdk.Context, gid dtypes.GroupID, spec dtypes.Gro func (k Keeper) CreateBid(ctx sdk.Context, oid types.OrderID, provider sdk.AccAddress, price sdk.DecCoin, roffer types.ResourcesOffer) (types.Bid, error) { store := ctx.KVStore(k.skey) + bidID := types.MakeBidID(oid, provider) + + if key := k.findBid(ctx, bidID); len(key) > 0 { + return types.Bid{}, types.ErrBidExists + } + bid := types.Bid{ - BidID: types.MakeBidID(oid, provider), + BidID: bidID, State: types.BidOpen, Price: price, CreatedAt: ctx.BlockHeight(), ResourcesOffer: roffer, } - key := keys.BidKey(bid.ID()) + data := k.cdc.MustMarshal(&bid) - if store.Has(key) { - return types.Bid{}, types.ErrBidExists - } + key := keys.MustBidKey(keys.BidStateToPrefix(bid.State), bidID) + revKey := keys.MustBidStateRevereKey(bid.State, bidID) + + store.Set(key, data) - store.Set(key, k.cdc.MustMarshal(&bid)) + if len(revKey) > 0 { + store.Set(revKey, data) + } ctx.EventManager().EmitEvent( types.NewEventBidCreated(bid.ID(), price). 
@@ -160,9 +177,16 @@ func (k Keeper) CreateLease(ctx sdk.Context, bid types.Bid) { CreatedAt: ctx.BlockHeight(), } + data := k.cdc.MustMarshal(&lease) + // create (active) lease in store - key := keys.LeaseKey(lease.ID()) - store.Set(key, k.cdc.MustMarshal(&lease)) + key := keys.MustLeaseKey(keys.LeaseStateToPrefix(lease.State), lease.ID()) + revKey := keys.MustLeaseStateReverseKey(lease.State, lease.LeaseID) + + store.Set(key, data) + if len(revKey) > 0 { + store.Set(revKey, data) + } ctx.Logger().Info("created lease", "lease", lease.ID()) ctx.EventManager().EmitEvent( @@ -173,20 +197,23 @@ func (k Keeper) CreateLease(ctx sdk.Context, bid types.Bid) { // OnOrderMatched updates order state to matched func (k Keeper) OnOrderMatched(ctx sdk.Context, order types.Order) { + currState := order.State order.State = types.OrderActive - k.updateOrder(ctx, order) + k.updateOrder(ctx, order, currState) } -// OnBidActive updates bid state to matched +// OnBidMatched updates bid state to matched func (k Keeper) OnBidMatched(ctx sdk.Context, bid types.Bid) { + currState := bid.State bid.State = types.BidActive - k.updateBid(ctx, bid) + k.updateBid(ctx, bid, currState) } // OnBidLost updates bid state to bid lost func (k Keeper) OnBidLost(ctx sdk.Context, bid types.Bid) { + currState := bid.State bid.State = types.BidLost - k.updateBid(ctx, bid) + k.updateBid(ctx, bid, currState) } // OnBidClosed updates bid state to closed @@ -195,10 +222,12 @@ func (k Keeper) OnBidClosed(ctx sdk.Context, bid types.Bid) { case types.BidClosed, types.BidLost: return } + + currState := bid.State bid.State = types.BidClosed - k.updateBid(ctx, bid) + k.updateBid(ctx, bid, currState) - k.ekeeper.AccountClose(ctx, types.EscrowAccountForBid(bid.ID())) + _ = k.ekeeper.AccountClose(ctx, types.EscrowAccountForBid(bid.ID())) ctx.EventManager().EmitEvent( types.NewEventBidClosed(bid.ID(), bid.Price). 
@@ -212,8 +241,11 @@ func (k Keeper) OnOrderClosed(ctx sdk.Context, order types.Order) { return } + currState := order.State + order.State = types.OrderClosed - k.updateOrder(ctx, order) + k.updateOrder(ctx, order, currState) + ctx.EventManager().EmitEvent( types.NewEventOrderClosed(order.ID()). ToSDKEvent(), @@ -226,9 +258,23 @@ func (k Keeper) OnLeaseClosed(ctx sdk.Context, lease types.Lease, state types.Le case types.LeaseClosed, types.LeaseInsufficientFunds: return } + + currState := lease.State lease.State = state lease.ClosedOn = ctx.BlockHeight() - k.updateLease(ctx, lease) + + store := ctx.KVStore(k.skey) + + key := keys.MustLeaseKey(keys.LeaseStateToPrefix(currState), lease.ID()) + revKey := keys.MustLeaseStateReverseKey(currState, lease.LeaseID) + + store.Delete(key) + if len(revKey) > 0 { + store.Delete(revKey) + } + + key = keys.MustLeaseKey(keys.LeaseStateToPrefix(lease.State), lease.ID()) + store.Set(key, k.cdc.MustMarshal(&lease)) ctx.EventManager().EmitEvent( types.NewEventLeaseClosed(lease.ID(), lease.Price). 
@@ -238,46 +284,106 @@ func (k Keeper) OnLeaseClosed(ctx sdk.Context, lease types.Lease, state types.Le // OnGroupClosed updates state of all orders, bids and leases in group to closed func (k Keeper) OnGroupClosed(ctx sdk.Context, id dtypes.GroupID) { - k.WithOrdersForGroup(ctx, id, func(order types.Order) bool { + processClose := func(ctx sdk.Context, bid types.Bid) { + k.OnBidClosed(ctx, bid) + if lease, ok := k.GetLease(ctx, bid.ID().LeaseID()); ok { + k.OnLeaseClosed(ctx, lease, types.LeaseClosed) + + if err := k.ekeeper.PaymentClose(ctx, + dtypes.EscrowAccountForDeployment(id.DeploymentID()), + types.EscrowPaymentForLease(lease.ID())); err != nil { + ctx.Logger().With("err", err).Info("error closing payment") + } + + } + } + + k.WithOrdersForGroup(ctx, id, types.OrderActive, func(order types.Order) bool { k.OnOrderClosed(ctx, order) - k.WithBidsForOrder(ctx, order.ID(), func(bid types.Bid) bool { - k.OnBidClosed(ctx, bid) - if lease, ok := k.GetLease(ctx, types.LeaseID(bid.ID())); ok { - k.OnLeaseClosed(ctx, lease, types.LeaseClosed) - if err := k.ekeeper.PaymentClose(ctx, - dtypes.EscrowAccountForDeployment(id.DeploymentID()), - types.EscrowPaymentForLease(lease.ID())); err != nil { - ctx.Logger().With("err", err).Info("error closing payment") - } + k.WithBidsForOrder(ctx, order.ID(), types.BidOpen, func(bid types.Bid) bool { + processClose(ctx, bid) + return false + }) - } + k.WithBidsForOrder(ctx, order.ID(), types.BidActive, func(bid types.Bid) bool { + processClose(ctx, bid) return false }) + return false }) } +func (k Keeper) findOrder(ctx sdk.Context, id types.OrderID) []byte { + store := ctx.KVStore(k.skey) + aKey := keys.MustOrderKey(keys.OrderStateActivePrefix, id) + oKey := keys.MustOrderKey(keys.OrderStateOpenPrefix, id) + cKey := keys.MustOrderKey(keys.OrderStateClosedPrefix, id) + + var key []byte + + // nolint: gocritic + if store.Has(aKey) { + key = aKey + } else if store.Has(oKey) { + key = oKey + } else if store.Has(cKey) { + key = cKey + } 
+ + return key +} + // GetOrder returns order with given orderID from market store func (k Keeper) GetOrder(ctx sdk.Context, id types.OrderID) (types.Order, bool) { - store := ctx.KVStore(k.skey) - key := keys.OrderKey(id) - if !store.Has(key) { + key := k.findOrder(ctx, id) + + if len(key) == 0 { return types.Order{}, false } + store := ctx.KVStore(k.skey) + buf := store.Get(key) var val types.Order k.cdc.MustUnmarshal(buf, &val) + return val, true } +func (k Keeper) findBid(ctx sdk.Context, id types.BidID) []byte { + store := ctx.KVStore(k.skey) + + aKey := keys.MustBidKey(keys.BidStateActivePrefix, id) + oKey := keys.MustBidKey(keys.BidStateOpenPrefix, id) + lKey := keys.MustBidKey(keys.BidStateLostPrefix, id) + cKey := keys.MustBidKey(keys.BidStateClosedPrefix, id) + + var key []byte + + // nolint: gocritic + if store.Has(aKey) { + key = aKey + } else if store.Has(oKey) { + key = oKey + } else if store.Has(lKey) { + key = lKey + } else if store.Has(cKey) { + key = cKey + } + + return key +} + // GetBid returns bid with given bidID from market store func (k Keeper) GetBid(ctx sdk.Context, id types.BidID) (types.Bid, bool) { store := ctx.KVStore(k.skey) - key := keys.BidKey(id) - if !store.Has(key) { + + key := k.findBid(ctx, id) + + if len(key) == 0 { return types.Bid{}, false } @@ -285,14 +391,38 @@ func (k Keeper) GetBid(ctx sdk.Context, id types.BidID) (types.Bid, bool) { var val types.Bid k.cdc.MustUnmarshal(buf, &val) + return val, true } +func (k Keeper) findLease(ctx sdk.Context, id types.LeaseID) []byte { + store := ctx.KVStore(k.skey) + + aKey := keys.MustLeaseKey(keys.LeaseStateActivePrefix, id) + iKey := keys.MustLeaseKey(keys.LeaseStateInsufficientFundsPrefix, id) + cKey := keys.MustLeaseKey(keys.LeaseStateClosedPrefix, id) + + var key []byte + + // nolint: gocritic + if store.Has(aKey) { + key = aKey + } else if store.Has(iKey) { + key = iKey + } else if store.Has(cKey) { + key = cKey + } + + return key +} + // GetLease returns lease with given 
leaseID from market store func (k Keeper) GetLease(ctx sdk.Context, id types.LeaseID) (types.Lease, bool) { store := ctx.KVStore(k.skey) - key := keys.LeaseKey(id) - if !store.Has(key) { + + key := k.findLease(ctx, id) + + if len(key) == 0 { return types.Lease{}, false } @@ -303,32 +433,14 @@ func (k Keeper) GetLease(ctx sdk.Context, id types.LeaseID) (types.Lease, bool) return val, true } -// LeaseForOrder returns lease for order with given ID and lease found status -func (k Keeper) LeaseForOrder(ctx sdk.Context, oid types.OrderID) (types.Lease, bool) { - var ( - value types.Lease - found bool - ) - - k.WithBidsForOrder(ctx, oid, func(item types.Bid) bool { - if !item.ID().OrderID().Equals(oid) { - return false - } - if item.State != types.BidActive { - return false - } - value, found = k.GetLease(ctx, types.LeaseID(item.ID())) - return true - }) - - return value, found -} - // WithOrders iterates all orders in market func (k Keeper) WithOrders(ctx sdk.Context, fn func(types.Order) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, types.OrderPrefix()) - defer iter.Close() + iter := sdk.KVStorePrefixIterator(store, keys.OrderPrefix) + defer func() { + _ = iter.Close() + }() + for ; iter.Valid(); iter.Next() { var val types.Order k.cdc.MustUnmarshal(iter.Value(), &val) @@ -341,8 +453,12 @@ func (k Keeper) WithOrders(ctx sdk.Context, fn func(types.Order) bool) { // WithBids iterates all bids in market func (k Keeper) WithBids(ctx sdk.Context, fn func(types.Bid) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, types.BidPrefix()) - defer iter.Close() + iter := sdk.KVStorePrefixIterator(store, keys.BidPrefix) + + defer func() { + _ = iter.Close() + }() + for ; iter.Valid(); iter.Next() { var val types.Bid k.cdc.MustUnmarshal(iter.Value(), &val) @@ -355,8 +471,12 @@ func (k Keeper) WithBids(ctx sdk.Context, fn func(types.Bid) bool) { // WithLeases iterates all leases in market func (k Keeper) WithLeases(ctx 
sdk.Context, fn func(types.Lease) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, types.LeasePrefix()) - defer iter.Close() + iter := sdk.KVStorePrefixIterator(store, keys.LeasePrefix) + + defer func() { + _ = iter.Close() + }() + for ; iter.Valid(); iter.Next() { var val types.Lease k.cdc.MustUnmarshal(iter.Value(), &val) @@ -367,10 +487,14 @@ func (k Keeper) WithLeases(ctx sdk.Context, fn func(types.Lease) bool) { } // WithOrdersForGroup iterates all orders of a group in market with given GroupID -func (k Keeper) WithOrdersForGroup(ctx sdk.Context, id dtypes.GroupID, fn func(types.Order) bool) { +func (k Keeper) WithOrdersForGroup(ctx sdk.Context, id dtypes.GroupID, state types.Order_State, fn func(types.Order) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, keys.OrdersForGroupPrefix(id)) - defer iter.Close() + iter := sdk.KVStorePrefixIterator(store, keys.OrdersForGroupPrefix(keys.OrderStateToPrefix(state), id)) + + defer func() { + _ = iter.Close() + }() + for ; iter.Valid(); iter.Next() { var val types.Order k.cdc.MustUnmarshal(iter.Value(), &val) @@ -381,11 +505,14 @@ func (k Keeper) WithOrdersForGroup(ctx sdk.Context, id dtypes.GroupID, fn func(t } // WithBidsForOrder iterates all bids of a order in market with given OrderID -func (k Keeper) WithBidsForOrder(ctx sdk.Context, id types.OrderID, fn func(types.Bid) bool) { +func (k Keeper) WithBidsForOrder(ctx sdk.Context, id types.OrderID, state types.Bid_State, fn func(types.Bid) bool) { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(id)) + iter := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(keys.BidStateToPrefix(state), id)) + + defer func() { + _ = iter.Close() + }() - defer iter.Close() for ; iter.Valid(); iter.Next() { var val types.Bid k.cdc.MustUnmarshal(iter.Value(), &val) @@ -397,12 +524,29 @@ func (k Keeper) WithBidsForOrder(ctx sdk.Context, id types.OrderID, fn func(type func 
(k Keeper) BidCountForOrder(ctx sdk.Context, id types.OrderID) uint32 { store := ctx.KVStore(k.skey) - iter := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(id)) - defer iter.Close() + oiter := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(keys.BidStateOpenPrefix, id)) + aiter := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(keys.BidStateActivePrefix, id)) + citer := sdk.KVStorePrefixIterator(store, keys.BidsForOrderPrefix(keys.BidStateClosedPrefix, id)) + + defer func() { + _ = oiter.Close() + _ = aiter.Close() + _ = citer.Close() + }() + count := uint32(0) - for ; iter.Valid(); iter.Next() { + for ; oiter.Valid(); oiter.Next() { + count++ + } + + for ; aiter.Valid(); aiter.Next() { + count++ + } + + for ; citer.Valid(); citer.Next() { count++ } + return count } @@ -417,20 +561,64 @@ func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { k.pspace.SetParamSet(ctx, ¶ms) } -func (k Keeper) updateOrder(ctx sdk.Context, order types.Order) { +func (k Keeper) updateOrder(ctx sdk.Context, order types.Order, currState types.Order_State) { store := ctx.KVStore(k.skey) - key := keys.OrderKey(order.ID()) - store.Set(key, k.cdc.MustMarshal(&order)) -} -func (k Keeper) updateBid(ctx sdk.Context, bid types.Bid) { - store := ctx.KVStore(k.skey) - key := keys.BidKey(bid.ID()) - store.Set(key, k.cdc.MustMarshal(&bid)) + switch currState { + case types.OrderOpen: + case types.OrderActive: + default: + panic(fmt.Sprintf("unexpected current state of the order: %d", currState)) + } + + key := keys.MustOrderKey(keys.OrderStateToPrefix(currState), order.ID()) + store.Delete(key) + + switch order.State { + case types.OrderActive: + case types.OrderClosed: + default: + panic(fmt.Sprintf("unexpected new state of the order: %d", order.State)) + } + + data := k.cdc.MustMarshal(&order) + + key = keys.MustOrderKey(keys.OrderStateToPrefix(order.State), order.ID()) + store.Set(key, data) } -func (k Keeper) updateLease(ctx sdk.Context, lease types.Lease) 
{ +func (k Keeper) updateBid(ctx sdk.Context, bid types.Bid, currState types.Bid_State) { store := ctx.KVStore(k.skey) - key := keys.LeaseKey(lease.ID()) - store.Set(key, k.cdc.MustMarshal(&lease)) + + switch currState { + case types.BidOpen: + case types.BidActive: + default: + panic(fmt.Sprintf("unexpected current state of the bid: %d", currState)) + } + + key := keys.MustBidKey(keys.BidStateToPrefix(currState), bid.ID()) + revKey := keys.MustBidStateRevereKey(currState, bid.ID()) + store.Delete(key) + if revKey != nil { + store.Delete(revKey) + } + + switch bid.State { + case types.BidActive: + case types.BidLost: + case types.BidClosed: + default: + panic(fmt.Sprintf("unexpected new state of the bid: %d", bid.State)) + } + + data := k.cdc.MustMarshal(&bid) + + key = keys.MustBidKey(keys.BidStateToPrefix(bid.State), bid.ID()) + revKey = keys.MustBidStateRevereKey(bid.State, bid.ID()) + + store.Set(key, data) + if len(revKey) > 0 { + store.Set(revKey, data) + } } diff --git a/x/market/keeper/keeper_test.go b/x/market/keeper/keeper_test.go index 54d4dac40b..e46ccb4dd6 100644 --- a/x/market/keeper/keeper_test.go +++ b/x/market/keeper/keeper_test.go @@ -54,23 +54,23 @@ func Test_WithOrders(t *testing.T) { assert.Equal(t, 1, count) } -func Test_WithOrdersForGroup(t *testing.T) { - ctx, keeper, _ := setupKeeper(t) - order, _ := createOrder(t, ctx, keeper) - - // create extra orders - createOrder(t, ctx, keeper) - - count := 0 - keeper.WithOrdersForGroup(ctx, order.ID().GroupID(), func(result types.Order) bool { - if assert.Equal(t, order.ID(), result.ID()) { - count++ - } - return false - }) - - assert.Equal(t, 1, count) -} +// func Test_WithOrdersForGroup(t *testing.T) { +// ctx, keeper, _ := setupKeeper(t) +// order, _ := createOrder(t, ctx, keeper) +// +// // create extra orders +// createOrder(t, ctx, keeper) +// +// count := 0 +// keeper.WithOrdersForGroup(ctx, order.ID().GroupID(), func(result types.Order) bool { +// if assert.Equal(t, order.ID(), result.ID()) { 
+// count++ +// } +// return false +// }) +// +// assert.Equal(t, 1, count) +// } func Test_CreateBid(t *testing.T) { _, _, suite := setupKeeper(t) @@ -116,7 +116,7 @@ func Test_WithBidsForOrder(t *testing.T) { createBid(t, suite) count := 0 - keeper.WithBidsForOrder(ctx, bid.ID().OrderID(), func(result types.Bid) bool { + keeper.WithBidsForOrder(ctx, bid.ID().OrderID(), types.BidOpen, func(result types.Bid) bool { if assert.Equal(t, bid.ID(), result.ID()) { count++ } @@ -154,26 +154,26 @@ func Test_WithLeases(t *testing.T) { assert.Equal(t, 1, count) } -func Test_LeaseForOrder(t *testing.T) { - ctx, keeper, suite := setupKeeper(t) - id := createLease(t, suite) - - // extra leases - createLease(t, suite) - createLease(t, suite) - - result, ok := keeper.LeaseForOrder(ctx, id.OrderID()) - assert.True(t, ok) - - assert.Equal(t, id, result.ID()) - - // no match - { - bid, _ := createBid(t, suite) - _, ok := keeper.LeaseForOrder(ctx, bid.ID().OrderID()) - assert.False(t, ok) - } -} +// func Test_LeaseForOrder(t *testing.T) { +// ctx, keeper, suite := setupKeeper(t) +// id := createLease(t, suite) +// +// // extra leases +// createLease(t, suite) +// createLease(t, suite) +// +// result, ok := keeper.LeaseForOrder(ctx, id.OrderID()) +// assert.True(t, ok) +// +// assert.Equal(t, id, result.ID()) +// +// // no match +// { +// bid, _ := createBid(t, suite) +// _, ok := keeper.LeaseForOrder(ctx, bid.ID().OrderID()) +// assert.False(t, ok) +// } +// } func Test_OnOrderMatched(t *testing.T) { ctx, keeper, suite := setupKeeper(t) diff --git a/x/market/keeper/keys/v1beta4/key.go b/x/market/keeper/keys/v1beta4/key.go index 6282c95dbb..93769f27d1 100644 --- a/x/market/keeper/keys/v1beta4/key.go +++ b/x/market/keeper/keys/v1beta4/key.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/address" dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" @@ -11,6 +12,390 @@ import ( 
"github.com/akash-network/akash-api/go/sdkutil" ) +const ( + OrderStateOpenPrefixID = byte(0x01) + OrderStateActivePrefixID = byte(0x02) + OrderStateClosedPrefixID = byte(0x03) + BidStateOpenPrefixID = byte(0x01) + BidStateActivePrefixID = byte(0x02) + BidStateLostPrefixID = byte(0x03) + BidStateClosedPrefixID = byte(0x04) + LeaseStateActivePrefixID = byte(0x01) + LeaseStateInsufficientFundsPrefixID = byte(0x02) + LeaseStateClosedPrefixID = byte(0x03) +) + +var ( + OrderPrefix = []byte{0x11, 0x00} + OrderStateOpenPrefix = []byte{OrderStateOpenPrefixID} + OrderStateActivePrefix = []byte{OrderStateActivePrefixID} + OrderStateClosedPrefix = []byte{OrderStateClosedPrefixID} + BidPrefix = []byte{0x12, 0x00} + BidPrefixReverse = []byte{0x12, 0x01} + BidStateOpenPrefix = []byte{BidStateOpenPrefixID} + BidStateActivePrefix = []byte{BidStateActivePrefixID} + BidStateLostPrefix = []byte{BidStateLostPrefixID} + BidStateClosedPrefix = []byte{BidStateClosedPrefixID} + LeasePrefix = []byte{0x13, 0x00} + LeasePrefixReverse = []byte{0x13, 0x01} + LeaseStateActivePrefix = []byte{LeaseStateActivePrefixID} + LeaseStateInsufficientFundsPrefix = []byte{LeaseStateInsufficientFundsPrefixID} + LeaseStateClosedPrefix = []byte{LeaseStateClosedPrefixID} +) + +func OrderKey(statePrefix []byte, id types.OrderID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(OrderPrefix) + buf.Write(statePrefix) + buf.Write(lenPrefixedOwner) + + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func MustOrderKey(statePrefix []byte, id types.OrderID) []byte { + key, err := 
OrderKey(statePrefix, id) + if err != nil { + panic(err) + } + return key +} + +func BidKey(statePrefix []byte, id types.BidID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + provider, err := sdk.AccAddressFromBech32(id.Provider) + if err != nil { + return nil, err + } + + lenPrefixedProvider, err := address.LengthPrefix(provider) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(BidPrefix) + buf.Write(statePrefix) + + buf.Write(lenPrefixedOwner) + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + return nil, err + } + + buf.Write(lenPrefixedProvider) + + return buf.Bytes(), nil +} + +func MustBidKey(statePrefix []byte, id types.BidID) []byte { + key, err := BidKey(statePrefix, id) + if err != nil { + panic(err) + } + return key +} + +func BidReverseKey(statePrefix []byte, id types.BidID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + provider, err := sdk.AccAddressFromBech32(id.Provider) + if err != nil { + return nil, err + } + + lenPrefixedProvider, err := address.LengthPrefix(provider) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(BidPrefixReverse) + + buf.Write(statePrefix) + buf.Write(lenPrefixedProvider) + + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + return nil, err + } + + 
buf.Write(lenPrefixedOwner) + + return buf.Bytes(), nil +} + +func MustBidReverseKey(statePrefix []byte, id types.BidID) []byte { + key, err := BidReverseKey(statePrefix, id) + if err != nil { + panic(err) + } + return key +} + +func BidStateReverseKey(state types.Bid_State, id types.BidID) ([]byte, error) { + if state != types.BidActive && state != types.BidOpen { + return nil, nil + } + + prefix := BidStateToPrefix(state) + key, err := BidReverseKey(prefix, id) + if err != nil { + return nil, err + } + + return key, nil +} + +func MustBidStateRevereKey(state types.Bid_State, id types.BidID) []byte { + key, err := BidStateReverseKey(state, id) + if err != nil { + panic(err) + } + + return key +} + +func LeaseKey(statePrefix []byte, id types.LeaseID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + if err != nil { + return nil, err + } + + provider, err := sdk.AccAddressFromBech32(id.Provider) + if err != nil { + return nil, err + } + + lenPrefixedProvider, err := address.LengthPrefix(provider) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(LeasePrefix) + buf.Write(statePrefix) + buf.Write(lenPrefixedOwner) + + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + return nil, err + } + + buf.Write(lenPrefixedProvider) + + return buf.Bytes(), nil +} + +func MustLeaseKey(statePrefix []byte, id types.LeaseID) []byte { + key, err := LeaseKey(statePrefix, id) + if err != nil { + panic(err) + } + return key +} + +func LeaseReverseKey(statePrefix []byte, id types.LeaseID) ([]byte, error) { + owner, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return nil, err + } + + lenPrefixedOwner, err := address.LengthPrefix(owner) + 
if err != nil { + return nil, err + } + + provider, err := sdk.AccAddressFromBech32(id.Provider) + if err != nil { + return nil, err + } + + lenPrefixedProvider, err := address.LengthPrefix(provider) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(LeasePrefixReverse) + buf.Write(statePrefix) + buf.Write(lenPrefixedProvider) + + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + return nil, err + } + + buf.Write(lenPrefixedOwner) + + return buf.Bytes(), nil +} + +func LeaseStateReverseKey(state types.Lease_State, id types.LeaseID) ([]byte, error) { + if state != types.LeaseActive { + return nil, nil + } + + prefix := LeaseStateToPrefix(state) + key, err := LeaseReverseKey(prefix, id) + if err != nil { + return nil, err + } + + return key, nil +} + +func MustLeaseStateReverseKey(state types.Lease_State, id types.LeaseID) []byte { + key, err := LeaseStateReverseKey(state, id) + if err != nil { + panic(err) + } + + return key +} + +func MustLeaseReverseKey(statePrefix []byte, id types.LeaseID) []byte { + key, err := LeaseReverseKey(statePrefix, id) + if err != nil { + panic(err) + } + return key +} + +func OrdersForGroupPrefix(statePrefix []byte, id dtypes.GroupID) []byte { + buf := bytes.NewBuffer(OrderPrefix) + buf.Write(statePrefix) + buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) + if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + panic(err) + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + panic(err) + } + return buf.Bytes() +} + +func BidsForOrderPrefix(statePrefix []byte, id types.OrderID) []byte { + buf := bytes.NewBuffer(BidPrefix) + buf.Write(statePrefix) + buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) + + if err := 
binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { + panic(err) + } + if err := binary.Write(buf, binary.BigEndian, id.GSeq); err != nil { + panic(err) + } + if err := binary.Write(buf, binary.BigEndian, id.OSeq); err != nil { + panic(err) + } + + return buf.Bytes() +} + +func OrderStateToPrefix(state types.Order_State) []byte { + var res []byte + + switch state { + case types.OrderOpen: + res = OrderStateOpenPrefix + case types.OrderActive: + res = OrderStateActivePrefix + case types.OrderClosed: + res = OrderStateClosedPrefix + } + + return res +} + +func BidStateToPrefix(state types.Bid_State) []byte { + var res []byte + + switch state { + case types.BidOpen: + res = BidStateOpenPrefix + case types.BidActive: + res = BidStateActivePrefix + case types.BidLost: + res = BidStateLostPrefix + case types.BidClosed: + res = BidStateClosedPrefix + } + + return res +} + +func LeaseStateToPrefix(state types.Lease_State) []byte { + var res []byte + + switch state { + case types.LeaseActive: + res = LeaseStateActivePrefix + case types.LeaseInsufficientFunds: + res = LeaseStateInsufficientFundsPrefix + case types.LeaseClosed: + res = LeaseStateClosedPrefix + } + + return res +} + func filterToPrefix(prefix []byte, owner string, dseq uint64, gseq, oseq uint32, provider string) ([]byte, error) { buf := bytes.NewBuffer(prefix) @@ -54,20 +439,126 @@ func filterToPrefix(prefix []byte, owner string, dseq uint64, gseq, oseq uint32, return buf.Bytes(), nil } +// nolint: unused +func reverseFilterToPrefix(prefix []byte, provider string, dseq uint64, gseq, oseq uint32, owner string) ([]byte, error) { + buf := bytes.NewBuffer(prefix) + + if len(provider) == 0 { + return buf.Bytes(), nil + } + + if _, err := buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(provider))); err != nil { + return nil, err + } + + if dseq == 0 { + return buf.Bytes(), nil + } + if err := binary.Write(buf, binary.BigEndian, dseq); err != nil { + return nil, err + } + + if gseq == 0 { + 
return buf.Bytes(), nil + } + if err := binary.Write(buf, binary.BigEndian, gseq); err != nil { + return nil, err + } + + if oseq == 0 { + return buf.Bytes(), nil + } + if err := binary.Write(buf, binary.BigEndian, oseq); err != nil { + return nil, err + } + + if len(owner) == 0 { + return buf.Bytes(), nil + } + + if _, err := buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(owner))); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + func OrderPrefixFromFilter(f types.OrderFilters) ([]byte, error) { - return filterToPrefix(types.OrderPrefix(), f.Owner, f.DSeq, f.GSeq, f.OSeq, "") + var idx []byte + switch f.State { + case types.OrderOpen.String(): + idx = OrderStateOpenPrefix + case types.OrderActive.String(): + idx = OrderStateActivePrefix + case types.OrderClosed.String(): + idx = OrderStateClosedPrefix + } + + prefix := make([]byte, 0, len(OrderPrefix)+len(idx)) + prefix = append(prefix, OrderPrefix...) + prefix = append(prefix, idx...) + + return filterToPrefix(prefix, f.Owner, f.DSeq, f.GSeq, f.OSeq, "") } -func LeasePrefixFromFilter(f types.LeaseFilters) ([]byte, bool, error) { - prefix, err := filterToPrefix(types.LeasePrefix(), f.Owner, f.DSeq, f.GSeq, f.OSeq, f.Provider) - return prefix, false, err +func buildLeasePrefix(prefix []byte, state string) []byte { + var idx []byte + switch state { + case types.LeaseActive.String(): + idx = LeaseStateActivePrefix + case types.LeaseInsufficientFunds.String(): + idx = LeaseStateInsufficientFundsPrefix + case types.LeaseClosed.String(): + idx = LeaseStateClosedPrefix + } + + res := make([]byte, 0, len(prefix)+len(idx)) + res = append(res, prefix...) + res = append(res, idx...) 
+ + return res +} + +func buildBidPrefix(prefix []byte, state string) []byte { + var idx []byte + switch state { + case types.BidActive.String(): + idx = BidStateActivePrefix + case types.BidOpen.String(): + idx = BidStateOpenPrefix + case types.BidLost.String(): + idx = BidStateLostPrefix + case types.BidClosed.String(): + idx = BidStateClosedPrefix + } + + res := make([]byte, 0, len(prefix)+len(idx)) + res = append(res, prefix...) + res = append(res, idx...) + + return res } func BidPrefixFromFilter(f types.BidFilters) ([]byte, error) { - return filterToPrefix(types.BidPrefix(), f.Owner, f.DSeq, f.GSeq, f.OSeq, f.Provider) + return filterToPrefix(buildBidPrefix(BidPrefix, f.State), f.Owner, f.DSeq, f.GSeq, f.OSeq, f.Provider) +} + +func BidReversePrefixFromFilter(f types.BidFilters) ([]byte, error) { + prefix, err := filterToPrefix(buildBidPrefix(BidPrefixReverse, f.State), f.Provider, f.DSeq, f.GSeq, f.OSeq, f.Owner) + return prefix, err +} + +func LeasePrefixFromFilter(f types.LeaseFilters) ([]byte, error) { + prefix, err := filterToPrefix(buildLeasePrefix(LeasePrefix, f.State), f.Owner, f.DSeq, f.GSeq, f.OSeq, f.Provider) + return prefix, err +} + +func LeaseReversePrefixFromFilter(f types.LeaseFilters) ([]byte, error) { + prefix, err := filterToPrefix(buildLeasePrefix(LeasePrefixReverse, f.State), f.Provider, f.DSeq, f.GSeq, f.OSeq, f.Owner) + return prefix, err } -func OrderKey(id types.OrderID) []byte { +func OrderKeyLegacy(id types.OrderID) []byte { buf := bytes.NewBuffer(types.OrderPrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { @@ -82,7 +573,7 @@ func OrderKey(id types.OrderID) []byte { return buf.Bytes() } -func BidKey(id types.BidID) []byte { +func BidKeyLegacy(id types.BidID) []byte { buf := bytes.NewBuffer(types.BidPrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) if err := binary.Write(buf, 
binary.BigEndian, id.DSeq); err != nil { @@ -98,7 +589,7 @@ func BidKey(id types.BidID) []byte { return buf.Bytes() } -func LeaseKey(id types.LeaseID) []byte { +func LeaseKeyLegacy(id types.LeaseID) []byte { buf := bytes.NewBuffer(types.LeasePrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { @@ -114,7 +605,7 @@ func LeaseKey(id types.LeaseID) []byte { return buf.Bytes() } -func SecondaryLeaseKeyByProvider(id types.LeaseID) []byte { +func SecondaryLeaseKeyByProviderLegacy(id types.LeaseID) []byte { buf := bytes.NewBuffer(types.SecondaryLeasePrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Provider))) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) @@ -130,13 +621,13 @@ func SecondaryLeaseKeyByProvider(id types.LeaseID) []byte { return buf.Bytes() } -func SecondaryKeysForLease(id types.LeaseID) [][]byte { +func SecondaryKeysForLeaseLegacy(id types.LeaseID) [][]byte { return [][]byte{ - SecondaryLeaseKeyByProvider(id), + SecondaryLeaseKeyByProviderLegacy(id), } } -func OrdersForGroupPrefix(id dtypes.GroupID) []byte { +func OrdersForGroupPrefixLegacy(id dtypes.GroupID) []byte { buf := bytes.NewBuffer(types.OrderPrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { @@ -148,7 +639,7 @@ func OrdersForGroupPrefix(id dtypes.GroupID) []byte { return buf.Bytes() } -func BidsForOrderPrefix(id types.OrderID) []byte { +func BidsForOrderPrefixLegacy(id types.OrderID) []byte { buf := bytes.NewBuffer(types.BidPrefix()) buf.Write(address.MustLengthPrefix(sdkutil.MustAccAddressFromBech32(id.Owner))) if err := binary.Write(buf, binary.BigEndian, id.DSeq); err != nil { diff --git a/x/market/keeper/keys/v1beta4/key_test.go b/x/market/keeper/keys/v1beta4/key_test.go index 08752aef99..4fb4b25b98 100644 --- 
a/x/market/keeper/keys/v1beta4/key_test.go +++ b/x/market/keeper/keys/v1beta4/key_test.go @@ -22,14 +22,14 @@ func TestKeysAndSecondaryKeysFilter(t *testing.T) { State: types.LeaseClosed.String(), } - prefix, isSecondary, err := keys.LeasePrefixFromFilter(filter) + prefix, err := keys.LeasePrefixFromFilter(filter) require.NoError(t, err) - require.False(t, isSecondary) - require.Equal(t, types.LeasePrefix(), prefix[0:2]) + // require.False(t, isSecondary) + require.Equal(t, keys.LeasePrefix, prefix[0:2]) filter.Owner = "" - prefix, isSecondary, err = keys.LeasePrefixFromFilter(filter) + prefix, err = keys.LeasePrefixFromFilter(filter) require.NoError(t, err) - require.False(t, isSecondary) - require.Equal(t, types.LeasePrefix(), prefix[0:2]) + // require.False(t, isSecondary) + require.Equal(t, keys.LeasePrefix, prefix[0:2]) } diff --git a/x/market/module.go b/x/market/module.go index c724c0c372..8b7813fcfd 100644 --- a/x/market/module.go +++ b/x/market/module.go @@ -26,7 +26,6 @@ import ( v1beta3types "github.com/akash-network/akash-api/go/node/market/v1beta3" types "github.com/akash-network/akash-api/go/node/market/v1beta4" - utypes "github.com/akash-network/node/upgrades/types" akeeper "github.com/akash-network/node/x/audit/keeper" ekeeper "github.com/akash-network/node/x/escrow/keeper" "github.com/akash-network/node/x/market/client/cli" @@ -166,12 +165,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), handler.NewServer(am.keepers)) querier := am.keepers.Market.NewQuerier() types.RegisterQueryServer(cfg.QueryServer(), querier) - - utypes.ModuleMigrations(ModuleName, am.keepers.Market, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // BeginBlock performs no-op @@ -200,7 +193,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion 
implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 6 } // AppModuleSimulation implements an application simulation module for the market module. diff --git a/x/provider/client/cli/grpc_rest_test.go b/x/provider/client/cli/grpc_rest_test.go index 513405d458..a7f3397f16 100644 --- a/x/provider/client/cli/grpc_rest_test.go +++ b/x/provider/client/cli/grpc_rest_test.go @@ -96,7 +96,6 @@ func (s *GRPCRestTestSuite) TestGetProviders() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) @@ -144,7 +143,6 @@ func (s *GRPCRestTestSuite) TestGetProvider() { } for _, tc := range testCases { - tc := tc s.Run(tc.name, func() { resp, err := sdkrest.GetRequest(tc.url) s.Require().NoError(err) diff --git a/x/provider/keeper/grpc_query_test.go b/x/provider/keeper/grpc_query_test.go index 649918aaac..617e1fac0c 100644 --- a/x/provider/keeper/grpc_query_test.go +++ b/x/provider/keeper/grpc_query_test.go @@ -87,7 +87,6 @@ func TestGRPCQueryProvider(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) @@ -143,7 +142,6 @@ func TestGRPCQueryProviders(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Case %s", tc.msg), func(t *testing.T) { tc.malleate() ctx := sdk.WrapSDKContext(suite.ctx) diff --git a/x/provider/module.go b/x/provider/module.go index 777b545e0b..bd28bd7c88 100644 --- a/x/provider/module.go +++ b/x/provider/module.go @@ -25,7 +25,6 @@ import ( v1beta2types "github.com/akash-network/akash-api/go/node/provider/v1beta2" types "github.com/akash-network/akash-api/go/node/provider/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" mkeeper "github.com/akash-network/node/x/market/keeper" "github.com/akash-network/node/x/provider/client/cli" 
"github.com/akash-network/node/x/provider/client/rest" @@ -153,12 +152,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), handler.NewMsgServerImpl(am.keeper, am.mkeeper)) querier := am.keeper.NewQuerier() types.RegisterQueryServer(cfg.QueryServer(), querier) - - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) } // BeginBlock performs no-op @@ -187,7 +180,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 2 } // ____________________________________________________________________________ diff --git a/x/staking/module.go b/x/staking/module.go index 6b0ad55fc1..6058fe01a2 100644 --- a/x/staking/module.go +++ b/x/staking/module.go @@ -20,7 +20,6 @@ import ( types "github.com/akash-network/akash-api/go/node/staking/v1beta3" - utypes "github.com/akash-network/node/upgrades/types" "github.com/akash-network/node/x/staking/keeper" "github.com/akash-network/node/x/staking/simulation" ) @@ -83,11 +82,6 @@ func (AppModuleBasic) GetTxCmd() *cobra.Command { return nil } -// GetQueryClient returns a new query client for this module -// func (AppModuleBasic) GetQueryClient(clientCtx client.Context) types.QueryClient { -// return types.NewQueryClient(clientCtx) -// } - // AppModule implements an application module for the provider module. 
type AppModule struct { AppModuleBasic @@ -126,12 +120,7 @@ func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { } // RegisterServices registers the module's services -func (am AppModule) RegisterServices(cfg module.Configurator) { - utypes.ModuleMigrations(ModuleName, am.keeper, func(name string, forVersion uint64, handler module.MigrationHandler) { - if err := cfg.RegisterMigration(name, forVersion, handler); err != nil { - panic(err) - } - }) +func (am AppModule) RegisterServices(_ module.Configurator) { } // BeginBlock performs no-op @@ -160,7 +149,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion implements module.AppModule#ConsensusVersion func (am AppModule) ConsensusVersion() uint64 { - return utypes.ModuleVersion(ModuleName) + return 1 } // ____________________________________________________________________________ diff --git a/x/take/module.go b/x/take/module.go index 8a5989655e..0d8bfef9e6 100644 --- a/x/take/module.go +++ b/x/take/module.go @@ -138,7 +138,7 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd } // RegisterServices registers the module's servicess -func (am AppModule) RegisterServices(cfg module.Configurator) { +func (am AppModule) RegisterServices(_ module.Configurator) { } // RegisterQueryService registers a GRPC query service to respond to the