diff --git a/.github/workflows/dependabot-update-all.yml b/.github/workflows/dependabot-update-all.yml index 3b8894802cfa..f63a0280d633 100644 --- a/.github/workflows/dependabot-update-all.yml +++ b/.github/workflows/dependabot-update-all.yml @@ -14,11 +14,7 @@ jobs: if: ${{ github.actor == 'dependabot[bot]' }} steps: - name: Generate Token -<<<<<<< HEAD uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v1 -======= - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v1 ->>>>>>> acb39aa52 (build(deps): Bump actions/create-github-app-token from 2.0.3 to 2.0.6 (#24673)) id: app-token with: app-id: "${{ secrets.APP_ID }}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 27b05d57f0ce..8e236bf7469a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * (genutil) [#24018](https://github.com/cosmos/cosmos-sdk/pull/24018) Allow manually setting the consensus key type in genesis * (client) [#18557](https://github.com/cosmos/cosmos-sdk/pull/18557) Add `--qrcode` flag to `keys show` command to support displaying keys address QR code. * (x/auth) [#24030](https://github.com/cosmos/cosmos-sdk/pull/24030) Allow usage of ed25519 keys for transaction signing. +* (baseapp) [#24159](https://github.com/cosmos/cosmos-sdk/pull/24159) Support mount object store in baseapp, add `ObjectStore` api in context. * (baseapp) [#24163](https://github.com/cosmos/cosmos-sdk/pull/24163) Add `StreamingManager` to baseapp to extend the abci listeners. * (x/protocolpool) [#23933](https://github.com/cosmos/cosmos-sdk/pull/23933) Add x/protocolpool module. * x/distribution can now utilize an externally managed community pool. NOTE: this will make the message handlers for FundCommunityPool and CommunityPoolSpend error, as well as the query handler for CommunityPool. diff --git a/baseapp/abci.go b/baseapp/abci.go index 8d4d83befa5c..7f54d74fb2e8 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -791,48 +791,47 @@ func (app *BaseApp) internalFinalizeBlock(ctx context.Context, req *abci.Request // Reset the gas meter so that the AnteHandlers aren't required to gasMeter = app.getBlockGasMeter(app.finalizeBlockState.Context()) - app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter)) + app.finalizeBlockState.SetContext( + app.finalizeBlockState.Context(). + WithBlockGasMeter(gasMeter). + WithTxCount(len(req.Txs)), + ) // Iterate over all raw transactions in the proposal and attempt to execute // them, gathering the execution results. // // NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g. // vote extensions, so skip those. - txResults := make([]*abci.ExecTxResult, 0, len(req.Txs)) - for _, rawTx := range req.Txs { - var response *abci.ExecTxResult - - if _, err := app.txDecoder(rawTx); err == nil { - response = app.deliverTx(rawTx) - } else { - // In the case where a transaction included in a block proposal is malformed, - // we still want to return a default response to comet. This is because comet - // expects a response for each transaction included in a block proposal. 
- response = sdkerrors.ResponseExecTxResultWithEvents( - sdkerrors.ErrTxDecode, - 0, - 0, - nil, - false, - ) - } - - // check after every tx if we should abort - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - // continue - } - - txResults = append(txResults, response) + txResults, err := app.executeTxs(ctx, req.Txs) + if err != nil { + // usually due to canceled + return nil, err } if app.finalizeBlockState.ms.TracingEnabled() { app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore) } - endBlock, err := app.endBlock(app.finalizeBlockState.Context()) + var ( + blockGasUsed uint64 + blockGasWanted uint64 + ) + for _, res := range txResults { + // GasUsed should not be -1 but just in case + if res.GasUsed > 0 { + blockGasUsed += uint64(res.GasUsed) + } + // GasWanted could be -1 if the tx is invalid + if res.GasWanted > 0 { + blockGasWanted += uint64(res.GasWanted) + } + } + app.finalizeBlockState.SetContext( + app.finalizeBlockState.Context(). + WithBlockGasUsed(blockGasUsed). + WithBlockGasWanted(blockGasWanted), + ) + endBlock, err := app.endBlock(ctx) if err != nil { return nil, err } @@ -856,6 +855,44 @@ func (app *BaseApp) internalFinalizeBlock(ctx context.Context, req *abci.Request }, nil } +func (app *BaseApp) executeTxs(ctx context.Context, txs [][]byte) ([]*abci.ExecTxResult, error) { + if app.txExecutor != nil { + return app.txExecutor(ctx, txs, app.finalizeBlockState.ms, func(i int, memTx sdk.Tx, ms storetypes.MultiStore, incarnationCache map[string]any) *abci.ExecTxResult { + return app.deliverTxWithMultiStore(txs[i], memTx, i, ms, incarnationCache) + }) + } + + txResults := make([]*abci.ExecTxResult, 0, len(txs)) + for i, rawTx := range txs { + var response *abci.ExecTxResult + + if memTx, err := app.txDecoder(rawTx); err == nil { + response = app.deliverTx(rawTx, memTx, i) + } else { + // In the case where a transaction included in a block proposal is malformed, + // we still want to return a default response to comet. This is because comet + // expects a response for each transaction included in a block proposal. + response = sdkerrors.ResponseExecTxResultWithEvents( + sdkerrors.ErrTxDecode, + 0, + 0, + nil, + false, + ) + } + // check after every tx if we should abort + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + // continue + } + + txResults = append(txResults, response) + } + return txResults, nil +} + // FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock. // Specifically, it will execute an application's BeginBlock (if defined), followed // by the transactions in the proposal, finally followed by the application's @@ -1213,7 +1250,7 @@ func (app *BaseApp) CreateQueryContextWithCheckHeader(height int64, prove, check // use custom query multi-store if provided qms := app.qms if qms == nil { - qms = app.cms.(storetypes.MultiStore) + qms = storetypes.RootMultiStore(app.cms) } lastBlockHeight := qms.LatestVersion() diff --git a/baseapp/abci_utils.go b/baseapp/abci_utils.go index 85e9e1e03c12..80500330a836 100644 --- a/baseapp/abci_utils.go +++ b/baseapp/abci_utils.go @@ -201,7 +201,7 @@ type ( // to verify a transaction. 
ProposalTxVerifier interface { PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) - ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) + ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, uint64, error) TxDecode(txBz []byte) (sdk.Tx, error) TxEncode(tx sdk.Tx) ([]byte, error) } @@ -213,6 +213,9 @@ type ( txVerifier ProposalTxVerifier txSelector TxSelector signerExtAdapter mempool.SignerExtractionAdapter + + // fastPrepareProposal together with NoOpMempool will bypass tx selector + fastPrepareProposal bool } ) @@ -225,6 +228,12 @@ func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier } } +func NewDefaultProposalHandlerFast(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler { + h := NewDefaultProposalHandler(mp, txVerifier) + h.fastPrepareProposal = true + return h +} + // SetTxSelector sets the TxSelector function on the DefaultProposalHandler. func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) { h.txSelector = ts @@ -265,13 +274,23 @@ func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHan // Note, we still need to ensure the transactions returned respect req.MaxTxBytes. _, isNoOp := h.mempool.(mempool.NoOpMempool) if h.mempool == nil || isNoOp { + if h.fastPrepareProposal { + txs := h.txSelector.SelectTxForProposalFast(ctx, req.Txs) + return &abci.ResponsePrepareProposal{Txs: txs}, nil + } + for _, txBz := range req.Txs { tx, err := h.txVerifier.TxDecode(txBz) if err != nil { return nil, err } - stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz) + var txGasLimit uint64 + if gasTx, ok := tx.(mempool.GasTx); ok { + txGasLimit = gasTx.GetGas() + } + + stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz, txGasLimit) if stop { break } @@ -286,14 +305,14 @@ func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHan selectedTxsNums int invalidTxs []sdk.Tx // invalid txs to be removed out of the loop to avoid dead lock ) - mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool { - unorderedTx, ok := memTx.(sdk.TxWithUnordered) + mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx mempool.Tx) bool { + unorderedTx, ok := memTx.Tx.(sdk.TxWithUnordered) isUnordered := ok && unorderedTx.GetUnordered() txSignersSeqs := make(map[string]uint64) // if the tx is unordered, we don't need to check the sequence, we just add it if !isUnordered { - signerData, err := h.signerExtAdapter.GetSigners(memTx) + signerData, err := h.signerExtAdapter.GetSigners(memTx.Tx) if err != nil { // propagate the error to the caller resError = err @@ -328,11 +347,11 @@ func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHan // which calls mempool.Insert, in theory everything in the pool should be // valid. But some mempool implementations may insert invalid txs, so we // check again. 
- txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx) + txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx.Tx) if err != nil { - invalidTxs = append(invalidTxs, memTx) + invalidTxs = append(invalidTxs, memTx.Tx) } else { - stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz) + stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx.Tx, txBz, memTx.GasWanted) if stop { return false } @@ -404,17 +423,13 @@ func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHan } for _, txBytes := range req.Txs { - tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes) + _, gasWanted, err := h.txVerifier.ProcessProposalVerifyTx(txBytes) if err != nil { return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil } if maxBlockGas > 0 { - gasTx, ok := tx.(GasTx) - if ok { - totalTxGas += gasTx.GetGas() - } - + totalTxGas += gasWanted if totalTxGas > uint64(maxBlockGas) { return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil } @@ -472,7 +487,13 @@ type TxSelector interface { // a proposal based on inclusion criteria defined by the TxSelector. It must // return if the caller should halt the transaction selection loop // (typically over a mempool) or otherwise. - SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool + SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte, gasWanted uint64) bool + + // SelectTxForProposalFast is called in the case of NoOpMempool, + // where cometbft already checked the block gas/size limit, + // so the tx selector should simply accept them all to the proposal. + // But extra validations on the tx are still possible though. + SelectTxForProposalFast(ctx context.Context, txs [][]byte) [][]byte } type defaultTxSelector struct { @@ -494,26 +515,19 @@ func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte { func (ts *defaultTxSelector) Clear() { ts.totalTxBytes = 0 ts.totalTxGas = 0 - ts.selectedTxs = nil + ts.selectedTxs = ts.selectedTxs[:0] // keep the allocated memory } -func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool { +func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte, gasWanted uint64) bool { txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{txBz})) - var txGasLimit uint64 - if memTx != nil { - if gasTx, ok := memTx.(GasTx); ok { - txGasLimit = gasTx.GetGas() - } - } - // only add the transaction to the proposal if we have enough capacity if (txSize + ts.totalTxBytes) <= maxTxBytes { // If there is a max block gas limit, add the tx only if the limit has // not been met. 
if maxBlockGas > 0 { - if (txGasLimit + ts.totalTxGas) <= maxBlockGas { - ts.totalTxGas += txGasLimit + if (gasWanted + ts.totalTxGas) <= maxBlockGas { + ts.totalTxGas += gasWanted ts.totalTxBytes += txSize ts.selectedTxs = append(ts.selectedTxs, txBz) } @@ -526,3 +540,7 @@ func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, // check if we've reached capacity; if so, we cannot select any more transactions return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas)) } + +func (ts *defaultTxSelector) SelectTxForProposalFast(ctx context.Context, txs [][]byte) [][]byte { + return txs +} diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index c10fa15936a9..a662df6fa2db 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -68,7 +68,7 @@ type BaseApp struct { name string // application name from abci.BlockInfo db dbm.DB // common DB backend cms storetypes.CommitMultiStore // Main (uncached) state - qms storetypes.MultiStore // Optional alternative multistore for querying only. + qms storetypes.RootMultiStore // Optional alternative multistore for querying only. storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages @@ -198,6 +198,9 @@ type BaseApp struct { // // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler. disableBlockGasMeter bool + + // Optional alternative tx executor, used for block-stm parallel transaction execution. + txExecutor TxExecutor } // NewBaseApp returns a reference to an initialized BaseApp. It accepts a @@ -318,6 +321,9 @@ func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) { case *storetypes.MemoryStoreKey: app.MountStore(key, storetypes.StoreTypeMemory) + case *storetypes.ObjectStoreKey: + app.MountStore(key, storetypes.StoreTypeObject) + default: panic(fmt.Sprintf("Unrecognized store key type :%T", key)) } @@ -356,6 +362,16 @@ func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey } } +// MountObjectStores mounts all transient object stores with the BaseApp's internal +// commit multi-store. +func (app *BaseApp) MountObjectStores(keys map[string]*storetypes.ObjectStoreKey) { + skeys := slices.Sorted(maps.Keys(keys)) + for _, key := range skeys { + memKey := keys[key] + app.MountStore(memKey, storetypes.StoreTypeObject) + } +} + // MountStore mounts a store to the provided key in the BaseApp multistore, // using the default DB. func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) { @@ -674,7 +690,7 @@ func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter { } // retrieve the context for the tx w/ txBytes and other memoized values. -func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context { +func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte, txIndex int) sdk.Context { app.mu.Lock() defer app.mu.Unlock() @@ -684,7 +700,8 @@ func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context { } ctx := modeState.Context(). WithTxBytes(txBytes). - WithGasMeter(storetypes.NewInfiniteGasMeter()) + WithGasMeter(storetypes.NewInfiniteGasMeter()). 
+ WithTxIndex(txIndex) // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed ctx = ctx.WithIsSigverifyTx(app.sigverifyTx) @@ -769,7 +786,11 @@ func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, er return resp, nil } -func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult { +func (app *BaseApp) deliverTx(tx []byte, memTx sdk.Tx, txIndex int) *abci.ExecTxResult { + return app.deliverTxWithMultiStore(tx, memTx, txIndex, nil, nil) +} + +func (app *BaseApp) deliverTxWithMultiStore(tx []byte, memTx sdk.Tx, txIndex int, txMultiStore storetypes.MultiStore, incarnationCache map[string]any) *abci.ExecTxResult { gInfo := sdk.GasInfo{} resultStr := "successful" @@ -782,7 +803,7 @@ func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult { telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted") }() - gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil) + gInfo, result, anteEvents, err := app.runTxWithMultiStore(execModeFinalize, tx, memTx, txIndex, txMultiStore, incarnationCache) if err != nil { resultStr = "failed" resp = sdkerrors.ResponseExecTxResultWithEvents( @@ -842,12 +863,20 @@ func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) { // both txbytes and the decoded tx are passed to runTx to avoid the state machine encoding the tx and decoding the transaction twice // passing the decoded tx to runTX is optional, it will be decoded if the tx is nil func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) { + return app.runTxWithMultiStore(mode, txBytes, tx, -1, nil, nil) +} + +func (app *BaseApp) runTxWithMultiStore(mode execMode, txBytes []byte, tx sdk.Tx, txIndex int, txMultiStore storetypes.MultiStore, incarnationCache map[string]any) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) { // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is // determined by the GasMeter. We need access to the context to get the gas // meter, so we initialize upfront. var gasWanted uint64 - ctx := app.getContextForTx(mode, txBytes) + ctx := app.getContextForTx(mode, txBytes, txIndex) + ctx = ctx.WithIncarnationCache(incarnationCache) + if txMultiStore != nil { + ctx = ctx.WithMultiStore(txMultiStore) + } ms := ctx.MultiStore() // only run the tx if there is block gas remaining @@ -957,7 +986,7 @@ func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.G switch mode { case execModeCheck: - err = app.mempool.Insert(ctx, tx) + err = app.mempool.InsertWithGasWanted(ctx, tx, gasWanted) if err != nil { return gInfo, nil, anteEvents, err } @@ -1040,6 +1069,8 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Me break } + ctx = ctx.WithMsgIndex(i) + handler := app.msgServiceRouter.Handler(msg) if handler == nil { return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg) @@ -1151,18 +1182,18 @@ func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) { // ProcessProposal state internally will be discarded. will be // returned if the transaction cannot be decoded. will be returned if // the transaction is valid, otherwise will be returned. 
-func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) { +func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, uint64, error) { tx, err := app.txDecoder(txBz) if err != nil { - return nil, err + return nil, 0, err } - _, _, _, err = app.runTx(execModeProcessProposal, txBz, tx) + gInfo, _, _, err := app.runTx(execModeProcessProposal, txBz, tx) if err != nil { - return nil, err + return nil, 0, err } - return tx, nil + return tx, gInfo.GasWanted, nil } func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) { diff --git a/baseapp/genesis.go b/baseapp/genesis.go index 4662d1187b4a..1aa562ec8f4b 100644 --- a/baseapp/genesis.go +++ b/baseapp/genesis.go @@ -13,7 +13,7 @@ var _ genesis.TxHandler = (*BaseApp)(nil) // ExecuteGenesisTx implements genesis.GenesisState from // cosmossdk.io/core/genesis to set initial state in genesis func (ba *BaseApp) ExecuteGenesisTx(tx []byte) error { - res := ba.deliverTx(tx) + res := ba.deliverTx(tx, nil, -1) if res.Code != types.CodeTypeOK { return errors.New(res.Log) diff --git a/baseapp/options.go b/baseapp/options.go index 1f809e498b9c..096d860d8337 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -129,6 +129,11 @@ func DisableBlockGasMeter() func(*BaseApp) { return func(app *BaseApp) { app.SetDisableBlockGasMeter(true) } } +// SetTxExecutor sets a custom tx executor for the BaseApp, usually for parallel execution. +func SetTxExecutor(executor TxExecutor) func(*BaseApp) { + return func(app *BaseApp) { app.txExecutor = executor } +} + func (app *BaseApp) SetName(name string) { if app.sealed { panic("SetName() on sealed BaseApp") @@ -321,7 +326,7 @@ func (app *BaseApp) SetTxEncoder(txEncoder sdk.TxEncoder) { // SetQueryMultiStore set a alternative MultiStore implementation to support grpc query service. // // Ref: https://github.com/cosmos/cosmos-sdk/issues/13317 -func (app *BaseApp) SetQueryMultiStore(ms storetypes.MultiStore) { +func (app *BaseApp) SetQueryMultiStore(ms storetypes.RootMultiStore) { app.qms = ms } @@ -403,3 +408,8 @@ func (app *BaseApp) SetMsgServiceRouter(msgServiceRouter *MsgServiceRouter) { func (app *BaseApp) SetGRPCQueryRouter(grpcQueryRouter *GRPCQueryRouter) { app.grpcQueryRouter = grpcQueryRouter } + +// SetTxExecutor sets a custom tx executor for the BaseApp, usually for parallel execution. +func (app *BaseApp) SetTxExecutor(executor TxExecutor) { + app.txExecutor = executor +} diff --git a/baseapp/test_helpers.go b/baseapp/test_helpers.go index b3aa396a0250..9da28a1e6d4a 100644 --- a/baseapp/test_helpers.go +++ b/baseapp/test_helpers.go @@ -51,12 +51,6 @@ func (app *BaseApp) SimTxFinalizeBlock(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk. return gasInfo, result, err } -// SimWriteState is an entrypoint for simulations only. They are not executed during the normal ABCI finalize -// block step but later. Therefor an extra call to the root multi-store (app.cms) is required to write the changes. 
-func (app *BaseApp) SimWriteState() { - app.finalizeBlockState.ms.Write() -} - // NewContextLegacy returns a new sdk.Context with the provided header func (app *BaseApp) NewContextLegacy(isCheckTx bool, header cmtproto.Header) sdk.Context { if isCheckTx { @@ -77,9 +71,9 @@ func (app *BaseApp) NewUncachedContext(isCheckTx bool, header cmtproto.Header) s } func (app *BaseApp) GetContextForFinalizeBlock(txBytes []byte) sdk.Context { - return app.getContextForTx(execModeFinalize, txBytes) + return app.getContextForTx(execModeFinalize, txBytes, -1) } func (app *BaseApp) GetContextForCheckTx(txBytes []byte) sdk.Context { - return app.getContextForTx(execModeCheck, txBytes) + return app.getContextForTx(execModeCheck, txBytes, -1) } diff --git a/baseapp/testutil/mock/mocks.go b/baseapp/testutil/mock/mocks.go index 51de61fa7156..1ac2102a0584 100644 --- a/baseapp/testutil/mock/mocks.go +++ b/baseapp/testutil/mock/mocks.go @@ -135,12 +135,13 @@ func (mr *MockProposalTxVerifierMockRecorder) PrepareProposalVerifyTx(tx any) *g } // ProcessProposalVerifyTx mocks base method. -func (m *MockProposalTxVerifier) ProcessProposalVerifyTx(txBz []byte) (types.Tx, error) { +func (m *MockProposalTxVerifier) ProcessProposalVerifyTx(txBz []byte) (types.Tx, uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ProcessProposalVerifyTx", txBz) ret0, _ := ret[0].(types.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret1, _ := ret[1].(uint64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } // ProcessProposalVerifyTx indicates an expected call of ProcessProposalVerifyTx. @@ -216,17 +217,17 @@ func (mr *MockTxSelectorMockRecorder) Clear() *gomock.Call { } // SelectTxForProposal mocks base method. -func (m *MockTxSelector) SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx types.Tx, txBz []byte) bool { +func (m *MockTxSelector) SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx types.Tx, txBz []byte, gasWanted uint64) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectTxForProposal", ctx, maxTxBytes, maxBlockGas, memTx, txBz) + ret := m.ctrl.Call(m, "SelectTxForProposal", ctx, maxTxBytes, maxBlockGas, memTx, txBz, gasWanted) ret0, _ := ret[0].(bool) return ret0 } // SelectTxForProposal indicates an expected call of SelectTxForProposal. -func (mr *MockTxSelectorMockRecorder) SelectTxForProposal(ctx, maxTxBytes, maxBlockGas, memTx, txBz any) *gomock.Call { +func (mr *MockTxSelectorMockRecorder) SelectTxForProposal(ctx, maxTxBytes, maxBlockGas, memTx, txBz, gasWanted any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectTxForProposal", reflect.TypeOf((*MockTxSelector)(nil).SelectTxForProposal), ctx, maxTxBytes, maxBlockGas, memTx, txBz) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectTxForProposal", reflect.TypeOf((*MockTxSelector)(nil).SelectTxForProposal), ctx, maxTxBytes, maxBlockGas, memTx, txBz, gasWanted) } // SelectedTxs mocks base method. 
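The new `TxExecutor` hook introduced in `baseapp/txexecutor.go` (next file) is the extension point that `executeTxs` and the `SetTxExecutor` option above wire together: when it is set, BaseApp hands the whole block to the executor instead of running its own sequential loop. Below is a minimal sketch of a conforming executor that simply preserves the sequential semantics, included only to illustrate the contract; the package name and `NewSequentialTxExecutor` are illustrative assumptions, while `baseapp.TxExecutor`, `baseapp.SetTxExecutor`, and the callback signature come from this change.

package app // illustrative wiring package, not part of this change

import (
	"context"

	abci "github.com/cometbft/cometbft/abci/types"

	storetypes "cosmossdk.io/store/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// NewSequentialTxExecutor returns a TxExecutor that runs the block's
// transactions one by one against the shared multi-store. A real executor
// (e.g. block-stm) would instead schedule the deliverTx callback in parallel
// over per-transaction multi-stores and incarnation caches.
func NewSequentialTxExecutor() baseapp.TxExecutor {
	return func(
		ctx context.Context,
		block [][]byte,
		cms storetypes.MultiStore,
		deliverTx func(int, sdk.Tx, storetypes.MultiStore, map[string]any) *abci.ExecTxResult,
	) ([]*abci.ExecTxResult, error) {
		results := make([]*abci.ExecTxResult, len(block))
		for i := range block {
			// abort between transactions if the block context is canceled,
			// mirroring the check in the default executeTxs loop
			if err := ctx.Err(); err != nil {
				return nil, err
			}
			// a nil decoded tx is permitted: runTx re-decodes the raw bytes,
			// and decode failures surface as failed ExecTxResults
			results[i] = deliverTx(i, nil, cms, nil)
		}
		return results, nil
	}
}

An application could then register it either as a constructor option, baseapp.SetTxExecutor(NewSequentialTxExecutor()), or by calling app.SetTxExecutor(...) before the app is sealed.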
diff --git a/baseapp/txexecutor.go b/baseapp/txexecutor.go new file mode 100644 index 000000000000..a2dd83f42031 --- /dev/null +++ b/baseapp/txexecutor.go @@ -0,0 +1,18 @@ +package baseapp + +import ( + "context" + + abci "github.com/cometbft/cometbft/abci/types" + + "cosmossdk.io/store/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type TxExecutor func( + ctx context.Context, + block [][]byte, + cms types.MultiStore, + deliverTxWithMultiStore func(int, sdk.Tx, types.MultiStore, map[string]any) *abci.ExecTxResult, +) ([]*abci.ExecTxResult, error) diff --git a/client/grpc_query_test.go b/client/grpc_query_test.go index f409103c7344..622306f0ab64 100644 --- a/client/grpc_query_test.go +++ b/client/grpc_query_test.go @@ -90,15 +90,6 @@ func (s *IntegrationTestSuite) SetupSuite() { }) s.NoError(err) - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: app.LastBlockHeight() + 1, - Hash: app.LastCommitID().Hash, - NextValidatorsHash: valSet.Hash(), - }) - s.NoError(err) - - // end of app init - s.ctx = app.NewContext(false) s.cdc = cdc queryHelper := baseapp.NewQueryServerTestHelper(s.ctx, interfaceRegistry) @@ -107,6 +98,14 @@ func (s *IntegrationTestSuite) SetupSuite() { s.bankClient = types.NewQueryClient(queryHelper) s.testClient = testdata.NewQueryClient(queryHelper) s.genesisAccount = acc + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: app.LastBlockHeight() + 1, + Hash: app.LastCommitID().Hash, + NextValidatorsHash: valSet.Hash(), + }) + s.NoError(err) + // end of app init } func (s *IntegrationTestSuite) TearDownSuite() { diff --git a/client/v2/go.mod b/client/v2/go.mod index 237e4019c9cc..afacfa21ed86 100644 --- a/client/v2/go.mod +++ b/client/v2/go.mod @@ -159,3 +159,8 @@ require ( nhooyr.io/websocket v1.8.6 // indirect pgregory.net/rapid v1.2.0 // indirect ) + +// replace ( +// +// ) +replace cosmossdk.io/store => ../../store diff --git a/client/v2/go.sum b/client/v2/go.sum index 53db6f5ed32c..53ddf601c540 100644 --- a/client/v2/go.sum +++ b/client/v2/go.sum @@ -16,8 +16,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= diff --git a/go.mod b/go.mod index afc12ca8c8c3..51b4f37ca696 100644 --- a/go.mod +++ b/go.mod @@ -174,6 +174,12 @@ require ( // replace ( // // ) +replace ( + cosmossdk.io/api => ./api + cosmossdk.io/log => ./log +) + +replace cosmossdk.io/store => ./store // Below are the long-lived replace of the Cosmos SDK replace ( diff --git a/go.sum b/go.sum index b354c6d45e36..6bebe7762118 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= -cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= 
cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg= cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= @@ -10,14 +8,10 @@ cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.5.1 h1:wLwiYXmfrort/O+j6EkjF+HvbdrRQd+4cYCPKFSm+zM= -cosmossdk.io/log v1.5.1/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= diff --git a/runtime/module.go b/runtime/module.go index d71c668ef49b..bd098b767e99 100644 --- a/runtime/module.go +++ b/runtime/module.go @@ -68,6 +68,7 @@ func init() { ProvideKVStoreKey, ProvideTransientStoreKey, ProvideMemoryStoreKey, + ProvideObjectStoreKey, ProvideGenesisTxHandler, ProvideKVStoreService, ProvideMemoryStoreService, @@ -238,6 +239,12 @@ func ProvideMemoryStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleK return storeKey } +func ProvideObjectStoreKey(key depinject.ModuleKey, app *AppBuilder) *storetypes.ObjectStoreKey { + storeKey := storetypes.NewObjectStoreKey(fmt.Sprintf("object:%s", key.Name())) + registerStoreKey(app, storeKey) + return storeKey +} + func ProvideGenesisTxHandler(appBuilder *AppBuilder) genesis.TxHandler { return appBuilder.app } diff --git a/runtime/store.go b/runtime/store.go index 4cfe131c5071..9a956d4459e8 100644 --- a/runtime/store.go +++ b/runtime/store.go @@ -4,8 +4,6 @@ import ( "context" "io" - dbm "github.com/cosmos/cosmos-db" - "cosmossdk.io/core/store" storetypes "cosmossdk.io/store/types" @@ -150,7 +148,7 @@ func (s kvStoreAdapter) Set(key, value []byte) { } } -func (s kvStoreAdapter) Iterator(start, end []byte) dbm.Iterator { +func (s kvStoreAdapter) Iterator(start, end []byte) storetypes.Iterator { it, err := s.store.Iterator(start, end) if err != nil { panic(err) @@ -158,7 +156,7 @@ func (s kvStoreAdapter) Iterator(start, end []byte) dbm.Iterator { return it } -func (s kvStoreAdapter) ReverseIterator(start, end []byte) dbm.Iterator { +func (s kvStoreAdapter) ReverseIterator(start, end []byte) storetypes.Iterator { it, err := s.store.ReverseIterator(start, end) if err != nil { panic(err) diff --git a/server/mock/store.go b/server/mock/store.go index 60aa64ca7462..ebfd973c25c6 100644 --- a/server/mock/store.go +++ b/server/mock/store.go @@ -114,6 +114,10 @@ func (ms multiStore) GetKVStore(key storetypes.StoreKey) storetypes.KVStore { return ms.kv[key] } +func (ms multiStore) GetObjKVStore(storetypes.StoreKey) storetypes.ObjKVStore { + panic("not implemented") +} + func (ms 
multiStore) GetStore(key storetypes.StoreKey) storetypes.Store { panic("not implemented") } @@ -182,7 +186,7 @@ func (kv kvStore) CacheWrap() storetypes.CacheWrap { panic("not implemented") } -func (kv kvStore) CacheWrapWithTrace(w io.Writer, tc storetypes.TraceContext) storetypes.CacheWrap { +func (kv kvStore) CacheWrapWithTrace(_ io.Writer, _ storetypes.TraceContext) storetypes.CacheWrap { panic("not implemented") } diff --git a/simapp/app.go b/simapp/app.go index 620e73b9fc90..2c8ac7e42676 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -274,6 +274,7 @@ func NewSimApp( epochstypes.StoreKey, protocolpooltypes.StoreKey, ) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) // register streaming services if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil { @@ -312,6 +313,7 @@ func NewSimApp( app.BankKeeper = bankkeeper.NewBaseKeeper( appCodec, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), app.AccountKeeper, BlockedAddresses(), @@ -568,6 +570,7 @@ func NewSimApp( epochstypes.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( + banktypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, genutiltypes.ModuleName, @@ -663,6 +666,7 @@ func NewSimApp( // initialize stores app.MountKVStores(keys) + app.MountObjectStores(okeys) // initialize BaseApp app.SetInitChainer(app.InitChainer) diff --git a/simapp/app_config.go b/simapp/app_config.go index f072296c3156..99dc0cc01fdc 100644 --- a/simapp/app_config.go +++ b/simapp/app_config.go @@ -124,6 +124,7 @@ var ( epochstypes.ModuleName, }, EndBlockers: []string{ + banktypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, feegrant.ModuleName, diff --git a/simapp/go.mod b/simapp/go.mod index 8f5181bba7e5..37f57bcd6ea0 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -226,8 +226,16 @@ require ( // // ) +replace ( + cosmossdk.io/client/v2 => ../client/v2 + cosmossdk.io/store => ../store + cosmossdk.io/x/circuit => ../x/circuit +) + // Below are the long-lived replace of the SimApp replace ( + cosmossdk.io/api => ../api + cosmossdk.io/log => ../log // use cosmos fork of keyring github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 // Simapp always use the latest version of the cosmos-sdk diff --git a/simapp/go.sum b/simapp/go.sum index c885b4b19c99..63cba9f4d9aa 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -614,10 +614,6 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= -cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= -cosmossdk.io/client/v2 v2.0.0-beta.9 h1:xc06zg4G858/pK5plhf8RCfo+KR2mdDKJNrEkfrVAqc= -cosmossdk.io/client/v2 v2.0.0-beta.9/go.mod h1:pHf3CCHX5gmbL9rDCVbXhGI2+/DdAVTEZSLpdd5V9Zs= cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg= cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= @@ -626,18 +622,12 @@ cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= cosmossdk.io/errors v1.0.2 
h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.5.1 h1:wLwiYXmfrort/O+j6EkjF+HvbdrRQd+4cYCPKFSm+zM= -cosmossdk.io/log v1.5.1/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/tools/confix v0.1.2 h1:2hoM1oFCNisd0ltSAAZw2i4ponARPmlhuNu3yy0VwI4= cosmossdk.io/tools/confix v0.1.2/go.mod h1:7XfcbK9sC/KNgVGxgLM0BrFbVcR/+6Dg7MFfpx7duYo= -cosmossdk.io/x/circuit v0.2.0 h1:RJPMBQWCQU77EcM9HDTBnqRhq21fcUxgWZl7BZylJZo= -cosmossdk.io/x/circuit v0.2.0/go.mod h1:CjiGXDeZs64nMv0fG+QmvGVTcn7n3Sv4cDszMRR2JqU= cosmossdk.io/x/evidence v0.2.0 h1:o72zbmgCM7U0v7z7b0XnMB+NqX0tFamqb1HHkQbhrZ0= cosmossdk.io/x/evidence v0.2.0/go.mod h1:zx/Xqy+hnGVzkqVuVuvmP9KsO6YCl4SfbAetYi+k+sE= cosmossdk.io/x/feegrant v0.2.0 h1:oq3WVpoJdxko/XgWmpib63V1mYy9ZQN/1qxDajwGzJ8= diff --git a/store/CHANGELOG.md b/store/CHANGELOG.md index 53bf82923d6d..6120bc0d4673 100644 --- a/store/CHANGELOG.md +++ b/store/CHANGELOG.md @@ -29,6 +29,10 @@ Ref: https://keepachangelog.com/en/1.0.0/ * [#24090](https://github.com/cosmos/cosmos-sdk/pull/24090) Running the `prune` command now disables async pruning. +### Features + +* [#24159](https://github.com/cosmos/cosmos-sdk/pull/24159) Support mount object store in baseapp, add `ObjectStore` api in context. + ## v1.1.1 (September 06, 2024) ### Improvements diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go deleted file mode 100644 index 209f7e58c4dd..000000000000 --- a/store/cachekv/internal/btree.go +++ /dev/null @@ -1,91 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// BTree implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type BTree struct { - tree *btree.BTreeG[item] -} - -// NewBTree creates a wrapper around `btree.BTreeG`. 
-func NewBTree() BTree { - return BTree{ - tree: btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: false, - }), - } -} - -func (bt BTree) Set(key, value []byte) { - bt.tree.Set(newItem(key, value)) -} - -func (bt BTree) Get(key []byte) []byte { - i, found := bt.tree.Get(newItem(key, nil)) - if !found { - return nil - } - return i.value -} - -func (bt BTree) Delete(key []byte) { - bt.tree.Delete(newItem(key, nil)) -} - -func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, true), nil -} - -func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, false), nil -} - -// Copy the tree. This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (bt BTree) Copy() BTree { - return BTree{ - tree: bt.tree.Copy(), - } -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key, value []byte) item { - return item{key: key, value: value} -} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go index 58e9497b3028..c9a1e7d3a677 100644 --- a/store/cachekv/internal/mergeiterator.go +++ b/store/cachekv/internal/mergeiterator.go @@ -14,21 +14,24 @@ import ( // cache shadows (overrides) the parent. // // TODO: Optimize by memoizing. -type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator +type cacheMergeIterator[V any] struct { + parent types.GIterator[V] + cache types.GIterator[V] ascending bool valid bool + + isZero func(V) bool } -var _ types.Iterator = (*cacheMergeIterator)(nil) +var _ types.Iterator = (*cacheMergeIterator[[]byte])(nil) -func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator { - iter := &cacheMergeIterator{ +func NewCacheMergeIterator[V any](parent, cache types.GIterator[V], ascending bool, isZero func(V) bool) types.GIterator[V] { + iter := &cacheMergeIterator[V]{ parent: parent, cache: cache, ascending: ascending, + isZero: isZero, } iter.valid = iter.skipUntilExistsOrInvalid() @@ -37,17 +40,17 @@ func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.I // Domain implements Iterator. // Returns parent domain because cache and parent domains are the same. -func (iter *cacheMergeIterator) Domain() (start, end []byte) { +func (iter *cacheMergeIterator[V]) Domain() (start, end []byte) { return iter.parent.Domain() } // Valid implements Iterator. -func (iter *cacheMergeIterator) Valid() bool { +func (iter *cacheMergeIterator[V]) Valid() bool { return iter.valid } // Next implements Iterator -func (iter *cacheMergeIterator) Next() { +func (iter *cacheMergeIterator[V]) Next() { iter.assertValid() switch { @@ -74,7 +77,7 @@ func (iter *cacheMergeIterator) Next() { } // Key implements Iterator -func (iter *cacheMergeIterator) Key() []byte { +func (iter *cacheMergeIterator[V]) Key() []byte { iter.assertValid() // If parent is invalid, get the cache key. 
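Note on the generic signature above: the merge iterator no longer hard-codes nil byte slices as deletion markers; the caller supplies an `isZero` predicate for the value type `V`, and the iterator skips cache entries whose value is "zero". A minimal sketch of constructing it for the classic `[]byte`-valued store follows; `parentIter` and `cacheIter` are placeholder `types.Iterator` values (from the parent store and the write set) and are not part of this diff.

// merged iterates over parent and write-set entries in ascending order,
// treating nil cache values as deletions that shadow the parent.
merged := internal.NewCacheMergeIterator(
	parentIter, cacheIter,
	true, // ascending
	func(v []byte) bool { return v == nil },
)
defer merged.Close()
for ; merged.Valid(); merged.Next() {
	_ = merged.Key()
	_ = merged.Value()
}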
@@ -104,7 +107,7 @@ func (iter *cacheMergeIterator) Key() []byte { } // Value implements Iterator -func (iter *cacheMergeIterator) Value() []byte { +func (iter *cacheMergeIterator[V]) Value() V { iter.assertValid() // If parent is invalid, get the cache value. @@ -134,7 +137,7 @@ func (iter *cacheMergeIterator) Value() []byte { } // Close implements Iterator -func (iter *cacheMergeIterator) Close() error { +func (iter *cacheMergeIterator[V]) Close() error { err1 := iter.cache.Close() if err := iter.parent.Close(); err != nil { return err @@ -145,7 +148,7 @@ func (iter *cacheMergeIterator) Close() error { // Error returns an error if the cacheMergeIterator is invalid defined by the // Valid method. -func (iter *cacheMergeIterator) Error() error { +func (iter *cacheMergeIterator[V]) Error() error { if !iter.Valid() { return errors.New("invalid cacheMergeIterator") } @@ -155,14 +158,14 @@ func (iter *cacheMergeIterator) Error() error { // If not valid, panics. // NOTE: May have side-effect of iterating over cache. -func (iter *cacheMergeIterator) assertValid() { +func (iter *cacheMergeIterator[V]) assertValid() { if err := iter.Error(); err != nil { panic(err) } } // Like bytes.Compare but opposite if not ascending. -func (iter *cacheMergeIterator) compare(a, b []byte) int { +func (iter *cacheMergeIterator[V]) compare(a, b []byte) int { if iter.ascending { return bytes.Compare(a, b) } @@ -175,9 +178,9 @@ func (iter *cacheMergeIterator) compare(a, b []byte) int { // If the current cache item is not a delete item, does nothing. // If `until` is nil, there is no limit, and cache may end up invalid. // CONTRACT: cache is valid. -func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { +func (iter *cacheMergeIterator[V]) skipCacheDeletes(until []byte) { for iter.cache.Valid() && - iter.cache.Value() == nil && + iter.isZero(iter.cache.Value()) && (until == nil || iter.compare(iter.cache.Key(), until) < 0) { iter.cache.Next() } @@ -186,7 +189,7 @@ func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { // Fast forwards cache (or parent+cache in case of deleted items) until current // item exists, or until iterator becomes invalid. // Returns whether the iterator is valid. -func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { +func (iter *cacheMergeIterator[V]) skipUntilExistsOrInvalid() bool { for { // If parent is invalid, fast-forward cache. if !iter.parent.Valid() { @@ -211,7 +214,7 @@ func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { case 0: // parent == cache. // Skip over if cache item is a delete. valueC := iter.cache.Value() - if valueC == nil { + if iter.isZero(valueC) { iter.parent.Next() iter.cache.Next() @@ -223,7 +226,7 @@ func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { case 1: // cache < parent // Skip over if cache item is a delete. 
valueC := iter.cache.Value() - if valueC == nil { + if iter.isZero(valueC) { iter.skipCacheDeletes(keyP) continue } diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go deleted file mode 100644 index ecdc86a8e43a..000000000000 --- a/store/cachekv/search_benchmark_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv - -import ( - "strconv" - "testing" - - "cosmossdk.io/store/cachekv/internal" -) - -func BenchmarkLargeUnsortedMisses(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - store := generateStore() - b.StartTimer() - - for k := 0; k < 10000; k++ { - // cache has A + Z values - // these are within range, but match nothing - store.dirtyItems([]byte("B1"), []byte("B2")) - } - } -} - -func generateStore() *Store { - cache := map[string]*cValue{} - unsorted := map[string]struct{}{} - for i := 0; i < 5000; i++ { - key := "A" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - for i := 0; i < 5000; i++ { - key := "Z" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - return &Store{ - cache: cache, - unsortedCache: unsorted, - sortedCache: internal.NewBTree(), - } -} diff --git a/store/cachekv/search_test.go b/store/cachekv/search_test.go deleted file mode 100644 index aedc0669030a..000000000000 --- a/store/cachekv/search_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package cachekv - -import "testing" - -func TestFindStartIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 8, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent but within >=start", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: -1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findStartIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} - -func TestFindEndIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 7, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", 
"v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: -1, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: 4, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findEndIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} diff --git a/store/cachekv/store.go b/store/cachekv/store.go index 08cfc2b325f6..084adef7cd79 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -1,199 +1,163 @@ package cachekv import ( - "bytes" "io" - "sort" - "sync" - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/math" "cosmossdk.io/store/cachekv/internal" - "cosmossdk.io/store/internal/conv" - "cosmossdk.io/store/internal/kv" + "cosmossdk.io/store/internal/btree" "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) -// cValue represents a cached value. -// If dirty is true, it indicates the cached value is different from the underlying value. -type cValue struct { - value []byte - dirty bool -} +type Store = GStore[[]byte] + +var ( + _ types.CacheKVStore = (*Store)(nil) + _ types.CacheWrap = (*Store)(nil) + _ types.BranchStore = (*Store)(nil) +) -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - unsortedCache map[string]struct{} - sortedCache internal.BTree // always ascending sorted - parent types.KVStore +func NewStore(parent types.KVStore) *Store { + return NewGStore( + parent, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) } -var _ types.CacheKVStore = (*Store)(nil) +// GStore wraps an in-memory cache around an underlying types.KVStore. +type GStore[V any] struct { + writeSet btree.BTree[V] // always ascending sorted + parent types.GKVStore[V] + + // isZero is a function that returns true if the value is considered "zero", for []byte and pointers the zero value + // is `nil`, zero value is not allowed to set to a key, and it's returned if the key is not found. + isZero func(V) bool + zeroValue V + // valueLen validates the value before it's set + valueLen func(V) int +} // NewStore creates a new Store object -func NewStore(parent types.KVStore) *Store { - return &Store{ - cache: make(map[string]*cValue), - unsortedCache: make(map[string]struct{}), - sortedCache: internal.NewBTree(), - parent: parent, +func NewGStore[V any](parent types.GKVStore[V], isZero func(V) bool, valueLen func(V) int) *GStore[V] { + return &GStore[V]{ + writeSet: btree.NewBTree[V](), + parent: parent, + isZero: isZero, + valueLen: valueLen, } } // GetStoreType implements Store. -func (store *Store) GetStoreType() types.StoreType { +func (store *GStore[V]) GetStoreType() types.StoreType { return store.parent.GetStoreType() } -// Get implements types.KVStore. -func (store *Store) Get(key []byte) (value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() +// Clone creates a copy-on-write snapshot of the cache store, +// it only performs a shallow copy so is very fast. 
+func (store *GStore[V]) Clone() types.BranchStore { + v := *store + v.writeSet = store.writeSet.Copy() + return &v +} + +// swapCache swap out the internal cache store and leave the current store unusable. +func (store *GStore[V]) swapCache() btree.BTree[V] { + cache := store.writeSet + store.writeSet = btree.BTree[V]{} + return cache +} +// Restore restores the store cache to a given snapshot, leaving the snapshot unusable. +func (store *GStore[V]) Restore(s types.BranchStore) { + store.writeSet = s.(*GStore[V]).swapCache() +} + +// Get implements types.KVStore. +func (store *GStore[V]) Get(key []byte) V { types.AssertValidKey(key) - cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)] - if !ok { - value = store.parent.Get(key) - store.setCacheValue(key, value, false) - } else { - value = cacheValue.value + value, found := store.writeSet.Get(key) + if !found { + return store.parent.Get(key) } - return value } // Set implements types.KVStore. -func (store *Store) Set(key, value []byte) { +func (store *GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, store.isZero, store.valueLen) - store.mtx.Lock() - defer store.mtx.Unlock() - store.setCacheValue(key, value, true) + store.writeSet.Set(key, value) } // Has implements types.KVStore. -func (store *Store) Has(key []byte) bool { - value := store.Get(key) - return value != nil -} - -// Delete implements types.KVStore. -func (store *Store) Delete(key []byte) { +func (store *GStore[V]) Has(key []byte) bool { types.AssertValidKey(key) - store.mtx.Lock() - defer store.mtx.Unlock() - - store.setCacheValue(key, nil, true) + value, found := store.writeSet.Get(key) + if !found { + return store.parent.Has(key) + } + return !store.isZero(value) } -func (store *Store) resetCaches() { - if len(store.cache) > 100_000 { - // Cache is too large. We likely did something linear time - // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. - // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. - // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. - store.cache = make(map[string]*cValue) - store.unsortedCache = make(map[string]struct{}) - } else { - // Clear the cache using the map clearing idiom - // and not allocating fresh objects. - // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ - for key := range store.cache { - delete(store.cache, key) - } - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } - store.sortedCache = internal.NewBTree() +// Delete implements types.KVStore. +func (store *GStore[V]) Delete(key []byte) { + types.AssertValidKey(key) + store.writeSet.Set(key, store.zeroValue) } // Implements Cachetypes.KVStore. -func (store *Store) Write() { - store.mtx.Lock() - defer store.mtx.Unlock() - - if len(store.cache) == 0 && len(store.unsortedCache) == 0 { - store.sortedCache = internal.NewBTree() - return - } - - type cEntry struct { - key string - val *cValue - } - - // We need a copy of all of the keys. - // Not the best. To reduce RAM pressure, we copy the values as well - // and clear out the old caches right after the copy. 
- sortedCache := make([]cEntry, 0, len(store.cache)) - - for key, dbValue := range store.cache { - if dbValue.dirty { - sortedCache = append(sortedCache, cEntry{key, dbValue}) +func (store *GStore[V]) Write() { + store.writeSet.Scan(func(key []byte, value V) bool { + if store.isZero(value) { + store.parent.Delete(key) + } else { + store.parent.Set(key, value) } - } - store.resetCaches() - sort.Slice(sortedCache, func(i, j int) bool { - return sortedCache[i].key < sortedCache[j].key + return true }) + store.writeSet.Clear() +} - // TODO: Consider allowing usage of Batch, which would allow the write to - // at least happen atomically. - for _, obj := range sortedCache { - // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot - // be sure if the underlying store might do a save with the byteslice or - // not. Once we get confirmation that .Delete is guaranteed not to - // save the byteslice, then we can assume only a read-only copy is sufficient. - if obj.val.value != nil { - // It already exists in the parent, hence update it. - store.parent.Set([]byte(obj.key), obj.val.value) - } else { - store.parent.Delete([]byte(obj.key)) - } - } +func (store *GStore[V]) Discard() { + store.writeSet.Clear() } // CacheWrap implements CacheWrapper. -func (store *Store) CacheWrap() types.CacheWrap { - return NewStore(store) +func (store *GStore[V]) CacheWrap() types.CacheWrap { + return NewGStore(store, store.isZero, store.valueLen) } // CacheWrapWithTrace implements the CacheWrapper interface. -func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return NewStore(tracekv.NewStore(store, w, tc)) +func (store *GStore[V]) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + if store, ok := any(store).(*GStore[[]byte]); ok { + return NewStore(tracekv.NewStore(store, w, tc)) + } + return store.CacheWrap() } //---------------------------------------- // Iteration // Iterator implements types.KVStore. -func (store *Store) Iterator(start, end []byte) types.Iterator { +func (store *GStore[V]) Iterator(start, end []byte) types.GIterator[V] { return store.iterator(start, end, true) } // ReverseIterator implements types.KVStore. -func (store *Store) ReverseIterator(start, end []byte) types.Iterator { +func (store *GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { return store.iterator(start, end, false) } -func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { - store.mtx.Lock() - defer store.mtx.Unlock() - - store.dirtyItems(start, end) - isoSortedCache := store.sortedCache.Copy() +func (store *GStore[V]) iterator(start, end []byte, ascending bool) types.GIterator[V] { + isoSortedCache := store.writeSet.Copy() var ( err error - parent, cache types.Iterator + parent, cache types.GIterator[V] ) if ascending { @@ -207,202 +171,5 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { panic(err) } - return internal.NewCacheMergeIterator(parent, cache, ascending) -} - -func findStartIndex(strL []string, startQ string) int { - // Modified binary search to find the very first element in >=startQ. - if len(strL) == 0 { - return -1 - } - - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == startQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStL, that i+1 will be the first - // element >= midStr. 
-			for i := mid - 1; i >= 0; i-- {
-				if strL[i] != midStr {
-					return i + 1
-				}
-			}
-			return 0
-		}
-		if midStr < startQ {
-			left = mid + 1
-		} else { // midStrL > startQ
-			right = mid - 1
-		}
-	}
-	if left >= 0 && left < len(strL) && strL[left] >= startQ {
-		return left
-	}
-	return -1
-}
-
-func findEndIndex(strL []string, endQ string) int {
-	if len(strL) == 0 {
-		return -1
-	}
-
-	// Modified binary search to find the very first element <endQ.
-	var left, right, mid int
-	right = len(strL) - 1
-	for left <= right {
-		mid = (left + right) >> 1
-		midStr := strL[mid]
-		if midStr == endQ {
-			// Handle condition where there might be multiple values equal to startQ.
-			// We are looking for the very first value < midStL, that i+1 will be the first
-			// element >= midStr.
-			for i := mid - 1; i >= 0; i-- {
-				if strL[i] < midStr {
-					return i + 1
-				}
-			}
-			return 0
-		}
-		if midStr < endQ {
-			left = mid + 1
-		} else { // midStrL > startQ
-			right = mid - 1
-		}
-	}
-
-	// Binary search failed, now let's find a value less than endQ.
-	for i := right; i >= 0; i-- {
-		if strL[i] < endQ {
-			return i
-		}
-	}
-
-	return -1
-}
-
-type sortState int
-
-const (
-	stateUnsorted sortState = iota
-	stateAlreadySorted
-)
-
-const minSortSize = 1024
-
-// Constructs a slice of dirty items, to use w/ memIterator.
-func (store *Store) dirtyItems(start, end []byte) {
-	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
-	if end != nil && startStr > endStr {
-		// Nothing to do here.
-		return
-	}
-
-	n := len(store.unsortedCache)
-	unsorted := make([]*kv.Pair, 0)
-	// If the unsortedCache is too big, its costs too much to determine
-	// whats in the subset we are concerned about.
-	// If you are interleaving iterator calls with writes, this can easily become an
-	// O(N^2) overhead.
-	// Even without that, too many range checks eventually becomes more expensive
-	// than just not having the cache.
-	if n < minSortSize {
-		for key := range store.unsortedCache {
-			// dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start
-			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
-				cacheValue := store.cache[key]
-				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
-			}
-		}
-		store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
-		return
-	}
-
-	// Otherwise it is large so perform a modified binary search to find
-	// the target ranges for the keys that we should be looking for.
- strL := make([]string, 0, n) - for key := range store.unsortedCache { - strL = append(strL, key) - } - sort.Strings(strL) - - // Now find the values within the domain - // [start, end) - startIndex := findStartIndex(strL, startStr) - if startIndex < 0 { - startIndex = 0 - } - - var endIndex int - if end == nil { - endIndex = len(strL) - 1 - } else { - endIndex = findEndIndex(strL, endStr) - } - if endIndex < 0 { - endIndex = len(strL) - 1 - } - - // Since we spent cycles to sort the values, we should process and remove a reasonable amount - // ensure start to end is at least minSortSize in size - // if below minSortSize, expand it to cover additional values - // this amortizes the cost of processing elements across multiple calls - if endIndex-startIndex < minSortSize { - endIndex = math.Min(startIndex+minSortSize, len(strL)-1) - if endIndex-startIndex < minSortSize { - startIndex = math.Max(endIndex-minSortSize, 0) - } - } - - kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex) - for i := startIndex; i <= endIndex; i++ { - key := strL[i] - cacheValue := store.cache[key] - kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - - // kvL was already sorted so pass it in as is. - store.clearUnsortedCacheSubset(kvL, stateAlreadySorted) -} - -func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { - n := len(store.unsortedCache) - if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } else { // Otherwise, normally delete the unsorted keys from the map. - for _, kv := range unsorted { - delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) - } - } - - if sortState == stateUnsorted { - sort.Slice(unsorted, func(i, j int) bool { - return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0 - }) - } - - for _, item := range unsorted { - // sortedCache is able to store `nil` value to represent deleted items. - store.sortedCache.Set(item.Key, item.Value) - } -} - -//---------------------------------------- -// etc - -// Only entrypoint to mutate store.cache. -// A `nil` value means a deletion. 
-func (store *Store) setCacheValue(key, value []byte, dirty bool) { - keyStr := conv.UnsafeBytesToStr(key) - store.cache[keyStr] = &cValue{ - value: value, - dirty: dirty, - } - if dirty { - store.unsortedCache[keyStr] = struct{}{} - } + return internal.NewCacheMergeIterator(parent, cache, ascending, store.isZero) } diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go index 980fe194677b..f99514ecce4a 100644 --- a/store/cachekv/store_test.go +++ b/store/cachekv/store_test.go @@ -447,6 +447,20 @@ func TestIteratorDeadlock(t *testing.T) { defer it2.Close() } +func TestBranchStore(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + store := cachekv.NewStore(mem) + + store.Set([]byte("key1"), []byte("value1")) + + branch := store.Clone().(types.CacheKVStore) + branch.Set([]byte("key1"), []byte("value2")) + + require.Equal(t, []byte("value1"), store.Get([]byte("key1"))) + store.Restore(branch.(types.BranchStore)) + require.Equal(t, []byte("value2"), store.Get([]byte("key1"))) +} + //------------------------------------------------------------------------------------------- // do some random ops diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index a2bd800265d0..b884f69b177c 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -5,12 +5,9 @@ import ( "io" "maps" - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" + dbm "github.com/cosmos/cosmos-db" ) // storeNameCtxKey is the TraceContext metadata key that identifies @@ -25,12 +22,13 @@ const storeNameCtxKey = "store_name" // NOTE: a Store (and MultiStores in general) should never expose the // keys for the substores. type Store struct { - db types.CacheKVStore stores map[types.StoreKey]types.CacheWrap - keys map[string]types.StoreKey traceWriter io.Writer traceContext types.TraceContext + parentStore func(types.StoreKey) types.CacheWrapper + + branched bool } var _ types.CacheMultiStore = Store{} @@ -39,26 +37,17 @@ var _ types.CacheMultiStore = Store{} // CacheWrapper objects and a KVStore as the database. Each CacheWrapper store // is a branched store. func NewFromKVStore( - store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, - keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, + stores map[types.StoreKey]types.CacheWrapper, + traceWriter io.Writer, traceContext types.TraceContext, ) Store { cms := Store{ - db: cachekv.NewStore(store), stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), - keys: keys, traceWriter: traceWriter, traceContext: traceContext, } for key, store := range stores { - if cms.TracingEnabled() { - tctx := cms.traceContext.Clone().Merge(types.TraceContext{ - storeNameCtxKey: key.Name(), - }) - - store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) - } - cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) + cms.initStore(key, store) } return cms @@ -67,19 +56,40 @@ func NewFromKVStore( // NewStore creates a new Store object from a mapping of store keys to // CacheWrapper objects. Each CacheWrapper store is a branched store. 
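
A quick illustration of the slimmed-down constructor: a cache multistore can now be assembled from nothing more than a map of CacheWrappers. This is only a sketch; `kvKey`/`kvParent` and `objKey`/`objParent` are placeholder names, not part of this change.

    stores := map[types.StoreKey]types.CacheWrapper{
    	kvKey:  kvParent,  // any types.KVStore satisfies CacheWrapper
    	objKey: objParent, // likewise for a types.ObjKVStore
    }
    cms := cachemulti.NewFromKVStore(stores, nil, nil)
    cms.GetKVStore(kvKey).Set([]byte("k"), []byte("v"))
    cms.Write() // flushes every branched substore back to its parent
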
func NewStore( - db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, + _ dbm.DB, stores map[types.StoreKey]types.CacheWrapper, _ map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, ) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) + return NewFromKVStore(stores, traceWriter, traceContext) } -func newCacheMultiStoreFromCMS(cms Store) Store { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range cms.stores { - stores[k] = v +// NewFromParent constructs a cache multistore with a parent store lazily, +// the parent is usually another cache multistore or the block-stm multiversion store. +func NewFromParent( + parentStore func(types.StoreKey) types.CacheWrapper, + traceWriter io.Writer, traceContext types.TraceContext, +) Store { + return Store{ + stores: make(map[types.StoreKey]types.CacheWrap), + traceWriter: traceWriter, + traceContext: traceContext, + parentStore: parentStore, } +} + +func (cms Store) initStore(key types.StoreKey, store types.CacheWrapper) types.CacheWrap { + if cms.TracingEnabled() { + // only support tracing on KVStore. + if kvstore, ok := store.(types.KVStore); ok { + tctx := cms.traceContext.Clone().Merge(types.TraceContext{ + storeNameCtxKey: key.Name(), + }) - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) + store = tracekv.NewStore(kvstore, cms.traceWriter, tctx) + } + } + cache := store.CacheWrap() + cms.stores[key] = cache + return cache } // SetTracer sets the tracer for the MultiStore that the underlying @@ -108,11 +118,6 @@ func (cms Store) TracingEnabled() bool { return cms.traceWriter != nil } -// LatestVersion returns the branch version of the store -func (cms Store) LatestVersion() int64 { - panic("cannot get latest version from branch cached multi-store") -} - // GetStoreType returns the type of the store. func (cms Store) GetStoreType() types.StoreType { return types.StoreTypeMulti @@ -120,12 +125,20 @@ func (cms Store) GetStoreType() types.StoreType { // Write calls Write on each underlying store. func (cms Store) Write() { - cms.db.Write() + if cms.branched { + panic("cannot Write on branched store") + } for _, store := range cms.stores { store.Write() } } +func (cms Store) Discard() { + for _, store := range cms.stores { + store.Discard() + } +} + // Implements CacheWrapper. func (cms Store) CacheWrap() types.CacheWrap { return cms.CacheMultiStore().(types.CacheWrap) @@ -138,32 +151,93 @@ func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac // Implements MultiStore. func (cms Store) CacheMultiStore() types.CacheMultiStore { - return newCacheMultiStoreFromCMS(cms) + return NewFromParent(cms.getCacheWrapper, cms.traceWriter, cms.traceContext) } -// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic -// as an already cached multi-store cannot load previous versions. -// -// TODO: The store implementation can possibly be modified to support this as it -// seems safe to load previous versions (heights). 
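
The lazily-wired variant can be sketched like this; `parentLookup` stands in for whatever resolves a StoreKey to its parent (for example another cache multistore, or a block-stm multiversion store), so treat the closure body and the `parent`/`storeKey` names as assumptions.

    parentLookup := func(key types.StoreKey) types.CacheWrapper {
    	return parent.GetKVStore(key) // resolved only when the key is first touched
    }
    ms := cachemulti.NewFromParent(parentLookup, nil, nil)
    ms.GetKVStore(storeKey).Set([]byte("k"), []byte("v")) // branches that substore on demand
    ms.Write()
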
-func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { - panic("cannot branch cached multi-store with a version") +func (cms Store) getCacheWrapper(key types.StoreKey) types.CacheWrapper { + store, ok := cms.stores[key] + if !ok && cms.parentStore != nil { + // load on demand + store = cms.initStore(key, cms.parentStore(key)) + } + if key == nil || store == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return store } // GetStore returns an underlying Store by key. func (cms Store) GetStore(key types.StoreKey) types.Store { - s := cms.stores[key] - if key == nil || s == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + store, ok := cms.getCacheWrapper(key).(types.Store) + if !ok { + panic(fmt.Sprintf("store with key %v is not Store", key)) } - return s.(types.Store) + return store } // GetKVStore returns an underlying KVStore by key. func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { - store := cms.stores[key] - if key == nil || store == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + store, ok := cms.getCacheWrapper(key).(types.KVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not KVStore", key)) + } + return store +} + +// GetObjKVStore returns an underlying KVStore by key. +func (cms Store) GetObjKVStore(key types.StoreKey) types.ObjKVStore { + store, ok := cms.getCacheWrapper(key).(types.ObjKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not ObjKVStore", key)) + } + return store +} + +func (cms Store) Clone() Store { + stores := make(map[types.StoreKey]types.CacheWrap, len(cms.stores)) + for k, v := range cms.stores { + stores[k] = v.(types.BranchStore).Clone().(types.CacheWrap) + } + return Store{ + stores: stores, + traceWriter: cms.traceWriter, + traceContext: cms.traceContext, + parentStore: cms.parentStore, + + branched: true, } - return store.(types.KVStore) +} + +func (cms Store) Restore(other Store) { + if !other.branched { + panic("cannot restore from non-branched store") + } + + // discard the non-exists stores + for k, v := range cms.stores { + if _, ok := other.stores[k]; !ok { + // clear the cache store if it's not in the other + v.Discard() + } + } + + // restore the other stores + for k, v := range other.stores { + store, ok := cms.stores[k] + if !ok { + store = cms.initStore(k, cms.parentStore(k)) + } + + store.(types.BranchStore).Restore(v.(types.BranchStore)) + } +} + +func (cms Store) RunAtomic(cb func(types.CacheMultiStore) error) error { + branch := cms.Clone() + if err := cb(branch); err != nil { + return err + } + + cms.Restore(branch) + return nil } diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go index 0ea7785bff93..33af1625c618 100644 --- a/store/cachemulti/store_test.go +++ b/store/cachemulti/store_test.go @@ -1,11 +1,16 @@ package cachemulti import ( + "errors" "fmt" "testing" + dbm "github.com/cosmos/cosmos-db" "github.com/stretchr/testify/require" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal" + "cosmossdk.io/store/internal/btree" "cosmossdk.io/store/types" ) @@ -22,3 +27,63 @@ func TestStoreGetKVStore(t *testing.T) { require.PanicsWithValue(errMsg, func() { s.GetKVStore(key) }) } + +func TestRunAtomic(t *testing.T) { + store := dbadapter.Store{DB: dbm.NewMemDB()} + objStore := internal.NewBTreeStore(btree.NewBTree[any](), + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) + keys := 
map[string]types.StoreKey{ + "abc": types.NewKVStoreKey("abc"), + "obj": types.NewObjectStoreKey("obj"), + } + parent := Store{stores: map[types.StoreKey]types.CacheWrap{ + keys["abc"]: store.CacheWrap(), + keys["obj"]: objStore.CacheWrap(), + }} + + s := Store{stores: map[types.StoreKey]types.CacheWrap{}, parentStore: parent.getCacheWrapper} + err := s.RunAtomic(func(ms types.CacheMultiStore) error { + ms.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value")) + ms.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value") + return nil + }) + require.NoError(t, err) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, []byte(nil), s.GetKVStore(keys["abc"]).Get([]byte("key-non-exist"))) + require.Equal(t, "value", s.GetObjKVStore(keys["obj"]).Get([]byte("key")).(string)) + + require.Error(t, s.RunAtomic(func(ms types.CacheMultiStore) error { + ms.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value2")) + ms.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value2") + return errors.New("failure") + })) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, "value", s.GetObjKVStore(keys["obj"]).Get([]byte("key")).(string)) +} + +func TestBranchStore(t *testing.T) { + store := dbadapter.Store{DB: dbm.NewMemDB()} + objStore := internal.NewBTreeStore(btree.NewBTree[any](), + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) + keys := map[string]types.StoreKey{ + "abc": types.NewKVStoreKey("abc"), + "obj": types.NewObjectStoreKey("obj"), + } + parent := Store{stores: map[types.StoreKey]types.CacheWrap{ + keys["abc"]: store.CacheWrap(), + keys["obj"]: objStore.CacheWrap(), + }} + + s := Store{stores: map[types.StoreKey]types.CacheWrap{}, parentStore: parent.getCacheWrapper} + s.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value")) + snapshot := s.Clone() + s.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value2")) + s.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value") + s.Restore(snapshot) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, nil, s.GetObjKVStore(keys["obj"]).Get([]byte("key"))) +} diff --git a/store/gaskv/store.go b/store/gaskv/store.go index e0f96af7151e..90444372308c 100644 --- a/store/gaskv/store.go +++ b/store/gaskv/store.go @@ -6,62 +6,94 @@ import ( "cosmossdk.io/store/types" ) +// ObjectValueLength is the emulated number of bytes for storing transient objects in gas accounting. +const ObjectValueLength = 16 + var _ types.KVStore = &Store{} -// Store applies gas tracking to an underlying KVStore. It implements the +type Store = GStore[[]byte] + +func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { + return NewGStore(parent, gasMeter, gasConfig, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) +} + +type ObjStore = GStore[any] + +func NewObjStore(parent types.ObjKVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *ObjStore { + return NewGStore(parent, gasMeter, gasConfig, + func(v any) bool { return v == nil }, + func(v any) int { return ObjectValueLength }, + ) +} + +// GStore applies gas tracking to an underlying KVStore. It implements the // KVStore interface. 
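
Because object values have no natural byte length, the object variant charges the flat ObjectValueLength (16) per value where the byte variant would charge len(value). A rough sketch of the accounting, assuming a transient object store as the parent and the stock transient gas config:

    meter := types.NewGasMeter(1_000_000)
    ostore := gaskv.NewObjStore(transient.NewObjStore(), meter, types.TransientGasConfig())
    ostore.Set([]byte("k"), struct{ N int }{42})
    // charged: WriteCostFlat + WriteCostPerByte*len("k") + WriteCostPerByte*ObjectValueLength
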
-type Store struct { +type GStore[V any] struct { gasMeter types.GasMeter gasConfig types.GasConfig - parent types.KVStore + parent types.GKVStore[V] + + isZero func(V) bool + valueLen func(V) int } -// NewStore returns a reference to a new GasKVStore. -func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { - kvs := &Store{ +// NewGStore returns a reference to a new GasKVStore. +func NewGStore[V any]( + parent types.GKVStore[V], + gasMeter types.GasMeter, + gasConfig types.GasConfig, + isZero func(V) bool, + valueLen func(V) int, +) *GStore[V] { + kvs := &GStore[V]{ gasMeter: gasMeter, gasConfig: gasConfig, parent: parent, + isZero: isZero, + valueLen: valueLen, } return kvs } // Implements Store. -func (gs *Store) GetStoreType() types.StoreType { +func (gs *GStore[V]) GetStoreType() types.StoreType { return gs.parent.GetStoreType() } // Implements KVStore. -func (gs *Store) Get(key []byte) (value []byte) { +func (gs *GStore[V]) Get(key []byte) (value V) { gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) value = gs.parent.Get(key) // TODO overflow-safe math? gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(gs.valueLen(value)), types.GasReadPerByteDesc) return value } // Implements KVStore. -func (gs *Store) Set(key, value []byte) { +func (gs *GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, gs.isZero, gs.valueLen) gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) // TODO overflow-safe math? gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(gs.valueLen(value)), types.GasWritePerByteDesc) gs.parent.Set(key, value) } // Implements KVStore. -func (gs *Store) Has(key []byte) bool { +func (gs *GStore[V]) Has(key []byte) bool { gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc) return gs.parent.Has(key) } // Implements KVStore. -func (gs *Store) Delete(key []byte) { +func (gs *GStore[V]) Delete(key []byte) { // charge gas to prevent certain attack vectors even though space is being freed gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc) gs.parent.Delete(key) @@ -70,7 +102,7 @@ func (gs *Store) Delete(key []byte) { // Iterator implements the KVStore interface. It returns an iterator which // incurs a flat gas cost for seeking to the first key/value pair and a variable // gas cost based on the current value's length if the iterator is valid. -func (gs *Store) Iterator(start, end []byte) types.Iterator { +func (gs *GStore[V]) Iterator(start, end []byte) types.GIterator[V] { return gs.iterator(start, end, true) } @@ -78,99 +110,100 @@ func (gs *Store) Iterator(start, end []byte) types.Iterator { // iterator which incurs a flat gas cost for seeking to the first key/value pair // and a variable gas cost based on the current value's length if the iterator // is valid. 
-func (gs *Store) ReverseIterator(start, end []byte) types.Iterator { +func (gs *GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { return gs.iterator(start, end, false) } -// Implements KVStore. -func (gs *Store) CacheWrap() types.CacheWrap { +// CacheWrap implements KVStore. +func (gs *GStore[V]) CacheWrap() types.CacheWrap { panic("cannot CacheWrap a GasKVStore") } // CacheWrapWithTrace implements the KVStore interface. -func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { +func (gs *GStore[V]) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { panic("cannot CacheWrapWithTrace a GasKVStore") } -func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator +func (gs *GStore[V]) iterator(start, end []byte, ascending bool) types.GIterator[V] { + var parent types.GIterator[V] if ascending { parent = gs.parent.Iterator(start, end) } else { parent = gs.parent.ReverseIterator(start, end) } - gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent) - gi.(*gasIterator).consumeSeekGas() + gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent, gs.valueLen) + gi.consumeSeekGas() return gi } -type gasIterator struct { +type gasIterator[V any] struct { gasMeter types.GasMeter gasConfig types.GasConfig - parent types.Iterator + parent types.GIterator[V] + valueLen func(V) int } -func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator { - return &gasIterator{ +func newGasIterator[V any](gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.GIterator[V], valueLen func(V) int) *gasIterator[V] { + return &gasIterator[V]{ gasMeter: gasMeter, gasConfig: gasConfig, parent: parent, + valueLen: valueLen, } } // Implements Iterator. -func (gi *gasIterator) Domain() (start, end []byte) { +func (gi *gasIterator[V]) Domain() (start, end []byte) { return gi.parent.Domain() } // Implements Iterator. -func (gi *gasIterator) Valid() bool { +func (gi *gasIterator[V]) Valid() bool { return gi.parent.Valid() } // Next implements the Iterator interface. It seeks to the next key/value pair // in the iterator. It incurs a flat gas cost for seeking and a variable gas // cost based on the current value's length if the iterator is valid. -func (gi *gasIterator) Next() { +func (gi *gasIterator[V]) Next() { gi.consumeSeekGas() gi.parent.Next() } // Key implements the Iterator interface. It returns the current key and it does // not incur any gas cost. -func (gi *gasIterator) Key() (key []byte) { +func (gi *gasIterator[V]) Key() (key []byte) { key = gi.parent.Key() return key } // Value implements the Iterator interface. It returns the current value and it // does not incur any gas cost. -func (gi *gasIterator) Value() (value []byte) { - value = gi.parent.Value() - return value +func (gi *gasIterator[V]) Value() (value V) { + return gi.parent.Value() } // Implements Iterator. -func (gi *gasIterator) Close() error { +func (gi *gasIterator[V]) Close() error { return gi.parent.Close() } // Error delegates the Error call to the parent iterator. -func (gi *gasIterator) Error() error { +func (gi *gasIterator[V]) Error() error { return gi.parent.Error() } // consumeSeekGas consumes on each iteration step a flat gas cost and a variable gas cost // based on the current value's length. 
-func (gi *gasIterator) consumeSeekGas() { +func (gi *gasIterator[V]) consumeSeekGas() { if gi.Valid() { key := gi.Key() value := gi.Value() gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc) - gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc) + gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(gi.valueLen(value)), types.GasValuePerByteDesc) } gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc) } diff --git a/store/go.sum b/store/go.sum index d180cdf7a5d2..33ae8c76b48c 100644 --- a/store/go.sum +++ b/store/go.sum @@ -444,5 +444,3 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/store/internal/btree/btree.go b/store/internal/btree/btree.go new file mode 100644 index 000000000000..920b373ff491 --- /dev/null +++ b/store/internal/btree/btree.go @@ -0,0 +1,126 @@ +package btree + +import ( + "bytes" + "errors" + + "github.com/tidwall/btree" + + "cosmossdk.io/store/types" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. + // copied from memdb. + bTreeDegree = 32 +) + +var errKeyEmpty = errors.New("key cannot be empty") + +// BTree implements the sorted cache for cachekv store, +// we don't use MemDB here because cachekv is used extensively in sdk core path, +// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. +// +// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. +type BTree[V any] struct { + tree *btree.BTreeG[item[V]] +} + +// NewBTree creates a wrapper around `btree.BTreeG`. +func NewBTree[V any]() BTree[V] { + return BTree[V]{} +} + +func (bt *BTree[V]) init() { + if bt.tree != nil { + return + } + bt.tree = btree.NewBTreeGOptions(byKeys[V], btree.Options{ + Degree: bTreeDegree, + NoLocks: false, + }) +} + +// Set supports nil as value when used as overlay +func (bt *BTree[V]) Set(key []byte, value V) { + bt.init() + bt.tree.Set(newItem(key, value)) +} + +func (bt BTree[V]) Get(key []byte) (V, bool) { + if bt.tree == nil { + var zero V + return zero, false + } + i, found := bt.tree.Get(newItemWithKey[V](key)) + return i.value, found +} + +func (bt *BTree[V]) Delete(key []byte) { + if bt.tree == nil { + return + } + bt.tree.Delete(newItemWithKey[V](key)) +} + +func (bt BTree[V]) Iterator(start, end []byte) (types.GIterator[V], error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, true), nil +} + +func (bt BTree[V]) ReverseIterator(start, end []byte) (types.GIterator[V], error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, false), nil +} + +// Copy the tree. This is a copy-on-write operation and is very fast because +// it only performs a shadowed copy. 
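
For reference, the zero value of BTree is directly usable; the underlying tidwall tree is only allocated on the first Set. A minimal sketch:

    var bt btree.BTree[[]byte]        // zero value, no allocation yet
    bt.Set([]byte("a"), []byte{0x01}) // lazily initializes the tree
    bt.Set([]byte("b"), nil)          // nil is a legal value, used as a deletion marker by overlays
    v, found := bt.Get([]byte("a"))   // []byte{0x01}, true
    snapshot := bt.Copy()             // copy-on-write, cheap even for large trees
    _, _, _ = v, found, snapshot
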
+func (bt BTree[V]) Copy() BTree[V] { + if bt.tree == nil { + return BTree[V]{} + } + return BTree[V]{ + tree: bt.tree.Copy(), + } +} + +func (bt BTree[V]) Clear() { + if bt.tree == nil { + return + } + bt.tree.Clear() +} + +func (bt BTree[V]) Scan(cb func(key []byte, value V) bool) { + if bt.tree == nil { + return + } + bt.tree.Scan(func(i item[V]) bool { + return cb(i.key, i.value) + }) +} + +// item is a btree item with byte slices as keys and values +type item[V any] struct { + key []byte + value V +} + +// byKeys compares the items by key +func byKeys[V any](a, b item[V]) bool { + return bytes.Compare(a.key, b.key) == -1 +} + +// newItem creates a new pair item. +func newItem[V any](key []byte, value V) item[V] { + return item[V]{key: key, value: value} +} + +// newItem creates a new pair item with empty value. +func newItemWithKey[V any](key []byte) item[V] { + return item[V]{key: key} +} diff --git a/store/cachekv/internal/btree_test.go b/store/internal/btree/btree_test.go similarity index 91% rename from store/cachekv/internal/btree_test.go rename to store/internal/btree/btree_test.go index 06437997f636..75613ebbb69c 100644 --- a/store/cachekv/internal/btree_test.go +++ b/store/internal/btree/btree_test.go @@ -1,4 +1,4 @@ -package internal +package btree import ( "testing" @@ -9,20 +9,23 @@ import ( ) func TestGetSetDelete(t *testing.T) { - db := NewBTree() + db := NewBTree[[]byte]() // A nonexistent key should return nil. - value := db.Get([]byte("a")) + value, found := db.Get([]byte("a")) require.Nil(t, value) + require.False(t, found) // Set and get a value. db.Set([]byte("a"), []byte{0x01}) db.Set([]byte("b"), []byte{0x02}) - value = db.Get([]byte("a")) + value, found = db.Get([]byte("a")) require.Equal(t, []byte{0x01}, value) + require.True(t, found) - value = db.Get([]byte("b")) + value, found = db.Get([]byte("b")) require.Equal(t, []byte{0x02}, value) + require.True(t, found) // Deleting a non-existent value is fine. db.Delete([]byte("x")) @@ -30,17 +33,27 @@ func TestGetSetDelete(t *testing.T) { // Delete a value. db.Delete([]byte("a")) - value = db.Get([]byte("a")) + value, found = db.Get([]byte("a")) require.Nil(t, value) + require.False(t, found) db.Delete([]byte("b")) - value = db.Get([]byte("b")) + value, found = db.Get([]byte("b")) require.Nil(t, value) + require.False(t, found) +} + +func TestNilValue(t *testing.T) { + db := NewBTree[[]byte]() + db.Set([]byte("a"), nil) + value, found := db.Get([]byte("a")) + require.Nil(t, value) + require.True(t, found) } func TestDBIterator(t *testing.T) { - db := NewBTree() + db := NewBTree[[]byte]() for i := 0; i < 10; i++ { if i != 6 { // but skip 6. @@ -171,7 +184,7 @@ func TestDBIterator(t *testing.T) { []int64(nil), "reverse iterator from 2 (ex) to 4") // Ensure that the iterators don't panic with an empty database. - db2 := NewBTree() + db2 := NewBTree[[]byte]() itr, err = db2.Iterator(nil, nil) require.NoError(t, err) diff --git a/store/cachekv/internal/memiterator.go b/store/internal/btree/memiterator.go similarity index 60% rename from store/cachekv/internal/memiterator.go rename to store/internal/btree/memiterator.go index 9dbba7587071..b3caabd4584b 100644 --- a/store/cachekv/internal/memiterator.go +++ b/store/internal/btree/memiterator.go @@ -1,4 +1,4 @@ -package internal +package btree import ( "bytes" @@ -9,13 +9,13 @@ import ( "cosmossdk.io/store/types" ) -var _ types.Iterator = (*memIterator)(nil) +var _ types.Iterator = (*memIterator[[]byte])(nil) // memIterator iterates over iterKVCache items. 
// if value is nil, means it was deleted. // Implements Iterator. -type memIterator struct { - iter btree.IterG[item] +type memIterator[V any] struct { + iter btree.IterG[item[V]] start []byte end []byte @@ -23,18 +23,30 @@ type memIterator struct { valid bool } -func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { +func newMemIterator[V any](start, end []byte, items BTree[V], ascending bool) *memIterator[V] { + if items.tree == nil { + return &memIterator[V]{ + start: start, + end: end, + ascending: ascending, + valid: false, + } + } + + var ( + valid bool + empty V + ) iter := items.tree.Iter() - var valid bool if ascending { if start != nil { - valid = iter.Seek(newItem(start, nil)) + valid = iter.Seek(newItem(start, empty)) } else { valid = iter.First() } } else { if end != nil { - valid = iter.Seek(newItem(end, nil)) + valid = iter.Seek(newItem(end, empty)) if !valid { valid = iter.Last() } else { @@ -46,7 +58,7 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator } } - mi := &memIterator{ + mi := &memIterator[V]{ iter: iter, start: start, end: end, @@ -61,27 +73,27 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator return mi } -func (mi *memIterator) Domain() (start, end []byte) { +func (mi *memIterator[V]) Domain() (start, end []byte) { return mi.start, mi.end } -func (mi *memIterator) Close() error { +func (mi *memIterator[V]) Close() error { mi.iter.Release() return nil } -func (mi *memIterator) Error() error { +func (mi *memIterator[V]) Error() error { if !mi.Valid() { return errors.New("invalid memIterator") } return nil } -func (mi *memIterator) Valid() bool { +func (mi *memIterator[V]) Valid() bool { return mi.valid } -func (mi *memIterator) Next() { +func (mi *memIterator[V]) Next() { mi.assertValid() if mi.ascending { @@ -95,7 +107,7 @@ func (mi *memIterator) Next() { } } -func (mi *memIterator) keyInRange(key []byte) bool { +func (mi *memIterator[V]) keyInRange(key []byte) bool { if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { return false } @@ -105,15 +117,15 @@ func (mi *memIterator) keyInRange(key []byte) bool { return true } -func (mi *memIterator) Key() []byte { +func (mi *memIterator[V]) Key() []byte { return mi.iter.Item().key } -func (mi *memIterator) Value() []byte { +func (mi *memIterator[V]) Value() V { return mi.iter.Item().value } -func (mi *memIterator) assertValid() { +func (mi *memIterator[V]) assertValid() { if err := mi.Error(); err != nil { panic(err) } diff --git a/store/internal/btreeadaptor.go b/store/internal/btreeadaptor.go new file mode 100644 index 000000000000..c4b3b9b25b50 --- /dev/null +++ b/store/internal/btreeadaptor.go @@ -0,0 +1,68 @@ +package internal + +import ( + "io" + + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/internal/btree" + "cosmossdk.io/store/types" +) + +var ( + _ types.KVStore = (*BTreeStore[[]byte])(nil) + _ types.ObjKVStore = (*BTreeStore[any])(nil) +) + +// BTreeStore is a wrapper for a BTree with GKVStore[V] implementation +type BTreeStore[V any] struct { + btree.BTree[V] + isZero func(V) bool + valueLen func(V) int +} + +// NewBTreeStore constructs new BTree adapter +func NewBTreeStore[V any](btree btree.BTree[V], isZero func(V) bool, valueLen func(V) int) *BTreeStore[V] { + return &BTreeStore[V]{btree, isZero, valueLen} +} + +func (ts *BTreeStore[V]) Get(key []byte) (value V) { + value, _ = ts.BTree.Get(key) + return +} + +// Hash Implements GKVStore. 
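
This adaptor gives a raw BTree the full GKVStore surface, so it can act as an in-memory parent for the generic cachekv layer. A sketch with any-typed values; the zero-check and length closures mirror the ones used in the tests and are otherwise arbitrary:

    objStore := internal.NewBTreeStore(btree.NewBTree[any](),
    	func(v any) bool { return v == nil }, // nil marks a deleted value
    	func(v any) int { return 1 },         // emulated length for validation
    )
    branch := objStore.CacheWrap().(types.ObjKVStore)
    branch.Set([]byte("k"), 42)
    // objStore.Has([]byte("k")) stays false until the branch is written back
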
+func (ts *BTreeStore[V]) Has(key []byte) bool { + _, found := ts.BTree.Get(key) + return found +} + +func (ts *BTreeStore[V]) Iterator(start, end []byte) types.GIterator[V] { + it, err := ts.BTree.Iterator(start, end) + if err != nil { + panic(err) + } + return it +} + +func (ts *BTreeStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { + it, err := ts.BTree.ReverseIterator(start, end) + if err != nil { + panic(err) + } + return it +} + +// GetStoreType returns the type of the store. +func (ts *BTreeStore[V]) GetStoreType() types.StoreType { + return types.StoreTypeDB +} + +// CacheWrap branches the underlying store. +func (ts *BTreeStore[V]) CacheWrap() types.CacheWrap { + return cachekv.NewGStore(ts, ts.isZero, ts.valueLen) +} + +// CacheWrapWithTrace branches the underlying store. +func (ts *BTreeStore[V]) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + return cachekv.NewGStore(ts, ts.isZero, ts.valueLen) +} diff --git a/store/listenkv/store.go b/store/listenkv/store.go index b08a6e395071..5ca4472d6da2 100644 --- a/store/listenkv/store.go +++ b/store/listenkv/store.go @@ -3,6 +3,7 @@ package listenkv import ( "io" + "cosmossdk.io/store/cachekv" "cosmossdk.io/store/types" ) @@ -132,7 +133,7 @@ func (s *Store) GetStoreType() types.StoreType { // CacheWrap implements the KVStore interface. It panics as a Store // cannot be cache wrapped. func (s *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a ListenKVStore") + return cachekv.NewStore(s) } // CacheWrapWithTrace implements the KVStore interface. It panics as a diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go index 51b88912c2e1..a664593e85da 100644 --- a/store/listenkv/store_test.go +++ b/store/listenkv/store_test.go @@ -272,7 +272,7 @@ func TestListenKVStoreGetStoreType(t *testing.T) { func TestListenKVStoreCacheWrap(t *testing.T) { store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) + store.CacheWrap() } func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { diff --git a/store/prefix/store.go b/store/prefix/store.go index 32b9e8247e2c..0adbe5aaae1a 100644 --- a/store/prefix/store.go +++ b/store/prefix/store.go @@ -10,20 +10,53 @@ import ( "cosmossdk.io/store/types" ) -var _ types.KVStore = Store{} +type ( + Store = GStore[[]byte] + ObjStore = GStore[any] +) + +var ( + _ types.KVStore = Store{} + _ types.ObjKVStore = ObjStore{} +) + +func NewStore(parent types.KVStore, prefix []byte) Store { + return NewGStore( + parent, prefix, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) +} -// Store is similar with cometbft/cometbft/libs/db/prefix_db +func NewObjStore(parent types.ObjKVStore, prefix []byte) ObjStore { + return NewGStore( + parent, prefix, + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) +} + +// GStore is similar with cometbft/cometbft/libs/db/prefix_db // both gives access only to the limited subset of the store // for convinience or safety -type Store struct { - parent types.KVStore +type GStore[V any] struct { + parent types.GKVStore[V] prefix []byte + + isZero func(V) bool + valueLen func(V) int } -func NewStore(parent types.KVStore, prefix []byte) Store { - return Store{ +func NewGStore[V any]( + parent types.GKVStore[V], prefix []byte, + isZero func(V) bool, valueLen func(V) int, +) GStore[V] { + return GStore[V]{ parent: parent, prefix: prefix, + + isZero: isZero, + valueLen: valueLen, } } @@ -34,7 +67,7 @@ func cloneAppend(bz, tail []byte) (res 
[]byte) { return } -func (s Store) key(key []byte) (res []byte) { +func (s GStore[V]) key(key []byte) (res []byte) { if key == nil { panic("nil key on Store") } @@ -43,46 +76,49 @@ func (s Store) key(key []byte) (res []byte) { } // Implements Store -func (s Store) GetStoreType() types.StoreType { +func (s GStore[V]) GetStoreType() types.StoreType { return s.parent.GetStoreType() } // Implements CacheWrap -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) +func (s GStore[V]) CacheWrap() types.CacheWrap { + return cachekv.NewGStore(s, s.isZero, s.valueLen) } // CacheWrapWithTrace implements the KVStore interface. -func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) +func (s GStore[V]) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + if store, ok := any(s).(*GStore[[]byte]); ok { + return cachekv.NewGStore(tracekv.NewStore(store, w, tc), store.isZero, store.valueLen) + } + return s.CacheWrap() } // Implements KVStore -func (s Store) Get(key []byte) []byte { +func (s GStore[V]) Get(key []byte) V { res := s.parent.Get(s.key(key)) return res } // Implements KVStore -func (s Store) Has(key []byte) bool { +func (s GStore[V]) Has(key []byte) bool { return s.parent.Has(s.key(key)) } // Implements KVStore -func (s Store) Set(key, value []byte) { +func (s GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, s.isZero, s.valueLen) s.parent.Set(s.key(key), value) } // Implements KVStore -func (s Store) Delete(key []byte) { +func (s GStore[V]) Delete(key []byte) { s.parent.Delete(s.key(key)) } // Implements KVStore // Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106 -func (s Store) Iterator(start, end []byte) types.Iterator { +func (s GStore[V]) Iterator(start, end []byte) types.GIterator[V] { newstart := cloneAppend(s.prefix, start) var newend []byte @@ -99,7 +135,7 @@ func (s Store) Iterator(start, end []byte) types.Iterator { // ReverseIterator implements KVStore // Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129 -func (s Store) ReverseIterator(start, end []byte) types.Iterator { +func (s GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { newstart := cloneAppend(s.prefix, start) var newend []byte @@ -114,18 +150,18 @@ func (s Store) ReverseIterator(start, end []byte) types.Iterator { return newPrefixIterator(s.prefix, start, end, iter) } -var _ types.Iterator = (*prefixIterator)(nil) +var _ types.Iterator = (*prefixIterator[[]byte])(nil) -type prefixIterator struct { +type prefixIterator[V any] struct { prefix []byte start []byte end []byte - iter types.Iterator + iter types.GIterator[V] valid bool } -func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator { - return &prefixIterator{ +func newPrefixIterator[V any](prefix, start, end []byte, parent types.GIterator[V]) *prefixIterator[V] { + return &prefixIterator[V]{ prefix: prefix, start: start, end: end, @@ -135,17 +171,17 @@ func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefix } // Implements Iterator -func (pi *prefixIterator) Domain() ([]byte, []byte) { +func (pi *prefixIterator[V]) Domain() ([]byte, []byte) { return pi.start, pi.end } // Implements Iterator -func (pi *prefixIterator) Valid() bool { +func (pi *prefixIterator[V]) Valid() bool { return pi.valid && pi.iter.Valid() } // 
Implements Iterator -func (pi *prefixIterator) Next() { +func (pi *prefixIterator[V]) Next() { if !pi.valid { panic("prefixIterator invalid, cannot call Next()") } @@ -157,7 +193,7 @@ func (pi *prefixIterator) Next() { } // Implements Iterator -func (pi *prefixIterator) Key() (key []byte) { +func (pi *prefixIterator[V]) Key() (key []byte) { if !pi.valid { panic("prefixIterator invalid, cannot call Key()") } @@ -169,7 +205,7 @@ func (pi *prefixIterator) Key() (key []byte) { } // Implements Iterator -func (pi *prefixIterator) Value() []byte { +func (pi *prefixIterator[V]) Value() V { if !pi.valid { panic("prefixIterator invalid, cannot call Value()") } @@ -178,13 +214,13 @@ func (pi *prefixIterator) Value() []byte { } // Implements Iterator -func (pi *prefixIterator) Close() error { +func (pi *prefixIterator[V]) Close() error { return pi.iter.Close() } // Error returns an error if the prefixIterator is invalid defined by the Valid // method. -func (pi *prefixIterator) Error() error { +func (pi *prefixIterator[V]) Error() error { if !pi.Valid() { return errors.New("invalid prefixIterator") } diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index b9987455097a..7c2997a14500 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -66,9 +66,10 @@ type Store struct { // iavlSyncPruning should rarely be set to true. // The Prune command will automatically set this to true. // This allows the prune command to wait for the pruning to finish before returning. - iavlSyncPruning bool - storesParams map[types.StoreKey]storeParams - stores map[types.StoreKey]types.CommitKVStore + iavlSyncPruning bool + storesParams map[types.StoreKey]storeParams + // CommitStore is a common interface to unify generic CommitKVStore of different value types + stores map[types.StoreKey]types.CommitStore keysByName map[string]types.StoreKey initialVersion int64 removalMap map[types.StoreKey]bool @@ -97,7 +98,7 @@ func NewStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) iavlCacheSize: iavl.DefaultIAVLCacheSize, iavlDisableFastNode: iavlDisablefastNodeDefault, storesParams: make(map[types.StoreKey]storeParams), - stores: make(map[types.StoreKey]types.CommitKVStore), + stores: make(map[types.StoreKey]types.CommitStore), keysByName: make(map[string]types.StoreKey), listeners: make(map[types.StoreKey]*types.MemoryListener), removalMap: make(map[types.StoreKey]bool), @@ -164,12 +165,6 @@ func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db db // GetCommitStore returns a mounted CommitStore for a given StoreKey. If the // store is wrapped in an inter-block cache, it will be unwrapped before returning. func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { - return rs.GetCommitKVStore(key) -} - -// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { // If the Store has an inter-block cache, first attempt to lookup and unwrap // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to // the main mapping of CommitKVStores. @@ -182,6 +177,17 @@ func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { return rs.stores[key] } +// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. 
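
The generic prefix store above works the same way for object values: keys are transparently prefixed while the value type flows through unchanged. A sketch, assuming a transient object store as the base:

    base := transient.NewObjStore()
    orders := prefix.NewObjStore(base, []byte("orders/"))
    orders.Set([]byte("42"), map[string]any{"qty": 3})
    _ = base.Has([]byte("orders/42")) // true, the prefix is applied on the way down
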
+func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { + store, ok := rs.GetCommitStore(key).(types.CommitKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not CommitKVStore", key)) + } + + return store +} + // StoreKeysByName returns mapping storeNames -> StoreKeys func (rs *Store) StoreKeysByName() map[string]types.StoreKey { return rs.keysByName @@ -230,7 +236,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { } // load each Store (note this doesn't panic on unmounted keys now) - newStores := make(map[types.StoreKey]types.CommitKVStore) + newStores := make(map[types.StoreKey]types.CommitStore) storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) @@ -561,15 +567,17 @@ func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac func (rs *Store) CacheMultiStore() types.CacheMultiStore { stores := make(map[types.StoreKey]types.CacheWrapper) for k, v := range rs.stores { - store := types.KVStore(v) - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(k) { - store = listenkv.NewStore(store, k, rs.listeners[k]) + store := types.CacheWrapper(v) + if kv, ok := store.(types.KVStore); ok { + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. + if rs.ListeningEnabled(k) { + store = listenkv.NewStore(kv, k, rs.listeners[k]) + } } stores[k] = store } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) + return cachemulti.NewFromKVStore(stores, rs.traceWriter, rs.getTracingContext()) } // CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it @@ -581,7 +589,7 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor var commitInfo *types.CommitInfo storeInfos := map[string]bool{} for key, store := range rs.stores { - var cacheStore types.KVStore + var cacheStore types.CacheWrapper switch store.GetStoreType() { case types.StoreTypeIAVL: // If the store is wrapped with an inter-block cache, we must first unwrap @@ -620,16 +628,18 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor cacheStore = store } - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(key) { - cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) + if kv, ok := cacheStore.(types.KVStore); ok { + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. + if rs.ListeningEnabled(key) { + cacheStore = listenkv.NewStore(kv, key, rs.listeners[key]) + } } cachedStores[key] = cacheStore } - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil + return cachemulti.NewFromKVStore(cachedStores, rs.traceWriter, rs.getTracingContext()), nil } // GetStore returns a mounted Store for a given StoreKey. If the StoreKey does @@ -639,7 +649,7 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor // TODO: This isn't used directly upstream. Consider returning the Store as-is // instead of unwrapping. 
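
With the store map widened to CommitStore, object stores can be mounted next to IAVL and transient stores and fetched with GetObjKVStore. A sketch, where `rs` and `myValue` are placeholders:

    objKey := types.NewObjectStoreKey("objects")
    rs.MountStoreWithDB(objKey, types.StoreTypeObject, nil)
    if err := rs.LoadLatestVersion(); err != nil {
    	panic(err)
    }
    rs.GetObjKVStore(objKey).Set([]byte("k"), &myValue) // never persisted, skipped by commits and snapshots
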
func (rs *Store) GetStore(key types.StoreKey) types.Store { - store := rs.GetCommitKVStore(key) + store := rs.GetCommitStore(key) if store == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } @@ -658,7 +668,10 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { if s == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } - store := types.KVStore(s) + store, ok := s.(types.KVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not KVStore", key)) + } if rs.TracingEnabled() { store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) @@ -670,6 +683,20 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { return store } +// GetObjKVStore returns a mounted ObjKVStore for a given StoreKey. +func (rs *Store) GetObjKVStore(key types.StoreKey) types.ObjKVStore { + s := rs.stores[key] + if s == nil { + panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) + } + store, ok := s.(types.ObjKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not ObjKVStore", key)) + } + + return store +} + func (rs *Store) handlePruning(version int64) error { pruneHeight := rs.pruningManager.GetPruningHeight(version) rs.logger.Debug("prune start", "height", version) @@ -721,7 +748,7 @@ func (rs *Store) GetStoreByName(name string) types.Store { return nil } - return rs.GetCommitKVStore(key) + return rs.GetCommitStore(key) } // Query calls substore.Query with the same `req` where `req.Path` is @@ -834,10 +861,10 @@ func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error { stores := []namedStore{} keys := keysFromStoreKeyMap(rs.stores) for _, key := range keys { - switch store := rs.GetCommitKVStore(key).(type) { + switch store := rs.GetCommitStore(key).(type) { case *iavl.Store: stores = append(stores, namedStore{name: key.Name(), Store: store}) - case *transient.Store, *mem.Store: + case *transient.Store, *mem.Store, *transient.ObjStore: // Non-persisted stores shouldn't be snapshotted continue default: @@ -997,7 +1024,7 @@ loop: return snapshotItem, rs.LoadLatestVersion() } -func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) { +func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitStore, error) { var db dbm.DB if params.db != nil { @@ -1029,20 +1056,26 @@ func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}}, nil case types.StoreTypeTransient: - _, ok := key.(*types.TransientStoreKey) - if !ok { - return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String()) + if _, ok := key.(*types.TransientStoreKey); !ok { + return nil, fmt.Errorf("unexpected key type for a TransientStoreKey; got: %s, %T", key.String(), key) } return transient.NewStore(), nil case types.StoreTypeMemory: if _, ok := key.(*types.MemoryStoreKey); !ok { - return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String()) + return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s, %T", key.String(), key) } return mem.NewStore(), nil + case types.StoreTypeObject: + if _, ok := key.(*types.ObjectStoreKey); !ok { + return nil, fmt.Errorf("unexpected key type for a ObjectStoreKey; got: %s, %T", key.String(), key) + } + + return transient.NewObjStore(), nil + default: panic(fmt.Sprintf("unrecognized store type %v", params.typ)) } @@ -1054,7 
+1087,7 @@ func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo { for _, key := range keys { store := rs.stores[key] storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { + if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory || storeType == types.StoreTypeObject { continue } storeInfos = append(storeInfos, types.StoreInfo{ @@ -1172,7 +1205,7 @@ func GetLatestVersion(db dbm.DB) int64 { } // Commits each store and returns a new commitInfo. -func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { +func commitStores(version int64, storeMap map[types.StoreKey]types.CommitStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { storeInfos := make([]types.StoreInfo, 0, len(storeMap)) storeKeys := keysFromStoreKeyMap(storeMap) @@ -1192,7 +1225,7 @@ func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore } storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { + if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory || storeType == types.StoreTypeObject { continue } diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 495a1a8af3ed..d089b399bc7f 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -800,6 +800,7 @@ var ( testStoreKey1 = types.NewKVStoreKey("store1") testStoreKey2 = types.NewKVStoreKey("store2") testStoreKey3 = types.NewKVStoreKey("store3") + testStoreKey4 = types.NewKVStoreKey("store4") ) func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store { @@ -872,7 +873,7 @@ func getExpectedCommitID(store *Store, ver int64) types.CommitID { } } -func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte { +func hashStores(stores map[types.StoreKey]types.CommitStore) []byte { m := make(map[string][]byte, len(stores)) for key, store := range stores { name := key.Name() @@ -927,35 +928,40 @@ func TestStateListeners(t *testing.T) { require.Empty(t, ms.PopStateCache()) } -type commitKVStoreStub struct { - types.CommitKVStore +type commitStoreStub struct { + types.CommitStore Committed int } -func (stub *commitKVStoreStub) Commit() types.CommitID { - commitID := stub.CommitKVStore.Commit() +func (stub *commitStoreStub) Commit() types.CommitID { + commitID := stub.CommitStore.Commit() stub.Committed++ return commitID } -func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) { +func prepareStoreMap() (map[types.StoreKey]types.CommitStore, error) { var db dbm.DB = dbm.NewMemDB() store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) + store.MountStoreWithDB(types.NewMemoryStoreKey("mem1"), types.StoreTypeMemory, nil) + store.MountStoreWithDB(types.NewObjectStoreKey("obj1"), types.StoreTypeObject, nil) if err := store.LoadLatestVersion(); err != nil { return nil, err } - return map[types.StoreKey]types.CommitKVStore{ - testStoreKey1: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore), + return map[types.StoreKey]types.CommitStore{ + testStoreKey1: &commitStoreStub{ + CommitStore: 
store.GetStoreByName("iavl1").(types.CommitStore), }, - testStoreKey2: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore), + testStoreKey2: &commitStoreStub{ + CommitStore: store.GetStoreByName("iavl2").(types.CommitStore), }, - testStoreKey3: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore), + testStoreKey3: &commitStoreStub{ + CommitStore: store.GetStoreByName("trans1").(types.CommitStore), + }, + testStoreKey4: &commitStoreStub{ + CommitStore: store.GetStoreByName("obj1").(types.CommitStore), }, }, nil } @@ -986,7 +992,7 @@ func TestCommitStores(t *testing.T) { t.Run(tc.name, func(t *testing.T) { storeMap, err := prepareStoreMap() require.NoError(t, err) - store := storeMap[testStoreKey1].(*commitKVStoreStub) + store := storeMap[testStoreKey1].(*commitStoreStub) for i := tc.committed; i > 0; i-- { store.Commit() } diff --git a/store/tracekv/store.go b/store/tracekv/store.go index ba6df431da16..b0960cdb51e8 100644 --- a/store/tracekv/store.go +++ b/store/tracekv/store.go @@ -161,10 +161,9 @@ func (tkv *Store) GetStoreType() types.StoreType { return tkv.parent.GetStoreType() } -// CacheWrap implements the KVStore interface. It panics because a Store -// cannot be branched. +// CacheWrap implements CacheWrapper. func (tkv *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a TraceKVStore") + return tkv.parent.CacheWrap() } // CacheWrapWithTrace implements the KVStore interface. It panics as a diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go index 2c42734baefd..2e91c338a3fb 100644 --- a/store/tracekv/store_test.go +++ b/store/tracekv/store_test.go @@ -283,7 +283,7 @@ func TestTraceKVStoreGetStoreType(t *testing.T) { func TestTraceKVStoreCacheWrap(t *testing.T) { store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) + store.CacheWrap() } func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { diff --git a/store/transient/store.go b/store/transient/store.go index 6f393279f571..53332e9f33c1 100644 --- a/store/transient/store.go +++ b/store/transient/store.go @@ -1,9 +1,8 @@ package transient import ( - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal" + "cosmossdk.io/store/internal/btree" pruningtypes "cosmossdk.io/store/pruning/types" "cosmossdk.io/store/types" ) @@ -11,43 +10,73 @@ import ( var ( _ types.Committer = (*Store)(nil) _ types.KVStore = (*Store)(nil) + + _ types.Committer = (*ObjStore)(nil) + _ types.ObjKVStore = (*ObjStore)(nil) ) // Store is a wrapper for a MemDB with Commiter implementation +type GStore[V any] struct { + internal.BTreeStore[V] +} + +// NewGStore constructs new generic transient store +func NewGStore[V any](isZero func(V) bool, valueLen func(V) int) *GStore[V] { + return &GStore[V]{*internal.NewBTreeStore(btree.NewBTree[V](), isZero, valueLen)} +} + +// Store specializes GStore for []byte type Store struct { - dbadapter.Store + GStore[[]byte] } -// Constructs new MemDB adapter func NewStore() *Store { - return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}} + return &Store{*NewGStore( + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + )} +} + +func (*Store) GetStoreType() types.StoreType { + return types.StoreTypeTransient +} + +// ObjStore specializes GStore for any +type ObjStore struct { + GStore[any] +} + +func NewObjStore() *ObjStore { + return &ObjStore{*NewGStore( + func(v any) bool { return v == nil }, + func(v any) int { 
return 1 }, // for value length validation + )} +} + +func (*ObjStore) GetStoreType() types.StoreType { + return types.StoreTypeObject } // Implements CommitStore // Commit cleans up Store. -func (ts *Store) Commit() (id types.CommitID) { - ts.Store = dbadapter.Store{DB: dbm.NewMemDB()} +func (ts *GStore[V]) Commit() (id types.CommitID) { + ts.Clear() return } -func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {} +func (ts *GStore[V]) SetPruning(_ pruningtypes.PruningOptions) {} // GetPruning is a no-op as pruning options cannot be directly set on this store. // They must be set on the root commit multi-store. -func (ts *Store) GetPruning() pruningtypes.PruningOptions { +func (ts *GStore[V]) GetPruning() pruningtypes.PruningOptions { return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) } // Implements CommitStore -func (ts *Store) LastCommitID() types.CommitID { +func (ts *GStore[V]) LastCommitID() types.CommitID { return types.CommitID{} } -func (ts *Store) WorkingHash() []byte { +func (ts *GStore[V]) WorkingHash() []byte { return []byte{} } - -// Implements Store. -func (ts *Store) GetStoreType() types.StoreType { - return types.StoreTypeTransient -} diff --git a/store/types/store.go b/store/types/store.go index 0adf42d2b0ab..d506dfff7613 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -120,14 +120,12 @@ type MultiStore interface { // call CacheMultiStore.Write(). CacheMultiStore() CacheMultiStore - // CacheMultiStoreWithVersion branches the underlying MultiStore where - // each stored is loaded at a specific version (height). - CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) - + // GetStore is convenience for fetching substores. // Convenience for fetching substores. // If the store does not exist, panics. GetStore(StoreKey) Store GetKVStore(StoreKey) KVStore + GetObjKVStore(StoreKey) ObjKVStore // TracingEnabled returns if tracing is enabled for the MultiStore. TracingEnabled() bool @@ -141,6 +139,14 @@ type MultiStore interface { // implied that the caller should update the context when necessary between // tracing operations. The modified MultiStore is returned. SetTracingContext(TraceContext) MultiStore +} + +type RootMultiStore interface { + MultiStore + + // CacheMultiStoreWithVersion branches the underlying MultiStore where + // each stored is loaded at a specific version (height). + CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) // LatestVersion returns the latest version in the store LatestVersion() int64 @@ -150,12 +156,14 @@ type MultiStore interface { type CacheMultiStore interface { MultiStore Write() // Writes operations to underlying KVStore + + RunAtomic(func(CacheMultiStore) error) error } // CommitMultiStore is an interface for a MultiStore without cache capabilities. type CommitMultiStore interface { Committer - MultiStore + RootMultiStore snapshottypes.Snapshotter // Mount a store of type using the given db. @@ -227,25 +235,25 @@ type CommitMultiStore interface { //---------subsp------------------------------- // KVStore -// BasicKVStore is a simple interface to get/set data -type BasicKVStore interface { +// GBasicKVStore is a simple interface to get/set data +type GBasicKVStore[V any] interface { // Get returns nil if key doesn't exist. Panics on nil key. - Get(key []byte) []byte + Get(key []byte) V // Has checks if a key exists. Panics on nil key. Has(key []byte) bool // Set sets the key. Panics on nil key or value. 
- Set(key, value []byte) + Set(key []byte, value V) // Delete deletes the key. Panics on nil key. Delete(key []byte) } -// KVStore additionally provides iteration and deletion -type KVStore interface { +// GKVStore additionally provides iteration and deletion +type GKVStore[V any] interface { Store - BasicKVStore + GBasicKVStore[V] // Iterator over a domain of keys in ascending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. @@ -253,18 +261,54 @@ type KVStore interface { // To iterate over entire domain, use store.Iterator(nil, nil) // CONTRACT: No writes may happen within a domain while an iterator exists over it. // Exceptionally allowed for cachekv.Store, safe to write in the modules. - Iterator(start, end []byte) Iterator + Iterator(start, end []byte) GIterator[V] // Iterator over a domain of keys in descending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. // Iterator must be closed by caller. // CONTRACT: No writes may happen within a domain while an iterator exists over it. // Exceptionally allowed for cachekv.Store, safe to write in the modules. - ReverseIterator(start, end []byte) Iterator + ReverseIterator(start, end []byte) GIterator[V] +} + +// GIterator is the generic version of dbm's Iterator +type GIterator[V any] interface { + // Domain returns the start (inclusive) and end (exclusive) limits of the iterator. + // CONTRACT: start, end readonly []byte + Domain() (start, end []byte) + + // Valid returns whether the current iterator is valid. Once invalid, the Iterator remains + // invalid forever. + Valid() bool + + // Next moves the iterator to the next key in the database, as defined by order of iteration. + // If Valid returns false, this method will panic. + Next() + + // Key returns the key at the current position. Panics if the iterator is invalid. + // CONTRACT: key readonly []byte + Key() (key []byte) + + // Value returns the value at the current position. Panics if the iterator is invalid. + // CONTRACT: value readonly []byte + Value() (value V) + + // Error returns the last error encountered by the iterator, if any. + Error() error + + // Close closes the iterator, releasing any allocated resources. + Close() error } -// Iterator is an alias db's Iterator for convenience. -type Iterator = dbm.Iterator +type ( + Iterator = GIterator[[]byte] + BasicKVStore = GBasicKVStore[[]byte] + KVStore = GKVStore[[]byte] + + ObjIterator = GIterator[any] + ObjBasicKVStore = GBasicKVStore[any] + ObjKVStore = GKVStore[any] +) // CacheKVStore branches a KVStore and provides read cache functionality. // After calling .Write() on the CacheKVStore, all previously created @@ -290,14 +334,13 @@ type CommitKVStore interface { // a Committer, since Commit ephemeral store make no sense. It can return KVStore, // HeapStore, SpaceStore, etc. type CacheWrap interface { + CacheWrapper + // Write syncs with the underlying store. Write() - // CacheWrap recursively wraps again. - CacheWrap() CacheWrap - - // CacheWrapWithTrace recursively wraps again with tracing enabled. 
- CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap + // Discard discards the write set. + Discard() } type CacheWrapper interface { @@ -316,6 +359,12 @@ func (cid CommitID) String() string { return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) } +// BranchStore is a store that can clone its current state and later restore it from a clone. +type BranchStore interface { + Clone() BranchStore + Restore(BranchStore) +} + //---------------------------------------- // Store types @@ -330,6 +379,7 @@ const ( StoreTypeMemory StoreTypeSMT StoreTypePersistent + StoreTypeObject ) func (st StoreType) String() string { @@ -354,6 +404,9 @@ func (st StoreType) String() string { case StoreTypePersistent: return "StoreTypePersistent" + + case StoreTypeObject: + return "StoreTypeObject" } return "unknown store type" @@ -433,6 +486,29 @@ func (key *TransientStoreKey) String() string { return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name) } +// ObjectStoreKey is used for indexing object stores in a MultiStore +type ObjectStoreKey struct { + name string +} + +// Constructs new ObjectStoreKey +// Must return a pointer according to the ocap principle +func NewObjectStoreKey(name string) *ObjectStoreKey { + return &ObjectStoreKey{ + name: name, + } +} + +// Implements StoreKey +func (key *ObjectStoreKey) Name() string { + return key.name +} + +// Implements StoreKey +func (key *ObjectStoreKey) String() string { + return fmt.Sprintf("ObjectStoreKey{%p, %s}", key, key.name) +} + // MemoryStoreKey defines a typed key to be used with an in-memory KVStore. type MemoryStoreKey struct { name string @@ -526,3 +602,17 @@ func NewMemoryStoreKeys(names ...string) map[string]*MemoryStoreKey { return keys } + +// NewObjectStoreKeys constructs a new map matching store key names to their +// respective ObjectStoreKey references. +// The function will panic if there is a potential conflict in names (see `assertNoPrefix` +// function for more details). +func NewObjectStoreKeys(names ...string) map[string]*ObjectStoreKey { + assertNoCommonPrefix(names) + keys := make(map[string]*ObjectStoreKey) + for _, n := range names { + keys[n] = NewObjectStoreKey(n) + } + + return keys +} diff --git a/store/types/validity.go b/store/types/validity.go index a1fbaba999c7..cfb6088c8a32 100644 --- a/store/types/validity.go +++ b/store/types/validity.go @@ -1,5 +1,7 @@ package types +import "errors" + var ( // 128K - 1 MaxKeyLength = (1 << 17) - 1 @@ -22,7 +24,20 @@ func AssertValidValue(value []byte) { if value == nil { panic("value is nil") } - if len(value) > MaxValueLength { - panic("value is too large") + AssertValidValueLength(len(value)) +} + +// AssertValidValueGeneric checks if the value is valid (not nil and within the length limit) +func AssertValidValueGeneric[V any](value V, isZero func(V) bool, valueLen func(V) int) { + if isZero(value) { + panic("value is nil") + } + AssertValidValueLength(valueLen(value)) +} + +// AssertValidValueLength checks if the value length is within the length limit +func AssertValidValueLength(l int) { + if l > MaxValueLength { + panic(errors.New("value is too large")) + } } diff --git a/systemtests/go.mod b/systemtests/go.mod index 77bb60c992e0..557884085fd1 100644 --- a/systemtests/go.mod +++ b/systemtests/go.mod @@ -161,3 +161,8 @@ require ( pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) + +replace ( + cosmossdk.io/store => ../store + github.com/cosmos/cosmos-sdk => .. 
+) diff --git a/systemtests/go.sum b/systemtests/go.sum index efb9ceef0010..609a8b5d0138 100644 --- a/systemtests/go.sum +++ b/systemtests/go.sum @@ -16,8 +16,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -147,8 +145,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= diff --git a/tests/go.mod b/tests/go.mod index 8ce2805cc2f2..d029fc0727da 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -221,6 +221,7 @@ require ( // replace ( // // ) +replace cosmossdk.io/store => ../store // Below are the long-lived replace for tests. 
replace ( diff --git a/tests/go.sum b/tests/go.sum index 5534b27cba74..bd004461bece 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -632,8 +632,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/circuit v0.2.0 h1:RJPMBQWCQU77EcM9HDTBnqRhq21fcUxgWZl7BZylJZo= cosmossdk.io/x/circuit v0.2.0/go.mod h1:CjiGXDeZs64nMv0fG+QmvGVTcn7n3Sv4cDszMRR2JqU= cosmossdk.io/x/evidence v0.2.0 h1:o72zbmgCM7U0v7z7b0XnMB+NqX0tFamqb1HHkQbhrZ0= diff --git a/tests/integration/bank/keeper/deterministic_test.go b/tests/integration/bank/keeper/deterministic_test.go index d5fe027f5c61..ca939ff3d26a 100644 --- a/tests/integration/bank/keeper/deterministic_test.go +++ b/tests/integration/bank/keeper/deterministic_test.go @@ -70,7 +70,8 @@ func initDeterministicFixture(t *testing.T) *deterministicFixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, bank.AppModuleBasic{}).Codec logger := log.NewTestLogger(t) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) @@ -95,6 +96,7 @@ func initDeterministicFixture(t *testing.T) *deterministicFixture { } bankKeeper := keeper.NewBaseKeeper( cdc, + storetypes.NewObjectStoreKey(banktypes.ObjectStoreKey), runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/distribution/keeper/msg_server_test.go b/tests/integration/distribution/keeper/msg_server_test.go index bc0434af8886..e15e8a9bde7b 100644 --- a/tests/integration/distribution/keeper/msg_server_test.go +++ b/tests/integration/distribution/keeper/msg_server_test.go @@ -65,7 +65,8 @@ func initFixture(tb testing.TB) *fixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, distribution.AppModuleBasic{}).Codec logger := log.NewTestLogger(tb) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, types.Header{}, true, logger) @@ -92,6 +93,7 @@ func initFixture(tb testing.TB) *fixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/evidence/keeper/infraction_test.go b/tests/integration/evidence/keeper/infraction_test.go index 18a507f96de8..0a140177ebc1 100644 --- a/tests/integration/evidence/keeper/infraction_test.go +++ b/tests/integration/evidence/keeper/infraction_test.go @@ -86,7 +86,8 @@ func initFixture(tb testing.TB) *fixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, evidence.AppModuleBasic{}).Codec logger := log.NewTestLogger(tb) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) @@ -113,6 +114,7 @@ 
func initFixture(tb testing.TB) *fixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/gov/keeper/keeper_test.go b/tests/integration/gov/keeper/keeper_test.go index 388e0be915c1..5bca3a23dd91 100644 --- a/tests/integration/gov/keeper/keeper_test.go +++ b/tests/integration/gov/keeper/keeper_test.go @@ -56,7 +56,8 @@ func initFixture(tb testing.TB) *fixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, bank.AppModuleBasic{}, gov.AppModuleBasic{}).Codec logger := log.NewTestLogger(tb) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) @@ -85,6 +86,7 @@ func initFixture(tb testing.TB) *fixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/slashing/keeper/keeper_test.go b/tests/integration/slashing/keeper/keeper_test.go index fc296c86d370..cfbc11f67016 100644 --- a/tests/integration/slashing/keeper/keeper_test.go +++ b/tests/integration/slashing/keeper/keeper_test.go @@ -58,7 +58,8 @@ func initFixture(tb testing.TB) *fixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}).Codec logger := log.NewTestLogger(tb) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) @@ -85,6 +86,7 @@ func initFixture(tb testing.TB) *fixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/staking/keeper/common_test.go b/tests/integration/staking/keeper/common_test.go index 7ed49482d348..586ccd969006 100644 --- a/tests/integration/staking/keeper/common_test.go +++ b/tests/integration/staking/keeper/common_test.go @@ -104,7 +104,8 @@ func initFixture(tb testing.TB) *fixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, staking.AppModuleBasic{}).Codec logger := log.NewTestLogger(tb) - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtprototypes.Header{}, true, logger) @@ -132,6 +133,7 @@ func initFixture(tb testing.TB) *fixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/integration/staking/keeper/determinstic_test.go b/tests/integration/staking/keeper/determinstic_test.go index 170c51535d56..3d94be1cafc0 100644 --- a/tests/integration/staking/keeper/determinstic_test.go +++ b/tests/integration/staking/keeper/determinstic_test.go @@ -73,7 +73,8 @@ func initDeterministicFixture(t *testing.T) *deterministicFixture { cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}, distribution.AppModuleBasic{}).Codec logger := log.NewTestLogger(t) - cms := integration.CreateMultiStore(keys, logger) + okeys := 
storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) @@ -101,6 +102,7 @@ func initDeterministicFixture(t *testing.T) *deterministicFixture { } bankKeeper := bankkeeper.NewBaseKeeper( cdc, + okeys[banktypes.ObjectStoreKey], runtime.NewKVStoreService(keys[banktypes.StoreKey]), accountKeeper, blockedAddresses, diff --git a/tests/systemtests/go.mod b/tests/systemtests/go.mod index ee545f634402..4b02e020e108 100644 --- a/tests/systemtests/go.mod +++ b/tests/systemtests/go.mod @@ -168,3 +168,5 @@ require ( pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) + +replace cosmossdk.io/store => ../../store diff --git a/tests/systemtests/go.sum b/tests/systemtests/go.sum index c2eff28444eb..015566df99ef 100644 --- a/tests/systemtests/go.sum +++ b/tests/systemtests/go.sum @@ -16,8 +16,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= -cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= -cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= diff --git a/testutil/context.go b/testutil/context.go index 288f7fc55bcd..492b7a4e63f4 100644 --- a/testutil/context.go +++ b/testutil/context.go @@ -80,3 +80,18 @@ func DefaultContextWithDB(tb testing.TB, key, tkey storetypes.StoreKey) TestCont return TestContext{ctx, db, cms} } + +func DefaultContextWithObjectStore(tb testing.TB, key, tkey, okey storetypes.StoreKey) TestContext { + tb.Helper() + db := dbm.NewMemDB() + cms := store.NewCommitMultiStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) + cms.MountStoreWithDB(tkey, storetypes.StoreTypeTransient, nil) + cms.MountStoreWithDB(okey, storetypes.StoreTypeObject, nil) + err := cms.LoadLatestVersion() + assert.NoError(tb, err) + + ctx := sdk.NewContext(cms, cmtproto.Header{Time: time.Now()}, false, log.NewNopLogger()) + + return TestContext{ctx, db, cms} +} diff --git a/testutil/integration/example_test.go b/testutil/integration/example_test.go index b395324c1917..47ae474ab9ff 100644 --- a/testutil/integration/example_test.go +++ b/testutil/integration/example_test.go @@ -20,6 +20,7 @@ import ( authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" @@ -36,8 +37,8 @@ func Example() { // replace the logger by testing values in a real test case (e.g. 
log.NewTestLogger(t)) logger := log.NewNopLogger() - - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) accountKeeper := authkeeper.NewAccountKeeper( @@ -125,8 +126,8 @@ func Example_oneModule() { // replace the logger by testing values in a real test case (e.g. log.NewTestLogger(t)) logger := log.NewLogger(io.Discard) - - cms := integration.CreateMultiStore(keys, logger) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) + cms := integration.CreateMultiStore(keys, okeys, logger) newCtx := sdk.NewContext(cms, cmtproto.Header{}, true, logger) accountKeeper := authkeeper.NewAccountKeeper( diff --git a/testutil/integration/router.go b/testutil/integration/router.go index cae9a308e3ac..01cbd0dac844 100644 --- a/testutil/integration/router.go +++ b/testutil/integration/router.go @@ -181,7 +181,7 @@ func (app *App) QueryHelper() *baseapp.QueryServiceTestHelper { } // CreateMultiStore is a helper for setting up multiple stores for provided modules. -func CreateMultiStore(keys map[string]*storetypes.KVStoreKey, logger log.Logger) storetypes.CommitMultiStore { +func CreateMultiStore(keys map[string]*storetypes.KVStoreKey, okeys map[string]*storetypes.ObjectStoreKey, logger log.Logger) storetypes.CommitMultiStore { db := dbm.NewMemDB() cms := store.NewCommitMultiStore(db, logger, metrics.NewNoOpMetrics()) @@ -189,6 +189,10 @@ func CreateMultiStore(keys map[string]*storetypes.KVStoreKey, logger log.Logger) cms.MountStoreWithDB(keys[key], storetypes.StoreTypeIAVL, db) } + for key := range okeys { + cms.MountStoreWithDB(okeys[key], storetypes.StoreTypeObject, nil) + } + _ = cms.LoadLatestVersion() return cms } diff --git a/types/context.go b/types/context.go index 5742ec41c85d..d5cebab5ca66 100644 --- a/types/context.go +++ b/types/context.go @@ -2,6 +2,7 @@ package types import ( "context" + "errors" "time" abci "github.com/cometbft/cometbft/abci/types" @@ -64,6 +65,21 @@ type Context struct { streamingManager storetypes.StreamingManager cometInfo comet.BlockInfo headerInfo header.Info + + // the index of the current tx in the block, -1 means not in finalize block context + txIndex int + // the index of the current msg in the tx, -1 means not in finalize block context + msgIndex int + // the total number of transactions in current block + txCount int + // sum the gas used by all the transactions in the current block, only accessible by end blocker + blockGasUsed uint64 + // sum the gas wanted by all the transactions in the current block, only accessible by end blocker + blockGasWanted uint64 + + // incarnationCache is shared between multiple incarnations of the same transaction, + // it must only cache stateless computation results that only depends on tx body and block level information that don't change during block execution, like the result of tx signature verification. 
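+	//
+	// Illustrative use only (not part of this patch): a handler can memoize a
+	// stateless result keyed by data derived from the tx body, e.g. a hypothetical
+	// signature check keyed by tx hash (txHash, sigOK and verifySignatures are made-up names):
+	//
+	//	if cached, ok := ctx.GetIncarnationCache("sigverify:" + txHash); ok {
+	//		sigOK = cached.(bool) // reuse the result across incarnations of this tx
+	//	} else {
+	//		sigOK = verifySignatures(tx) // depends only on the tx body
+	//		ctx.SetIncarnationCache("sigverify:"+txHash, sigOK)
+	//	}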
+ incarnationCache map[string]any } // Proposed rename, not done to avoid API breakage @@ -92,8 +108,30 @@ func (c Context) TransientKVGasConfig() storetypes.GasConfig { return c.trans func (c Context) StreamingManager() storetypes.StreamingManager { return c.streamingManager } func (c Context) CometInfo() comet.BlockInfo { return c.cometInfo } func (c Context) HeaderInfo() header.Info { return c.headerInfo } +func (c Context) TxIndex() int { return c.txIndex } +func (c Context) MsgIndex() int { return c.msgIndex } +func (c Context) TxCount() int { return c.txCount } +func (c Context) BlockGasUsed() uint64 { return c.blockGasUsed } +func (c Context) BlockGasWanted() uint64 { return c.blockGasWanted } +func (c Context) IncarnationCache() map[string]any { return c.incarnationCache } + +func (c Context) GetIncarnationCache(key string) (any, bool) { + if c.incarnationCache == nil { + return nil, false + } + val, ok := c.incarnationCache[key] + return val, ok +} -// BlockHeader returns the header by value. +func (c Context) SetIncarnationCache(key string, value any) { + if c.incarnationCache == nil { + // noop if cache is not initialized + return + } + c.incarnationCache[key] = value +} + +// BlockHeader returns the header by value (shallow copy). func (c Context) BlockHeader() cmtproto.Header { return c.header } @@ -138,6 +176,8 @@ func NewContext(ms storetypes.MultiStore, header cmtproto.Header, isCheckTx bool eventManager: NewEventManager(), kvGasConfig: storetypes.KVGasConfig(), transientKVGasConfig: storetypes.TransientGasConfig(), + txIndex: -1, + msgIndex: -1, } } @@ -317,6 +357,36 @@ func (c Context) WithHeaderInfo(headerInfo header.Info) Context { return c } +func (c Context) WithTxIndex(txIndex int) Context { + c.txIndex = txIndex + return c +} + +func (c Context) WithTxCount(txCount int) Context { + c.txCount = txCount + return c +} + +func (c Context) WithMsgIndex(msgIndex int) Context { + c.msgIndex = msgIndex + return c +} + +func (c Context) WithBlockGasUsed(gasUsed uint64) Context { + c.blockGasUsed = gasUsed + return c +} + +func (c Context) WithBlockGasWanted(gasWanted uint64) Context { + c.blockGasWanted = gasWanted + return c +} + +func (c Context) WithIncarnationCache(cache map[string]any) Context { + c.incarnationCache = cache + return c +} + // TODO: remove??? func (c Context) IsZero() bool { return c.ms == nil @@ -349,6 +419,11 @@ func (c Context) TransientStore(key storetypes.StoreKey) storetypes.KVStore { return gaskv.NewStore(c.ms.GetKVStore(key), c.gasMeter, c.transientKVGasConfig) } +// ObjectStore fetches an object store from the MultiStore, wrapped with a gas meter. +func (c Context) ObjectStore(key storetypes.StoreKey) storetypes.ObjKVStore { + return gaskv.NewObjStore(c.ms.GetObjKVStore(key), c.gasMeter, c.transientKVGasConfig) +} + // CacheContext returns a new Context with the multi-store cached and a new // EventManager. The cached context is written to the context when writeCache // is called. Note, events are automatically emitted on the parent context's @@ -365,6 +440,26 @@ func (c Context) CacheContext() (cc Context, writeCache func()) { return cc, writeCache } +// RunAtomic executes the callback function atomically, i.e. the state and event changes are +// only persisted if the callback returns no error, and are otherwise discarded as a whole. +// It uses a more efficient approach than CacheContext, without wrapping the stores.
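+//
+// A minimal usage sketch (illustrative only; k.Transfer is a hypothetical keeper method):
+//
+//	if err := ctx.RunAtomic(func(ctx Context) error {
+//		return k.Transfer(ctx, from, to, amt)
+//	}); err != nil {
+//		// the callback failed: its state writes and events were discarded
+//	}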
+func (c Context) RunAtomic(cb func(Context) error) error { + evtManager := NewEventManager() + cacheMS, ok := c.ms.(storetypes.CacheMultiStore) + if !ok { + return errors.New("multistore is not a CacheMultiStore") + } + if err := cacheMS.RunAtomic(func(ms storetypes.CacheMultiStore) error { + ctx := c.WithMultiStore(ms).WithEventManager(evtManager) + return cb(ctx) + }); err != nil { + return err + } + + c.EventManager().EmitEvents(evtManager.Events()) + return nil +} + var ( _ context.Context = Context{} _ storetypes.Context = Context{} diff --git a/types/mempool/mempool.go b/types/mempool/mempool.go index 968f2d947666..d5fac6d04a9a 100644 --- a/types/mempool/mempool.go +++ b/types/mempool/mempool.go @@ -7,11 +7,30 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +type Tx struct { + Tx sdk.Tx + GasWanted uint64 +} + +func NewMempoolTx(tx sdk.Tx, gasWanted uint64) Tx { + return Tx{ + Tx: tx, + GasWanted: gasWanted, + } +} + +type GasTx interface { + GetGas() uint64 +} + type Mempool interface { // Insert attempts to insert a Tx into the app-side mempool returning // an error upon failure. Insert(context.Context, sdk.Tx) error + // Insert with a custom gas wanted value + InsertWithGasWanted(context.Context, sdk.Tx, uint64) error + // Select returns an Iterator over the app-side mempool. If txs are specified, // then they shall be incorporated into the Iterator. The Iterator is not thread-safe to use. Select(context.Context, [][]byte) Iterator @@ -31,7 +50,7 @@ type ExtMempool interface { Mempool // SelectBy use callback to iterate over the mempool, it's thread-safe to use. - SelectBy(context.Context, [][]byte, func(sdk.Tx) bool) + SelectBy(context.Context, [][]byte, func(Tx) bool) } // Iterator defines an app-side mempool iterator interface that is as minimal as @@ -43,7 +62,7 @@ type Iterator interface { Next() Iterator // Tx returns the transaction at the current position of the iterator. - Tx() sdk.Tx + Tx() Tx } var ( @@ -53,7 +72,7 @@ var ( // SelectBy is compatible with old interface to avoid breaking api. // In v0.52+, this function is removed and SelectBy is merged into Mempool interface. -func SelectBy(ctx context.Context, mempool Mempool, txs [][]byte, callback func(sdk.Tx) bool) { +func SelectBy(ctx context.Context, mempool Mempool, txs [][]byte, callback func(Tx) bool) { if ext, ok := mempool.(ExtMempool); ok { ext.SelectBy(ctx, txs, callback) return diff --git a/types/mempool/mempool_test.go b/types/mempool/mempool_test.go index d84baaebaa68..be18cf329d4e 100644 --- a/types/mempool/mempool_test.go +++ b/types/mempool/mempool_test.go @@ -139,7 +139,7 @@ func fetchTxs(iterator mempool.Iterator, maxBytes int64) []sdk.Tx { if numBytes += txSize; numBytes > maxBytes { break } - txs = append(txs, iterator.Tx()) + txs = append(txs, iterator.Tx().Tx) i := iterator.Next() iterator = i } diff --git a/types/mempool/noop.go b/types/mempool/noop.go index 0a8fdec6f566..edc397dd2f5d 100644 --- a/types/mempool/noop.go +++ b/types/mempool/noop.go @@ -16,8 +16,9 @@ var _ ExtMempool = (*NoOpMempool)(nil) // is FIFO-ordered by default. 
type NoOpMempool struct{} -func (NoOpMempool) Insert(context.Context, sdk.Tx) error { return nil } -func (NoOpMempool) Select(context.Context, [][]byte) Iterator { return nil } -func (NoOpMempool) SelectBy(context.Context, [][]byte, func(sdk.Tx) bool) {} -func (NoOpMempool) CountTx() int { return 0 } -func (NoOpMempool) Remove(sdk.Tx) error { return nil } +func (NoOpMempool) Insert(context.Context, sdk.Tx) error { return nil } +func (NoOpMempool) InsertWithGasWanted(context.Context, sdk.Tx, uint64) error { return nil } +func (NoOpMempool) Select(context.Context, [][]byte) Iterator { return nil } +func (NoOpMempool) SelectBy(context.Context, [][]byte, func(Tx) bool) {} +func (NoOpMempool) CountTx() int { return 0 } +func (NoOpMempool) Remove(sdk.Tx) error { return nil } diff --git a/types/mempool/priority_nonce.go b/types/mempool/priority_nonce.go index abee245a6618..4cc9acab3513 100644 --- a/types/mempool/priority_nonce.go +++ b/types/mempool/priority_nonce.go @@ -189,7 +189,7 @@ func (mp *PriorityNonceMempool[C]) NextSenderTx(sender string) sdk.Tx { } cursor := senderIndex.Front() - return cursor.Value.(sdk.Tx) + return cursor.Value.(Tx).Tx } // Insert attempts to insert a Tx into the app-side mempool in O(log n) time, @@ -201,7 +201,7 @@ func (mp *PriorityNonceMempool[C]) NextSenderTx(sender string) sdk.Tx { // // Inserting a duplicate tx with a different priority overwrites the existing tx, // changing the total order of the mempool. -func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error { +func (mp *PriorityNonceMempool[C]) InsertWithGasWanted(ctx context.Context, tx sdk.Tx, gasWanted uint64) error { mp.mtx.Lock() defer mp.mtx.Unlock() if mp.cfg.MaxTx > 0 && mp.priorityIndex.Len() >= mp.cfg.MaxTx { @@ -210,6 +210,8 @@ func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error return nil } + memTx := NewMempoolTx(tx, gasWanted) + sigs, err := mp.cfg.SignerExtractor.GetSigners(tx) if err != nil { return err @@ -247,12 +249,12 @@ func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error // changes. sk := txMeta[C]{nonce: nonce, sender: sender} if oldScore, txExists := mp.scores[sk]; txExists { - if mp.cfg.TxReplacement != nil && !mp.cfg.TxReplacement(oldScore.priority, priority, senderIndex.Get(key).Value.(sdk.Tx), tx) { + if mp.cfg.TxReplacement != nil && !mp.cfg.TxReplacement(oldScore.priority, priority, senderIndex.Get(key).Value.(Tx).Tx, tx) { return fmt.Errorf( "tx doesn't fit the replacement rule, oldPriority: %v, newPriority: %v, oldTx: %v, newTx: %v", oldScore.priority, priority, - senderIndex.Get(key).Value.(sdk.Tx), + senderIndex.Get(key).Value.(Tx).Tx, tx, ) } @@ -270,7 +272,7 @@ func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error // Since senderIndex is scored by nonce, a changed priority will overwrite the // existing key. 
- key.senderElement = senderIndex.Set(key, tx) + key.senderElement = senderIndex.Set(key, memTx) mp.scores[sk] = txMeta[C]{priority: priority} mp.priorityIndex.Set(key, tx) @@ -278,6 +280,15 @@ func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error return nil } +func (mp *PriorityNonceMempool[C]) Insert(ctx context.Context, tx sdk.Tx) error { + var gasLimit uint64 + if gasTx, ok := tx.(GasTx); ok { + gasLimit = gasTx.GetGas() + } + + return mp.InsertWithGasWanted(ctx, tx, gasLimit) +} + func (i *PriorityNonceIterator[C]) iteratePriority() Iterator { // beginning of priority iteration if i.priorityNode == nil { @@ -341,8 +352,8 @@ func (i *PriorityNonceIterator[C]) Next() Iterator { return i } -func (i *PriorityNonceIterator[C]) Tx() sdk.Tx { - return i.senderCursors[i.sender].Value.(sdk.Tx) +func (i *PriorityNonceIterator[C]) Tx() Tx { + return i.senderCursors[i.sender].Value.(Tx) } // Select returns a set of transactions from the mempool, ordered by priority @@ -376,7 +387,7 @@ func (mp *PriorityNonceMempool[C]) doSelect(_ context.Context, _ [][]byte) Itera } // SelectBy will hold the mutex during the iteration, callback returns if continue. -func (mp *PriorityNonceMempool[C]) SelectBy(ctx context.Context, txs [][]byte, callback func(sdk.Tx) bool) { +func (mp *PriorityNonceMempool[C]) SelectBy(ctx context.Context, txs [][]byte, callback func(Tx) bool) { mp.mtx.Lock() defer mp.mtx.Unlock() @@ -448,8 +459,6 @@ func (mp *PriorityNonceMempool[C]) CountTx() int { // Remove removes a transaction from the mempool in O(log n) time, returning an // error if unsuccessful. func (mp *PriorityNonceMempool[C]) Remove(tx sdk.Tx) error { - mp.mtx.Lock() - defer mp.mtx.Unlock() sigs, err := mp.cfg.SignerExtractor.GetSigners(tx) if err != nil { return err @@ -465,6 +474,9 @@ func (mp *PriorityNonceMempool[C]) Remove(tx sdk.Tx) error { return err } + mp.mtx.Lock() + defer mp.mtx.Unlock() + scoreKey := txMeta[C]{nonce: nonce, sender: sender} score, ok := mp.scores[scoreKey] if !ok { diff --git a/types/mempool/priority_nonce_test.go b/types/mempool/priority_nonce_test.go index 91843e578eb1..876a750fe3d6 100644 --- a/types/mempool/priority_nonce_test.go +++ b/types/mempool/priority_nonce_test.go @@ -388,7 +388,7 @@ func (s *MempoolTestSuite) TestIterator() { // iterate through txs iterator := pool.Select(ctx, nil) for iterator != nil { - tx := iterator.Tx().(testTx) + tx := iterator.Tx().Tx.(testTx) require.Equal(t, tt.txs[tx.id].p, int(tx.priority)) require.Equal(t, tt.txs[tx.id].n, int(tx.nonce)) require.Equal(t, tt.txs[tx.id].a, tx.address) @@ -464,8 +464,8 @@ func (s *MempoolTestSuite) TestIteratorConcurrency() { }() var i int - pool.SelectBy(ctx, nil, func(memTx sdk.Tx) bool { - tx := memTx.(testTx) + pool.SelectBy(ctx, nil, func(memTx mempool.Tx) bool { + tx := memTx.Tx.(testTx) if tx.id < len(tt.txs) { require.Equal(t, tt.txs[tx.id].p, int(tx.priority)) require.Equal(t, tt.txs[tx.id].n, int(tx.nonce)) @@ -935,7 +935,7 @@ func TestNextSenderTx_TxReplacement(t *testing.T) { require.Equal(t, 1, mp.CountTx()) iter := mp.Select(ctx, nil) - require.Equal(t, tx, iter.Tx()) + require.Equal(t, tx, iter.Tx().Tx) } // test Priority with TxReplacement @@ -970,7 +970,7 @@ func TestNextSenderTx_TxReplacement(t *testing.T) { require.Equal(t, 1, mp.CountTx()) iter := mp.Select(ctx, nil) - require.Equal(t, txs[3], iter.Tx()) + require.Equal(t, txs[3], iter.Tx().Tx) } func TestPriorityNonceMempool_UnorderedTx_FailsForSequence(t *testing.T) { diff --git a/types/mempool/sender_nonce.go 
b/types/mempool/sender_nonce.go index ce2fa2e2c118..8591fbb332fe 100644 --- a/types/mempool/sender_nonce.go +++ b/types/mempool/sender_nonce.go @@ -113,12 +113,12 @@ func (snm *SenderNonceMempool) NextSenderTx(sender string) sdk.Tx { } cursor := senderIndex.Front() - return cursor.Value.(sdk.Tx) + return cursor.Value.(Tx).Tx } // Insert adds a tx to the mempool. It returns an error if the tx does not have // at least one signer. Note, priority is ignored. -func (snm *SenderNonceMempool) Insert(_ context.Context, tx sdk.Tx) error { +func (snm *SenderNonceMempool) InsertWithGasWanted(_ context.Context, tx sdk.Tx, gasLimit uint64) error { snm.mtx.Lock() defer snm.mtx.Unlock() if snm.maxTx > 0 && len(snm.existingTx) >= snm.maxTx { @@ -128,6 +128,8 @@ func (snm *SenderNonceMempool) Insert(_ context.Context, tx sdk.Tx) error { return nil } + memTx := NewMempoolTx(tx, gasLimit) + sigs, err := tx.(signing.SigVerifiableTx).GetSignaturesV2() if err != nil { return err @@ -149,7 +151,7 @@ func (snm *SenderNonceMempool) Insert(_ context.Context, tx sdk.Tx) error { snm.senders[sender] = senderTxs } - senderTxs.Set(nonce, tx) + senderTxs.Set(nonce, memTx) key := txKey{nonce: nonce, address: sender} snm.existingTx[key] = true @@ -157,6 +159,15 @@ func (snm *SenderNonceMempool) Insert(_ context.Context, tx sdk.Tx) error { return nil } +func (snm *SenderNonceMempool) Insert(ctx context.Context, tx sdk.Tx) error { + var gasLimit uint64 + if gasTx, ok := tx.(GasTx); ok { + gasLimit = gasTx.GetGas() + } + + return snm.InsertWithGasWanted(ctx, tx, gasLimit) +} + // Select returns an iterator ordering transactions the mempool with the lowest // nonce of a random selected sender first. // @@ -197,7 +208,7 @@ func (snm *SenderNonceMempool) doSelect(_ context.Context, _ [][]byte) Iterator } // SelectBy will hold the mutex during the iteration, callback returns if continue. 
-func (snm *SenderNonceMempool) SelectBy(ctx context.Context, txs [][]byte, callback func(sdk.Tx) bool) { +func (snm *SenderNonceMempool) SelectBy(ctx context.Context, txs [][]byte, callback func(Tx) bool) { snm.mtx.Lock() defer snm.mtx.Unlock() @@ -290,8 +301,8 @@ func (i *senderNonceMempoolIterator) Next() Iterator { return nil } -func (i *senderNonceMempoolIterator) Tx() sdk.Tx { - return i.currentTx.Value.(sdk.Tx) +func (i *senderNonceMempoolIterator) Tx() Tx { + return i.currentTx.Value.(Tx) } func removeAtIndex[T any](slice []T, index int) []T { diff --git a/types/mempool/sender_nonce_property_test.go b/types/mempool/sender_nonce_property_test.go index baaaa4abe9a2..b55f5ef8814a 100644 --- a/types/mempool/sender_nonce_property_test.go +++ b/types/mempool/sender_nonce_property_test.go @@ -96,7 +96,7 @@ func fetchAllTxs(iterator mempool.Iterator) []testTx { var txs []testTx for iterator != nil { tx := iterator.Tx() - txs = append(txs, tx.(testTx)) + txs = append(txs, tx.Tx.(testTx)) i := iterator.Next() iterator = i } diff --git a/x/bank/keeper/collections_test.go b/x/bank/keeper/collections_test.go index 97b5e3e8bca0..9f03d0c9a9b1 100644 --- a/x/bank/keeper/collections_test.go +++ b/x/bank/keeper/collections_test.go @@ -26,7 +26,8 @@ import ( func TestBankStateCompatibility(t *testing.T) { key := storetypes.NewKVStoreKey(banktypes.StoreKey) - testCtx := testutil.DefaultContextWithDB(t, key, storetypes.NewTransientStoreKey("transient_test")) + okey := storetypes.NewObjectStoreKey(banktypes.ObjectStoreKey) + testCtx := testutil.DefaultContextWithObjectStore(t, key, storetypes.NewTransientStoreKey("transient_test"), okey) ctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{Time: cmttime.Now()}) encCfg := moduletestutil.MakeTestEncodingConfig() @@ -39,6 +40,7 @@ func TestBankStateCompatibility(t *testing.T) { k := keeper.NewBaseKeeper( encCfg.Codec, + okey, storeService, authKeeper, map[string]bool{accAddrs[4].String(): true}, diff --git a/x/bank/keeper/keeper.go b/x/bank/keeper/keeper.go index 6883e60aa509..3b02093605a6 100644 --- a/x/bank/keeper/keeper.go +++ b/x/bank/keeper/keeper.go @@ -8,6 +8,7 @@ import ( errorsmod "cosmossdk.io/errors" "cosmossdk.io/log" "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" @@ -46,9 +47,14 @@ type Keeper interface { MintCoins(ctx context.Context, moduleName string, amt sdk.Coins) error BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins) error + SendCoinsFromAccountToModuleVirtual(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error + SendCoinsFromModuleToAccountVirtual(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + DelegateCoins(ctx context.Context, delegatorAddr, moduleAccAddr sdk.AccAddress, amt sdk.Coins) error UndelegateCoins(ctx context.Context, moduleAccAddr, delegatorAddr sdk.AccAddress, amt sdk.Coins) error + CreditVirtualAccounts(ctx context.Context) error + types.QueryServer } @@ -83,6 +89,7 @@ func (k BaseKeeper) GetPaginatedTotalSupply(ctx context.Context, pagination *que // by using a SendCoinsFromModuleToAccount execution. 
func NewBaseKeeper( cdc codec.BinaryCodec, + objStoreKey storetypes.StoreKey, storeService store.KVStoreService, ak types.AccountKeeper, blockedAddrs map[string]bool, @@ -97,7 +104,7 @@ func NewBaseKeeper( logger = logger.With(log.ModuleKey, "x/"+types.ModuleName) return BaseKeeper{ - BaseSendKeeper: NewBaseSendKeeper(cdc, storeService, ak, blockedAddrs, authority, logger), + BaseSendKeeper: NewBaseSendKeeper(cdc, objStoreKey, storeService, ak, blockedAddrs, authority, logger), ak: ak, cdc: cdc, storeService: storeService, diff --git a/x/bank/keeper/keeper_test.go b/x/bank/keeper/keeper_test.go index 0952c34d8a06..273b35013270 100644 --- a/x/bank/keeper/keeper_test.go +++ b/x/bank/keeper/keeper_test.go @@ -130,7 +130,8 @@ func TestKeeperTestSuite(t *testing.T) { func (suite *KeeperTestSuite) SetupTest() { key := storetypes.NewKVStoreKey(banktypes.StoreKey) - testCtx := testutil.DefaultContextWithDB(suite.T(), key, storetypes.NewTransientStoreKey("transient_test")) + okey := storetypes.NewObjectStoreKey(banktypes.ObjectStoreKey) + testCtx := testutil.DefaultContextWithObjectStore(suite.T(), key, storetypes.NewTransientStoreKey("transient_test"), okey) ctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{Time: cmttime.Now()}) encCfg := moduletestutil.MakeTestEncodingConfig() @@ -144,6 +145,7 @@ func (suite *KeeperTestSuite) SetupTest() { suite.authKeeper = authKeeper suite.bankKeeper = keeper.NewBaseKeeper( encCfg.Codec, + okey, storeService, suite.authKeeper, map[string]bool{accAddrs[4].String(): true}, @@ -196,6 +198,16 @@ func (suite *KeeperTestSuite) mockSendCoinsFromAccountToModule(acc *authtypes.Ba suite.authKeeper.EXPECT().HasAccount(suite.ctx, moduleAcc.GetAddress()).Return(true) } +func (suite *KeeperTestSuite) mockSendCoinsFromAccountToModuleVirtual(acc *authtypes.BaseAccount, moduleAcc *authtypes.ModuleAccount) { + suite.authKeeper.EXPECT().GetModuleAccount(suite.ctx, moduleAcc.Name).Return(moduleAcc) + suite.authKeeper.EXPECT().GetAccount(suite.ctx, acc.GetAddress()).Return(acc) +} + +func (suite *KeeperTestSuite) mockSendCoinsFromModuleToAccountVirtual(moduleAcc *authtypes.ModuleAccount, accAddr sdk.AccAddress) { + suite.authKeeper.EXPECT().GetModuleAddress(moduleAcc.Name).Return(moduleAcc.GetAddress()) + suite.authKeeper.EXPECT().HasAccount(suite.ctx, accAddr).Return(true) +} + func (suite *KeeperTestSuite) mockSendCoins(ctx context.Context, sender sdk.AccountI, receiver sdk.AccAddress) { suite.authKeeper.EXPECT().GetAccount(ctx, sender.GetAddress()).Return(sender) suite.authKeeper.EXPECT().HasAccount(ctx, receiver).Return(true) @@ -315,6 +327,7 @@ func (suite *KeeperTestSuite) TestGetAuthority() { NewKeeperWithAuthority := func(authority string) keeper.BaseKeeper { return keeper.NewBaseKeeper( moduletestutil.MakeTestEncodingConfig().Codec, + nil, storeService, suite.authKeeper, nil, @@ -632,6 +645,38 @@ func (suite *KeeperTestSuite) TestSendCoinsNewAccount() { require.Equal(acc1Balances, updatedAcc1Bal) } +func (suite *KeeperTestSuite) TestSendCoinsVirtual() { + ctx := suite.ctx + require := suite.Require() + keeper := suite.bankKeeper + sdkCtx := sdk.UnwrapSDKContext(ctx) + acc0 := authtypes.NewBaseAccountWithAddress(accAddrs[0]) + feeDenom1 := "fee1" + feeDenom2 := "fee2" + + balances := sdk.NewCoins(sdk.NewInt64Coin(feeDenom1, 100), sdk.NewInt64Coin(feeDenom2, 100)) + suite.mockFundAccount(accAddrs[0]) + require.NoError(banktestutil.FundAccount(ctx, suite.bankKeeper, accAddrs[0], balances)) + + sendAmt := sdk.NewCoins(sdk.NewInt64Coin(feeDenom1, 50), 
sdk.NewInt64Coin(feeDenom2, 50)) + suite.mockSendCoinsFromAccountToModuleVirtual(acc0, burnerAcc) + require.NoError( + keeper.SendCoinsFromAccountToModuleVirtual(sdkCtx, accAddrs[0], authtypes.Burner, sendAmt), + ) + + refundAmt := sdk.NewCoins(sdk.NewInt64Coin(feeDenom1, 25), sdk.NewInt64Coin(feeDenom2, 25)) + suite.mockSendCoinsFromModuleToAccountVirtual(burnerAcc, accAddrs[0]) + require.NoError( + keeper.SendCoinsFromModuleToAccountVirtual(sdkCtx, authtypes.Burner, accAddrs[0], refundAmt), + ) + + suite.authKeeper.EXPECT().HasAccount(suite.ctx, burnerAcc.GetAddress()).Return(true) + require.NoError(keeper.CreditVirtualAccounts(ctx)) + + require.Equal(math.NewInt(25), keeper.GetBalance(suite.ctx, burnerAcc.GetAddress(), feeDenom1).Amount) + require.Equal(math.NewInt(25), keeper.GetBalance(suite.ctx, burnerAcc.GetAddress(), feeDenom2).Amount) +} + func (suite *KeeperTestSuite) TestInputOutputNewAccount() { ctx := suite.ctx require := suite.Require() diff --git a/x/bank/keeper/send.go b/x/bank/keeper/send.go index 96fa6f2a089b..e003ad575dc6 100644 --- a/x/bank/keeper/send.go +++ b/x/bank/keeper/send.go @@ -9,6 +9,7 @@ import ( errorsmod "cosmossdk.io/errors" "cosmossdk.io/log" "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/telemetry" @@ -60,6 +61,7 @@ type BaseSendKeeper struct { ak types.AccountKeeper storeService store.KVStoreService logger log.Logger + objStoreKey storetypes.StoreKey // list of addresses that are restricted from receiving transactions blockedAddrs map[string]bool @@ -73,6 +75,7 @@ type BaseSendKeeper struct { func NewBaseSendKeeper( cdc codec.BinaryCodec, + objStoreKey storetypes.StoreKey, storeService store.KVStoreService, ak types.AccountKeeper, blockedAddrs map[string]bool, @@ -86,6 +89,7 @@ func NewBaseSendKeeper( return BaseSendKeeper{ BaseViewKeeper: NewBaseViewKeeper(cdc, storeService, ak, logger), cdc: cdc, + objStoreKey: objStoreKey, ak: ak, storeService: storeService, blockedAddrs: blockedAddrs, @@ -240,6 +244,12 @@ func (k BaseSendKeeper) SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccA return err } + k.ensureAccountCreated(ctx, toAddr) + k.emitSendCoinsEvents(ctx, fromAddr, toAddr, amt) + return nil +} + +func (k BaseSendKeeper) ensureAccountCreated(ctx context.Context, toAddr sdk.AccAddress) { // Create account if recipient does not exist. // // NOTE: This should ultimately be removed in favor a more flexible approach @@ -249,14 +259,18 @@ func (k BaseSendKeeper) SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccA defer telemetry.IncrCounter(1, "new", "account") k.ak.SetAccount(ctx, k.ak.NewAccountWithAddress(ctx, toAddr)) } +} +// emitSendCoinsEvents emit send coins events. +func (k BaseSendKeeper) emitSendCoinsEvents(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) { // bech32 encoding is expensive! 
Only do it once for fromAddr fromAddrString := fromAddr.String() + toAddrString := toAddr.String() sdkCtx := sdk.UnwrapSDKContext(ctx) sdkCtx.EventManager().EmitEvents(sdk.Events{ sdk.NewEvent( types.EventTypeTransfer, - sdk.NewAttribute(types.AttributeKeyRecipient, toAddr.String()), + sdk.NewAttribute(types.AttributeKeyRecipient, toAddrString), sdk.NewAttribute(types.AttributeKeySender, fromAddrString), sdk.NewAttribute(sdk.AttributeKeyAmount, amt.String()), ), @@ -265,8 +279,6 @@ func (k BaseSendKeeper) SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccA sdk.NewAttribute(types.AttributeKeySender, fromAddrString), ), }) - - return nil } // subUnlockedCoins removes the unlocked amt coins of the given account. diff --git a/x/bank/keeper/virtual.go b/x/bank/keeper/virtual.go new file mode 100644 index 000000000000..08e37803fa5d --- /dev/null +++ b/x/bank/keeper/virtual.go @@ -0,0 +1,179 @@ +package keeper + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SendCoinsFromAccountToModuleVirtual sends coins from an account to the virtual balance of a module account. +func (k BaseSendKeeper) SendCoinsFromAccountToModuleVirtual( + ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins, +) error { + recipientAcc := k.ak.GetModuleAccount(ctx, recipientModule) + if recipientAcc == nil { + panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", recipientModule)) + } + + return k.SendCoinsToVirtual(ctx, senderAddr, recipientAcc.GetAddress(), amt) +} + +// SendCoinsFromModuleToAccountVirtual sends coins from the virtual balance of a module account to an account. +func (k BaseSendKeeper) SendCoinsFromModuleToAccountVirtual( + ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins, +) error { + senderAddr := k.ak.GetModuleAddress(senderModule) + if senderAddr == nil { + panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", senderModule)) + } + + if k.BlockedAddr(recipientAddr) { + return errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", recipientAddr) + } + + return k.SendCoinsFromVirtual(ctx, senderAddr, recipientAddr, amt) +} + +// SendCoinsToVirtual accumulates the recipient's coins in per-transaction transient state, +// which is summed up and added to the real account at the end of the block. +// Events are emitted the same as a normal send. +func (k BaseSendKeeper) SendCoinsToVirtual(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) error { + var err error + err = k.subUnlockedCoins(ctx, fromAddr, amt) + if err != nil { + return err + } + + toAddr, err = k.sendRestriction.apply(ctx, fromAddr, toAddr, amt) + if err != nil { + return err + } + + k.addVirtualCoins(ctx, toAddr, amt) + k.emitSendCoinsEvents(ctx, fromAddr, toAddr, amt) + return nil +} + +// SendCoinsFromVirtual deducts coins from the sender's virtual balance and sends them to the recipient account.
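+//
+// Sketch of the intended lifecycle (illustrative, not taken verbatim from this patch):
+// during transaction execution balances are charged into a module's virtual balance, and the
+// bank EndBlocker settles all virtual balances once per block (payer and fees are placeholder names):
+//
+//	// during tx execution, e.g. when collecting fees
+//	err := k.SendCoinsFromAccountToModuleVirtual(ctx, payer, authtypes.FeeCollectorName, fees)
+//
+//	// once per block, wired through AppModule.EndBlock in this patch
+//	err = k.CreditVirtualAccounts(ctx)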
+func (k BaseSendKeeper) SendCoinsFromVirtual(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) error { + var err error + err = k.subVirtualCoins(ctx, fromAddr, amt) + if err != nil { + return err + } + + toAddr, err = k.sendRestriction.apply(ctx, fromAddr, toAddr, amt) + if err != nil { + return err + } + + err = k.addCoins(ctx, toAddr, amt) + if err != nil { + return err + } + + k.ensureAccountCreated(ctx, toAddr) + k.emitSendCoinsEvents(ctx, fromAddr, toAddr, amt) + return nil +} + +func (k BaseSendKeeper) addVirtualCoins(ctx context.Context, addr sdk.AccAddress, amt sdk.Coins) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + store := sdkCtx.ObjectStore(k.objStoreKey) + + key := make([]byte, len(addr)+8) + copy(key, addr) + binary.BigEndian.PutUint64(key[len(addr):], uint64(sdkCtx.TxIndex())) + + var coins sdk.Coins + value := store.Get(key) + if value != nil { + coins = value.(sdk.Coins) + } + coins = coins.Add(amt...) + store.Set(key, coins) +} + +func (k BaseSendKeeper) subVirtualCoins(ctx context.Context, addr sdk.AccAddress, amt sdk.Coins) error { + sdkCtx := sdk.UnwrapSDKContext(ctx) + store := sdkCtx.ObjectStore(k.objStoreKey) + + key := make([]byte, len(addr)+8) + copy(key, addr) + binary.BigEndian.PutUint64(key[len(addr):], uint64(sdkCtx.TxIndex())) + + value := store.Get(key) + if value == nil { + return errorsmod.Wrapf( + sdkerrors.ErrInsufficientFunds, + "spendable balance 0 is smaller than %s", + amt, + ) + } + spendable := value.(sdk.Coins) + balance, hasNeg := spendable.SafeSub(amt...) + if hasNeg { + return errorsmod.Wrapf( + sdkerrors.ErrInsufficientFunds, + "spendable balance %s is smaller than %s", + spendable, amt, + ) + } + if balance.IsZero() { + store.Delete(key) + } else { + store.Set(key, balance) + } + + return nil +} + +// CreditVirtualAccounts sum up the transient coins and add them to the real account, +// should be called at end blocker. +func (k BaseSendKeeper) CreditVirtualAccounts(ctx context.Context) error { + store := sdk.UnwrapSDKContext(ctx).ObjectStore(k.objStoreKey) + + var toAddr sdk.AccAddress + sum := sdk.NewMapCoins(nil) + flushCurrentAddr := func() error { + if len(sum) == 0 { + // nothing to flush + return nil + } + + if err := k.addCoins(ctx, toAddr, sum.ToCoins()); err != nil { + return err + } + clear(sum) + + k.ensureAccountCreated(ctx, toAddr) + return nil + } + + it := store.Iterator(nil, nil) + defer it.Close() + for ; it.Valid(); it.Next() { + if len(it.Key()) <= 8 { + return fmt.Errorf("unexpected key length: %s", hex.EncodeToString(it.Key())) + } + + addr := it.Key()[:len(it.Key())-8] + if !bytes.Equal(toAddr, addr) { + if err := flushCurrentAddr(); err != nil { + return err + } + toAddr = addr + } + + sum.Add(it.Value().(sdk.Coins)...) + } + + return flushCurrentAddr() +} diff --git a/x/bank/module.go b/x/bank/module.go index e7140af204a9..f0fe8793c79f 100644 --- a/x/bank/module.go +++ b/x/bank/module.go @@ -17,6 +17,7 @@ import ( corestore "cosmossdk.io/core/store" "cosmossdk.io/depinject" "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" @@ -44,7 +45,8 @@ var ( _ module.HasGenesis = AppModule{} _ module.HasServices = AppModule{} - _ appmodule.AppModule = AppModule{} + _ appmodule.HasEndBlocker = AppModule{} + _ appmodule.AppModule = AppModule{} ) // AppModuleBasic defines the basic application module used by the bank module. 
@@ -215,12 +217,12 @@ func init() {
 type ModuleInputs struct {
 	depinject.In
 
-	Config       *modulev1.Module
-	Cdc          codec.Codec
-	StoreService corestore.KVStoreService
-	Logger       log.Logger
-
+	Config        *modulev1.Module
+	Cdc           codec.Codec
+	StoreService  corestore.KVStoreService
+	Logger        log.Logger
 	AccountKeeper types.AccountKeeper
+	ObjStoreKey   *storetypes.ObjectStoreKey
 
 	// LegacySubspace is used solely for migration of x/params managed parameters
 	LegacySubspace exported.Subspace `optional:"true"`
@@ -257,6 +259,7 @@ func ProvideModule(in ModuleInputs) ModuleOutputs {
 
 	bankKeeper := keeper.NewBaseKeeper(
 		in.Cdc,
+		in.ObjStoreKey,
 		in.StoreService,
 		in.AccountKeeper,
 		blockedAddresses,
@@ -303,3 +306,7 @@ func InvokeSetSendRestrictions(
 
 	return nil
 }
+
+func (am AppModule) EndBlock(ctx context.Context) error {
+	return am.keeper.CreditVirtualAccounts(ctx)
+}
diff --git a/x/bank/types/keys.go b/x/bank/types/keys.go
index b4ea683d4b69..485d1cdb9ef0 100644
--- a/x/bank/types/keys.go
+++ b/x/bank/types/keys.go
@@ -17,6 +17,9 @@ const (
 
 	// RouterKey defines the module's message routing key
 	RouterKey = ModuleName
+
+	// ObjectStoreKey defines the store name for the object store
+	ObjectStoreKey = "object:" + ModuleName
 )
 
 // KVStore keys
diff --git a/x/circuit/go.mod b/x/circuit/go.mod
index 4032962ca925..5d8de2e824b5 100644
--- a/x/circuit/go.mod
+++ b/x/circuit/go.mod
@@ -157,3 +157,10 @@ require (
 	pgregory.net/rapid v1.2.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace (
+	cosmossdk.io/api => ../../api
+	cosmossdk.io/store => ../../store
+	// Temporary replace until the next 0.53 tag
+	github.com/cosmos/cosmos-sdk => ../..
+)
diff --git a/x/circuit/go.sum b/x/circuit/go.sum
index 73ec26d5c38b..7b9f526f2ba3 100644
--- a/x/circuit/go.sum
+++ b/x/circuit/go.sum
@@ -1,7 +1,5 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg=
-cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU=
 cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg=
 cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU=
 cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo=
@@ -16,8 +14,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U=
 cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ=
 cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE=
 cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI=
-cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o=
-cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A=
 cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA=
 cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -147,8 +143,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC
 github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
-github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI=
-github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs=
 github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
 github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
 github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE=
diff --git a/x/evidence/go.mod b/x/evidence/go.mod
index 51759cd47e63..44d67248c749 100644
--- a/x/evidence/go.mod
+++ b/x/evidence/go.mod
@@ -158,3 +158,10 @@ require (
 	pgregory.net/rapid v1.2.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace (
+	cosmossdk.io/api => ../../api
+	cosmossdk.io/store => ../../store
+	// Temporary replace until the next 0.53 tag
+	github.com/cosmos/cosmos-sdk => ../..
+)
diff --git a/x/evidence/go.sum b/x/evidence/go.sum
index 73ec26d5c38b..7b9f526f2ba3 100644
--- a/x/evidence/go.sum
+++ b/x/evidence/go.sum
@@ -1,7 +1,5 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg=
-cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU=
 cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg=
 cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU=
 cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo=
@@ -16,8 +14,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U=
 cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ=
 cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE=
 cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI=
-cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o=
-cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A=
 cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA=
 cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -147,8 +143,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC
 github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
-github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI=
-github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs=
 github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
 github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
 github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE=
diff --git a/x/feegrant/go.mod b/x/feegrant/go.mod
index bbca8dd792db..f9c378e8535b 100644
--- a/x/feegrant/go.mod
+++ b/x/feegrant/go.mod
@@ -159,3 +159,10 @@ require (
 	pgregory.net/rapid v1.2.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace (
+	cosmossdk.io/api => ../../api
+	cosmossdk.io/store => ../../store
+	// Temporary replace until the next 0.53 tag
+	github.com/cosmos/cosmos-sdk => ../..
+)
diff --git a/x/feegrant/go.sum b/x/feegrant/go.sum
index 1500da1166f8..6a1366570571 100644
--- a/x/feegrant/go.sum
+++ b/x/feegrant/go.sum
@@ -1,7 +1,5 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg=
-cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU=
 cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg=
 cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU=
 cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo=
@@ -16,8 +14,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U=
 cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ=
 cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE=
 cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI=
-cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o=
-cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A=
 cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA=
 cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -151,8 +147,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC
 github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
-github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI=
-github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs=
 github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
 github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
 github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE=
diff --git a/x/gov/testutil/expected_keepers_mocks.go b/x/gov/testutil/expected_keepers_mocks.go
index 64a678b21741..437af56cc2e9 100644
--- a/x/gov/testutil/expected_keepers_mocks.go
+++ b/x/gov/testutil/expected_keepers_mocks.go
@@ -233,6 +233,20 @@ func (mr *MockBankKeeperMockRecorder) ClearSendRestriction() *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearSendRestriction", reflect.TypeOf((*MockBankKeeper)(nil).ClearSendRestriction))
 }
 
+// CreditVirtualAccounts mocks base method.
+func (m *MockBankKeeper) CreditVirtualAccounts(ctx context.Context) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreditVirtualAccounts", ctx)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// CreditVirtualAccounts indicates an expected call of CreditVirtualAccounts.
+func (mr *MockBankKeeperMockRecorder) CreditVirtualAccounts(ctx any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreditVirtualAccounts", reflect.TypeOf((*MockBankKeeper)(nil).CreditVirtualAccounts), ctx)
+}
+
 // DelegateCoins mocks base method.
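Reviewer note: a minimal sketch (not part of the diff) of how a test could use the regenerated mock above. The gomock import path and the expectations shown are assumptions for illustration and may differ from the harness actually used in the SDK tests.

package testutil_test

import (
	"testing"

	"go.uber.org/mock/gomock"

	govtestutil "github.com/cosmos/cosmos-sdk/x/gov/testutil"
)

func TestVirtualBankKeeperExpectations(t *testing.T) {
	ctrl := gomock.NewController(t)
	bankKeeper := govtestutil.NewMockBankKeeper(ctrl)

	// Stub the new virtual-send method for any arguments, and require the
	// per-block flush to be invoked exactly once by the code under test.
	bankKeeper.EXPECT().
		SendCoinsFromAccountToModuleVirtual(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		Return(nil).
		AnyTimes()
	bankKeeper.EXPECT().
		CreditVirtualAccounts(gomock.Any()).
		Return(nil).
		Times(1)

	// ... exercise the code under test with bankKeeper here ...
}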
 func (m *MockBankKeeper) DelegateCoins(ctx context.Context, delegatorAddr, moduleAccAddr types.AccAddress, amt types.Coins) error {
 	m.ctrl.T.Helper()
@@ -797,6 +811,20 @@ func (mr *MockBankKeeperMockRecorder) SendCoinsFromAccountToModule(ctx, senderAd
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromAccountToModule", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromAccountToModule), ctx, senderAddr, recipientModule, amt)
 }
 
+// SendCoinsFromAccountToModuleVirtual mocks base method.
+func (m *MockBankKeeper) SendCoinsFromAccountToModuleVirtual(ctx context.Context, senderAddr types.AccAddress, recipientModule string, amt types.Coins) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendCoinsFromAccountToModuleVirtual", ctx, senderAddr, recipientModule, amt)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// SendCoinsFromAccountToModuleVirtual indicates an expected call of SendCoinsFromAccountToModuleVirtual.
+func (mr *MockBankKeeperMockRecorder) SendCoinsFromAccountToModuleVirtual(ctx, senderAddr, recipientModule, amt any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromAccountToModuleVirtual", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromAccountToModuleVirtual), ctx, senderAddr, recipientModule, amt)
+}
+
 // SendCoinsFromModuleToAccount mocks base method.
 func (m *MockBankKeeper) SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins) error {
 	m.ctrl.T.Helper()
@@ -811,6 +839,20 @@ func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToAccount(ctx, senderMo
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToAccount", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToAccount), ctx, senderModule, recipientAddr, amt)
 }
 
+// SendCoinsFromModuleToAccountVirtual mocks base method.
+func (m *MockBankKeeper) SendCoinsFromModuleToAccountVirtual(ctx context.Context, senderModule string, recipientAddr types.AccAddress, amt types.Coins) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SendCoinsFromModuleToAccountVirtual", ctx, senderModule, recipientAddr, amt)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// SendCoinsFromModuleToAccountVirtual indicates an expected call of SendCoinsFromModuleToAccountVirtual.
+func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToAccountVirtual(ctx, senderModule, recipientAddr, amt any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToAccountVirtual", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToAccountVirtual), ctx, senderModule, recipientAddr, amt)
+}
+
 // SendCoinsFromModuleToModule mocks base method.
 func (m *MockBankKeeper) SendCoinsFromModuleToModule(ctx context.Context, senderModule, recipientModule string, amt types.Coins) error {
 	m.ctrl.T.Helper()
diff --git a/x/group/internal/orm/testsupport.go b/x/group/internal/orm/testsupport.go
index b4fe3d0354ee..b03fcec42ba9 100644
--- a/x/group/internal/orm/testsupport.go
+++ b/x/group/internal/orm/testsupport.go
@@ -26,8 +26,8 @@ func NewMockContext() *MockContext {
 }
 
 func (m MockContext) KVStore(key storetypes.StoreKey) storetypes.KVStore {
-	if s := m.store.GetCommitKVStore(key); s != nil {
-		return s
+	if s := m.store.GetCommitStore(key); s != nil {
+		return s.(storetypes.KVStore)
 	}
 	m.store.MountStoreWithDB(key, storetypes.StoreTypeIAVL, m.db)
 	if err := m.store.LoadLatestVersion(); err != nil {
diff --git a/x/nft/go.mod b/x/nft/go.mod
index 4646bada9e5c..b1a245757c0e 100644
--- a/x/nft/go.mod
+++ b/x/nft/go.mod
@@ -157,3 +157,10 @@ require (
 	pgregory.net/rapid v1.2.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+replace (
+	cosmossdk.io/api => ../../api
+	cosmossdk.io/store => ../../store
+	// Temporary replace until the next 0.53 tag
+	github.com/cosmos/cosmos-sdk => ../..
+)
diff --git a/x/nft/go.sum b/x/nft/go.sum
index 73ec26d5c38b..7b9f526f2ba3 100644
--- a/x/nft/go.sum
+++ b/x/nft/go.sum
@@ -1,7 +1,5 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg=
-cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU=
 cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg=
 cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU=
 cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo=
@@ -16,8 +14,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U=
 cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ=
 cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE=
 cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI=
-cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o=
-cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A=
 cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA=
 cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -147,8 +143,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC
 github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
-github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI=
-github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs=
 github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
 github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
 github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE=
diff --git a/x/simulation/simulate.go b/x/simulation/simulate.go
index 5ee761630f3a..54f88f63ff6a 100644
--- a/x/simulation/simulate.go
+++ b/x/simulation/simulate.go
@@ -243,7 +243,6 @@ func SimulateFromSeedX(
 		proposerAddress = validators.randomProposer(r)
 
 		if config.Commit {
-			app.SimWriteState()
 			if _, err := app.Commit(); err != nil {
 				return params, accs, fmt.Errorf("commit failed at height %d: %w", blockHeight, err)
 			}
diff --git a/x/tx/CHANGELOG.md b/x/tx/CHANGELOG.md
index 0a0b60ed6c56..b79075ab5f8c 100644
--- a/x/tx/CHANGELOG.md
+++ b/x/tx/CHANGELOG.md
@@ -33,6 +33,10 @@ Since v0.13.0, x/tx follows Cosmos SDK semver: https://github.com/cosmos/cosmos-
 
 ## [Unreleased]
 
+### Improvements
+
+* [#21850](https://github.com/cosmos/cosmos-sdk/pull/21850) Support `bytes` fields as signers.
+
 ## [v0.14.0](https://github.com/cosmos/cosmos-sdk/releases/tag/x/tx/v0.14.0) - 2025-04-24
 
 * SDK v0.53.x support.
diff --git a/x/tx/internal/testpb/signers.proto b/x/tx/internal/testpb/signers.proto
index 63eb38cba43b..7244a3657400 100644
--- a/x/tx/internal/testpb/signers.proto
+++ b/x/tx/internal/testpb/signers.proto
@@ -92,7 +92,7 @@ message DeeplyNestedRepeatedSigner {
 
 message BadSigner {
   option (cosmos.msg.v1.signer) = "signer";
-  bytes signer = 1;
+  int32 signer = 1;
 }
 
 message NoSignerOption {
@@ -107,4 +107,4 @@ message ValidatorSigner {
 service TestSimpleSigner {
   option (cosmos.msg.v1.service) = true;
   rpc TestSimpleSigner(SimpleSigner) returns (SimpleSigner) {}
-}
\ No newline at end of file
+}
diff --git a/x/tx/internal/testpb/signers.pulsar.go b/x/tx/internal/testpb/signers.pulsar.go
index f6e3a3d081c1..91de409223b3 100644
--- a/x/tx/internal/testpb/signers.pulsar.go
+++ b/x/tx/internal/testpb/signers.pulsar.go
@@ -7900,8 +7900,8 @@ func (x *fastReflection_BadSigner) Interface() protoreflect.ProtoMessage {
 // While iterating, mutating operations may only be performed
 // on the current field descriptor.
 func (x *fastReflection_BadSigner) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
-	if len(x.Signer) != 0 {
-		value := protoreflect.ValueOfBytes(x.Signer)
+	if x.Signer != int32(0) {
+		value := protoreflect.ValueOfInt32(x.Signer)
 		if !f(fd_BadSigner_signer, value) {
 			return
 		}
@@ -7922,7 +7922,7 @@ func (x *fastReflection_BadSigner) Range(f func(protoreflect.FieldDescriptor, pr
 func (x *fastReflection_BadSigner) Has(fd protoreflect.FieldDescriptor) bool {
 	switch fd.FullName() {
 	case "BadSigner.signer":
-		return len(x.Signer) != 0
+		return x.Signer != int32(0)
 	default:
 		if fd.IsExtension() {
 			panic(fmt.Errorf("proto3 declared messages do not support extensions: BadSigner"))
@@ -7940,7 +7940,7 @@ func (x *fastReflection_BadSigner) Has(fd protoreflect.FieldDescriptor) bool {
 func (x *fastReflection_BadSigner) Clear(fd protoreflect.FieldDescriptor) {
 	switch fd.FullName() {
 	case "BadSigner.signer":
-		x.Signer = nil
+		x.Signer = int32(0)
 	default:
 		if fd.IsExtension() {
 			panic(fmt.Errorf("proto3 declared messages do not support extensions: BadSigner"))
@@ -7959,7 +7959,7 @@ func (x *fastReflection_BadSigner) Get(descriptor protoreflect.FieldDescriptor)
 	switch descriptor.FullName() {
 	case "BadSigner.signer":
 		value := x.Signer
-		return protoreflect.ValueOfBytes(value)
+		return protoreflect.ValueOfInt32(value)
 	default:
 		if descriptor.IsExtension() {
 			panic(fmt.Errorf("proto3 declared messages do not support extensions: BadSigner"))
@@ -7981,7 +7981,7 @@ func (x *fastReflection_BadSigner) Get(descriptor protoreflect.FieldDescriptor)
 func (x *fastReflection_BadSigner) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
 	switch fd.FullName() {
 	case "BadSigner.signer":
-		x.Signer = value.Bytes()
+		x.Signer = int32(value.Int())
 	default:
 		if fd.IsExtension() {
 			panic(fmt.Errorf("proto3 declared messages do not support extensions: BadSigner"))
@@ -8018,7 +8018,7 @@ func (x *fastReflection_BadSigner) Mutable(fd protoreflect.FieldDescriptor) prot
 func (x *fastReflection_BadSigner) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
 	switch fd.FullName() {
 	case "BadSigner.signer":
-		return protoreflect.ValueOfBytes(nil)
+		return protoreflect.ValueOfInt32(int32(0))
 	default:
 		if fd.IsExtension() {
 			panic(fmt.Errorf("proto3 declared messages do not support extensions: BadSigner"))
@@ -8088,9 +8088,8 @@ func (x *fastReflection_BadSigner) ProtoMethods() *protoiface.Methods {
 		var n int
 		var l int
 		_ = l
-		l = len(x.Signer)
-		if l > 0 {
-			n += 1 + l + runtime.Sov(uint64(l))
+		if x.Signer != 0 {
+			n += 1 + runtime.Sov(uint64(x.Signer))
 		}
 		if x.unknownFields != nil {
 			n += len(x.unknownFields)
@@ -8121,12 +8120,10 @@ func (x *fastReflection_BadSigner) ProtoMethods() *protoiface.Methods {
 			i -= len(x.unknownFields)
 			copy(dAtA[i:], x.unknownFields)
 		}
-		if len(x.Signer) > 0 {
-			i -= len(x.Signer)
-			copy(dAtA[i:], x.Signer)
-			i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Signer)))
+		if x.Signer != 0 {
+			i = runtime.EncodeVarint(dAtA, i, uint64(x.Signer))
 			i--
-			dAtA[i] = 0xa
+			dAtA[i] = 0x8
 		}
 		if input.Buf != nil {
 			input.Buf = append(input.Buf, dAtA...)
@@ -8178,10 +8175,10 @@ func (x *fastReflection_BadSigner) ProtoMethods() *protoiface.Methods {
 			}
 			switch fieldNum {
 			case 1:
-				if wireType != 2 {
+				if wireType != 0 {
 					return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
 				}
-				var byteLen int
+				x.Signer = 0
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
@@ -8191,26 +8188,11 @@ func (x *fastReflection_BadSigner) ProtoMethods() *protoiface.Methods {
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					byteLen |= int(b&0x7F) << shift
+					x.Signer |= int32(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				if byteLen < 0 {
-					return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
-				}
-				postIndex := iNdEx + byteLen
-				if postIndex < 0 {
-					return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
-				}
-				if postIndex > l {
-					return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
-				}
-				x.Signer = append(x.Signer[:0], dAtA[iNdEx:postIndex]...)
-				if x.Signer == nil {
-					x.Signer = []byte{}
-				}
-				iNdEx = postIndex
 			default:
 				iNdEx = preIndex
 				skippy, err := runtime.Skip(dAtA[iNdEx:])
@@ -9386,7 +9368,7 @@ type BadSigner struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	Signer []byte `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"`
+	Signer int32 `protobuf:"varint,1,opt,name=signer,proto3" json:"signer,omitempty"`
 }
 
 func (x *BadSigner) Reset() {
@@ -9409,11 +9391,11 @@ func (*BadSigner) Descriptor() ([]byte, []int) {
 	return file_signers_proto_rawDescGZIP(), []int{8}
 }
 
-func (x *BadSigner) GetSigner() []byte {
+func (x *BadSigner) GetSigner() int32 {
 	if x != nil {
 		return x.Signer
 	}
-	return nil
+	return 0
 }
 
 type NoSignerOption struct {
@@ -9885,7 +9867,7 @@ var file_signers_proto_rawDesc = []byte{
 	0x05, 0x69, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x0a, 0x82, 0xe7, 0xb0, 0x2a, 0x05, 0x69, 0x6e, 0x6e,
 	0x65, 0x72, 0x3a, 0x0a, 0x82, 0xe7, 0xb0, 0x2a, 0x05, 0x69, 0x6e, 0x6e, 0x65, 0x72, 0x22, 0x30,
 	0x0a, 0x09, 0x42, 0x61, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73,
-	0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x69, 0x67,
+	0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x69, 0x67,
 	0x6e, 0x65, 0x72, 0x3a, 0x0b, 0x82, 0xe7, 0xb0, 0x2a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72,
 	0x22, 0x28, 0x0a, 0x0e, 0x4e, 0x6f, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69,
 	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
diff --git a/x/tx/signing/context.go b/x/tx/signing/context.go
index 84e0e09166a5..04f7f304a150 100644
--- a/x/tx/signing/context.go
+++ b/x/tx/signing/context.go
@@ -301,6 +301,11 @@ func (c *Context) makeGetSignersFunc(descriptor protoreflect.MessageDescriptor)
 			}
 			return arr, nil
 		}
+	case protoreflect.BytesKind:
+		fieldGetters[i] = func(msg proto.Message, arr [][]byte) ([][]byte, error) {
+			addrBz := msg.ProtoReflect().Get(field).Bytes()
+			return append(arr, addrBz), nil
+		}
 	default:
 		return nil, fmt.Errorf("unexpected field type %s for field %s in message %s", field.Kind(), fieldName, descriptor.FullName())
 	}
diff --git a/x/upgrade/go.mod b/x/upgrade/go.mod
index e61c8604abd5..19172e2a82b0 100644
--- a/x/upgrade/go.mod
+++ b/x/upgrade/go.mod
@@ -209,3 +209,10 @@ require (
 // Fix upstream GHSA-h395-qcrw-5vmq and GHSA-3vp4-m3rf-835h vulnerabilities.
 // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409
 replace github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.9.1
+
+replace (
+	cosmossdk.io/api => ../../api
+	cosmossdk.io/store => ../../store
+	// Temporary replace until the next 0.53 tag
+	github.com/cosmos/cosmos-sdk => ../..
+)
diff --git a/x/upgrade/go.sum b/x/upgrade/go.sum
index ae8e6048bda3..d37b9d6c4a03 100644
--- a/x/upgrade/go.sum
+++ b/x/upgrade/go.sum
@@ -614,8 +614,6 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS
 cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
 cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
 cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
-cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg=
-cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU=
 cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg=
 cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU=
 cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo=
@@ -630,8 +628,6 @@ cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U=
 cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ=
 cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE=
 cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI=
-cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o=
-cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A=
 cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA=
 cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -809,8 +805,6 @@ github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNC
 github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
 github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
-github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI=
-github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs=
 github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
 github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
 github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE=
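Reviewer note: a rough sketch (not part of the diff) of what the new BytesKind branch in makeGetSignersFunc enables. A message whose designated signer field is declared as `bytes` now resolves directly to the raw field value, with no bech32 decoding step, which is presumably why the BadSigner test fixture above switched from `bytes` to `int32`. The message shape in the comment and the codec parameters are assumptions for illustration.

package example

import (
	"google.golang.org/protobuf/proto"

	"cosmossdk.io/core/address"
	"cosmossdk.io/x/tx/signing"
)

// A (hypothetical) message definition like the following is now accepted:
//
//	message BytesSigner {
//	  option (cosmos.msg.v1.signer) = "signer";
//	  bytes signer = 1; // raw address bytes
//	}

// getSigners resolves the signer addresses of msg through the x/tx signing
// context. For a bytes signer field, the returned slice contains the field's
// raw bytes as-is.
func getSigners(msg proto.Message, addrCodec, valCodec address.Codec) ([][]byte, error) {
	signingCtx, err := signing.NewContext(signing.Options{
		AddressCodec:          addrCodec,
		ValidatorAddressCodec: valCodec,
	})
	if err != nil {
		return nil, err
	}
	return signingCtx.GetSigners(msg)
}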