From bd3056b60939944267434aae66023f16cdef5e64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartosz=20R=C3=B3=C5=BCa=C5=84ski?= Date: Mon, 12 Aug 2024 15:18:48 +0000 Subject: [PATCH 1/7] removed redundant contextual validation in ATX V1 handler (#6243) ## Motivation The removed check served no purpose. The actual contextual validation is implemented in `StoreAtx()`. --- activation/handler_v1.go | 56 +------------------------ activation/handler_v1_test.go | 79 ----------------------------------- activation/handler_v2.go | 2 +- 3 files changed, 3 insertions(+), 134 deletions(-) diff --git a/activation/handler_v1.go b/activation/handler_v1.go index 481c90df24..46b24cd2bd 100644 --- a/activation/handler_v1.go +++ b/activation/handler_v1.go @@ -284,47 +284,6 @@ func (h *HandlerV1) validateNonInitialAtx( return nil } -// contextuallyValidateAtx ensures that the previous ATX referenced is the last known ATX for the referenced miner ID. -// If a previous ATX is not referenced, it validates that indeed there's no previous known ATX for that miner ID. -func (h *HandlerV1) contextuallyValidateAtx(atx *wire.ActivationTxV1) error { - lastAtx, err := atxs.GetLastIDByNodeID(h.cdb, atx.SmesherID) - if err == nil && atx.PrevATXID == lastAtx { - // last atx referenced equals last ATX seen from node - return nil - } - - if err == nil && atx.PrevATXID == types.EmptyATXID { - // no previous atx declared, but already seen at least one atx from node - return fmt.Errorf( - "no prev atx reported, but other atx with same node id (%v) found: %v", - atx.SmesherID, - lastAtx.ShortString(), - ) - } - - if err == nil && atx.PrevATXID != lastAtx { - // last atx referenced does not equal last ATX seen from node - return errors.New("last atx is not the one referenced") - } - - if errors.Is(err, sql.ErrNotFound) && atx.PrevATXID == types.EmptyATXID { - // no previous atx found and none referenced - return nil - } - - if err != nil && atx.PrevATXID != types.EmptyATXID { - // no previous atx found but previous atx referenced - h.logger.Error("could not fetch node last atx", - zap.Stringer("atx_id", atx.ID()), - zap.Stringer("smesher", atx.SmesherID), - zap.Error(err), - ) - return fmt.Errorf("could not fetch node last atx: %w", err) - } - - return err -} - // cacheAtx caches the atx in the atxsdata cache. // Returns true if the atx was cached, false otherwise. 
func (h *HandlerV1) cacheAtx(ctx context.Context, atx *types.ActivationTx) *atxsdata.ATX { @@ -397,7 +356,7 @@ func (h *HandlerV1) checkDoublePublish( return nil, fmt.Errorf("add malfeasance proof: %w", err) } - h.logger.Warn("smesher produced more than one atx in the same epoch", + h.logger.Debug("smesher produced more than one atx in the same epoch", log.ZContext(ctx), zap.Stringer("smesher", atx.SmesherID), zap.Stringer("previous", prev), @@ -478,7 +437,7 @@ func (h *HandlerV1) checkWrongPrevAtx( return nil, fmt.Errorf("add malfeasance proof: %w", err) } - h.logger.Warn("smesher referenced the wrong previous in published ATX", + h.logger.Debug("smesher referenced the wrong previous in published ATX", log.ZContext(ctx), zap.Stringer("smesher", atx.SmesherID), log.ZShortStringer("actual", atx.PrevATXID), @@ -593,17 +552,6 @@ func (h *HandlerV1) processATX( return proof, nil } - if err := h.contextuallyValidateAtx(watx); err != nil { - h.logger.Warn("atx is contextually invalid ", - log.ZContext(ctx), - zap.Stringer("atx_id", watx.ID()), - zap.Stringer("smesherID", watx.SmesherID), - zap.Error(err), - ) - } else { - h.logger.Debug("atx is valid", zap.Stringer("atx_id", watx.ID())) - } - var baseTickHeight uint64 if watx.PositioningATXID != h.goldenATXID { posAtx, err := h.cdb.GetAtx(watx.PositioningATXID) diff --git a/activation/handler_v1_test.go b/activation/handler_v1_test.go index 5db33b1098..5aba8cb7f9 100644 --- a/activation/handler_v1_test.go +++ b/activation/handler_v1_test.go @@ -453,85 +453,6 @@ func TestHandlerV1_SyntacticallyValidateAtx(t *testing.T) { }) } -func TestHandler_ContextuallyValidateAtx(t *testing.T) { - goldenATXID := types.ATXID{2, 3, 4} - - sig, err := signing.NewEdSigner() - require.NoError(t, err) - - t.Run("valid initial atx", func(t *testing.T) { - t.Parallel() - - atx := newInitialATXv1(t, goldenATXID) - atx.Sign(sig) - - atxHdlr := newV1TestHandler(t, goldenATXID) - require.NoError(t, atxHdlr.contextuallyValidateAtx(atx)) - }) - - t.Run("missing prevAtx", func(t *testing.T) { - t.Parallel() - - atxHdlr := newV1TestHandler(t, goldenATXID) - - prevAtx := newInitialATXv1(t, goldenATXID) - atx := newChainedActivationTxV1(t, prevAtx, goldenATXID) - - err = atxHdlr.contextuallyValidateAtx(atx) - require.ErrorIs(t, err, sql.ErrNotFound) - }) - - t.Run("wrong previous atx by same node", func(t *testing.T) { - t.Parallel() - - atxHdlr := newV1TestHandler(t, goldenATXID) - - atx0 := newInitialATXv1(t, goldenATXID) - atx0.Sign(sig) - atxHdlr.expectAtxV1(atx0, sig.NodeID()) - _, err := atxHdlr.processATX(context.Background(), "", atx0, time.Now()) - require.NoError(t, err) - - atx1 := newChainedActivationTxV1(t, atx0, goldenATXID) - atx1.Sign(sig) - atxHdlr.expectAtxV1(atx1, sig.NodeID()) - atxHdlr.mockFetch.EXPECT().GetAtxs(gomock.Any(), gomock.Any(), gomock.Any()) - _, err = atxHdlr.processATX(context.Background(), "", atx1, time.Now()) - require.NoError(t, err) - - atxInvalidPrevious := newChainedActivationTxV1(t, atx0, goldenATXID) - atxInvalidPrevious.Sign(sig) - err = atxHdlr.contextuallyValidateAtx(atxInvalidPrevious) - require.EqualError(t, err, "last atx is not the one referenced") - }) - - t.Run("wrong previous atx from different node", func(t *testing.T) { - t.Parallel() - - otherSig, err := signing.NewEdSigner() - require.NoError(t, err) - - atxHdlr := newV1TestHandler(t, goldenATXID) - - atx0 := newInitialATXv1(t, goldenATXID) - atx0.Sign(otherSig) - atxHdlr.expectAtxV1(atx0, otherSig.NodeID()) - _, err = atxHdlr.processATX(context.Background(), "", atx0, 
time.Now()) - require.NoError(t, err) - - atx1 := newInitialATXv1(t, goldenATXID) - atx1.Sign(sig) - atxHdlr.expectAtxV1(atx1, sig.NodeID()) - _, err = atxHdlr.processATX(context.Background(), "", atx1, time.Now()) - require.NoError(t, err) - - atxInvalidPrevious := newChainedActivationTxV1(t, atx0, goldenATXID) - atxInvalidPrevious.Sign(sig) - err = atxHdlr.contextuallyValidateAtx(atxInvalidPrevious) - require.EqualError(t, err, "last atx is not the one referenced") - }) -} - func TestHandlerV1_StoreAtx(t *testing.T) { goldenATXID := types.RandomATXID() diff --git a/activation/handler_v2.go b/activation/handler_v2.go index 42d139161e..6755b9f7e0 100644 --- a/activation/handler_v2.go +++ b/activation/handler_v2.go @@ -630,7 +630,7 @@ func (h *HandlerV2) syntacticallyValidateDeps( ) invalidIdx := &verifying.ErrInvalidIndex{} if errors.As(err, invalidIdx) { - h.logger.Info( + h.logger.Debug( "ATX with invalid post index", zap.Stringer("id", atx.ID()), zap.Int("index", invalidIdx.Index), From f8354c524015d44e9d9c11a4a07b8d0eb98520dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartosz=20R=C3=B3=C5=BCa=C5=84ski?= Date: Mon, 12 Aug 2024 16:03:11 +0000 Subject: [PATCH 2/7] Speed up ATX cache warmup (#6241) ## Motivation Speeding up the in-memory ATX cache warmup that is especially slow on HDDs. Co-authored-by: Jedrzej Nowak --- activation/e2e/checkpoint_test.go | 2 +- atxsdata/data.go | 25 +++++++------------ atxsdata/data_test.go | 13 ++++------ atxsdata/warmup.go | 40 ++++++++++++++++++++++++++----- atxsdata/warmup_test.go | 9 +++---- node/node.go | 34 ++++++++++++++------------ sql/atxs/atxs.go | 28 +++++++--------------- tortoise/replay/replay_test.go | 2 +- 8 files changed, 81 insertions(+), 72 deletions(-) diff --git a/activation/e2e/checkpoint_test.go b/activation/e2e/checkpoint_test.go index 4e825d944c..0120189fe4 100644 --- a/activation/e2e/checkpoint_test.go +++ b/activation/e2e/checkpoint_test.go @@ -188,7 +188,7 @@ func TestCheckpoint_PublishingSoloATXs(t *testing.T) { // 3. 
Spawn new ATX handler and builder using the new DB poetDb = activation.NewPoetDb(newDB, logger.Named("poetDb")) cdb = datastore.NewCachedDB(newDB, logger) - atxdata, err = atxsdata.Warm(newDB, 1) + atxdata, err = atxsdata.Warm(newDB, 1, logger) poetService = activation.NewPoetServiceWithClient(poetDb, client, poetCfg, logger) validator = activation.NewValidator(newDB, poetDb, cfg, opts.Scrypt, verifier) require.NoError(t, err) diff --git a/atxsdata/data.go b/atxsdata/data.go index 94c4cfc89d..6968c1180b 100644 --- a/atxsdata/data.go +++ b/atxsdata/data.go @@ -16,10 +16,6 @@ type ATX struct { Weight uint64 BaseHeight, Height uint64 Nonce types.VRFPostIndex - // unexported to avoid accidental unsynchronized access - // (this field is mutated by the Data under a lock and - // might only be safely read under the same lock) - malicious bool } func New() *Data { @@ -107,9 +103,6 @@ func (d *Data) AddAtx(target types.EpochID, id types.ATXID, atx *ATX) bool { atxsCounter.WithLabelValues(target.String()).Inc() ecache.index[id] = atx - if atx.malicious { - d.malicious[atx.Node] = struct{}{} - } return true } @@ -131,7 +124,9 @@ func (d *Data) Add( BaseHeight: baseHeight, Height: height, Nonce: nonce, - malicious: malicious, + } + if malicious { + d.SetMalicious(node) } if d.AddAtx(epoch, atxid, atx) { return atx @@ -165,8 +160,6 @@ func (d *Data) Get(epoch types.EpochID, atx types.ATXID) *ATX { if !exists { return nil } - _, exists = d.malicious[data.Node] - data.malicious = exists return data } @@ -185,10 +178,11 @@ type lockGuard struct{} // AtxFilter is a function that filters atxs. // The `lockGuard` prevents using the filter functions outside of the allowed context // to prevent data races. -type AtxFilter func(*ATX, lockGuard) bool +type AtxFilter func(*Data, *ATX, lockGuard) bool -func NotMalicious(data *ATX, _ lockGuard) bool { - return !data.malicious +func NotMalicious(d *Data, atx *ATX, _ lockGuard) bool { + _, m := d.malicious[atx.Node] + return !m } // IterateInEpoch calls `fn` for every ATX in epoch. 
@@ -202,12 +196,9 @@ func (d *Data) IterateInEpoch(epoch types.EpochID, fn func(types.ATXID, *ATX), f return } for id, atx := range ecache.index { - if _, exists := d.malicious[atx.Node]; exists { - atx.malicious = true - } ok := true for _, filter := range filters { - ok = ok && filter(atx, lockGuard{}) + ok = ok && filter(d, atx, lockGuard{}) } if ok { fn(id, atx) diff --git a/atxsdata/data_test.go b/atxsdata/data_test.go index 07111638bc..bb7f4ffc1a 100644 --- a/atxsdata/data_test.go +++ b/atxsdata/data_test.go @@ -42,14 +42,15 @@ func TestData(t *testing.T) { d.BaseHeight, d.Height, d.Nonce, - d.malicious, + false, ) } } for epoch := 0; epoch < epochs; epoch++ { for i := range atxids[epoch] { - byatxid := c.Get(types.EpochID(epoch)+1, atxids[epoch][i]) - require.Equal(t, &data[epoch][i], byatxid) + atx := c.Get(types.EpochID(epoch)+1, atxids[epoch][i]) + require.Equal(t, &data[epoch][i], atx) + require.False(t, c.IsMalicious(atx.Node)) } } } @@ -71,13 +72,9 @@ func TestData(t *testing.T) { ) data := c.Get(types.EpochID(epoch), types.ATXID{byte(epoch)}) require.NotNil(t, data) - require.False(t, data.malicious) + require.False(t, c.IsMalicious(data.Node)) } c.SetMalicious(node) - for epoch := 1; epoch <= 10; epoch++ { - data := c.Get(types.EpochID(epoch), types.ATXID{byte(epoch)}) - require.True(t, data.malicious) - } require.True(t, c.IsMalicious(node)) }) t.Run("eviction", func(t *testing.T) { diff --git a/atxsdata/warmup.go b/atxsdata/warmup.go index 84c6850736..6f4d5e5abc 100644 --- a/atxsdata/warmup.go +++ b/atxsdata/warmup.go @@ -3,27 +3,31 @@ package atxsdata import ( "context" "fmt" + "time" + + "go.uber.org/zap" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/atxs" + "github.com/spacemeshos/go-spacemesh/sql/identities" "github.com/spacemeshos/go-spacemesh/sql/layers" ) -func Warm(db *sql.Database, keep types.EpochID) (*Data, error) { +func Warm(db *sql.Database, keep types.EpochID, logger *zap.Logger) (*Data, error) { cache := New() tx, err := db.Tx(context.Background()) if err != nil { return nil, err } defer tx.Release() - if err := Warmup(tx, cache, keep); err != nil { + if err := Warmup(tx, cache, keep, logger); err != nil { return nil, fmt.Errorf("warmup %w", err) } return cache, nil } -func Warmup(db sql.Executor, cache *Data, keep types.EpochID) error { +func Warmup(db sql.Executor, cache *Data, keep types.EpochID, logger *zap.Logger) error { latest, err := atxs.LatestEpoch(db) if err != nil { return err @@ -38,7 +42,14 @@ func Warmup(db sql.Executor, cache *Data, keep types.EpochID) error { } cache.EvictEpoch(evict) - return atxs.IterateAtxsData(db, cache.Evicted(), latest, + from := cache.Evicted() + logger.Info("Reading ATXs from DB", + zap.Uint32("from epoch", from.Uint32()), + zap.Uint32("to epoch", latest.Uint32()), + ) + start := time.Now() + var processed int + err = atxs.IterateAtxsData(db, cache.Evicted(), latest, func( id types.ATXID, node types.NodeID, @@ -48,7 +59,6 @@ func Warmup(db sql.Executor, cache *Data, keep types.EpochID) error { base, height uint64, nonce types.VRFPostIndex, - malicious bool, ) bool { cache.Add( epoch+1, @@ -59,8 +69,26 @@ func Warmup(db sql.Executor, cache *Data, keep types.EpochID) error { base, height, nonce, - malicious, + false, ) + processed += 1 + if processed%1_000_000 == 0 { + logger.Debug("Processed 1M", zap.Int("total", processed)) + } return true }) + if err != nil { + return fmt.Errorf("warming up atxdata with ATXs: %w", err) + } + 
logger.Info("Finished reading ATXs. Starting reading malfeasance", zap.Duration("duration", time.Since(start))) + start = time.Now() + err = identities.IterateMalicious(db, func(_ int, id types.NodeID) error { + cache.SetMalicious(id) + return nil + }) + if err != nil { + return fmt.Errorf("warming up atxdata with malfeasance: %w", err) + } + logger.Info("Finished reading malfeasance", zap.Duration("duration", time.Since(start))) + return nil } diff --git a/atxsdata/warmup_test.go b/atxsdata/warmup_test.go index 67fa598140..c2051fa1c7 100644 --- a/atxsdata/warmup_test.go +++ b/atxsdata/warmup_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "go.uber.org/zap/zaptest" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/sql" @@ -53,21 +54,21 @@ func TestWarmup(t *testing.T) { } require.NoError(t, layers.SetApplied(db, applied, types.BlockID{1})) - c, err := Warm(db, 1) + c, err := Warm(db, 1, zaptest.NewLogger(t)) require.NoError(t, err) for _, atx := range data[2:] { require.NotNil(t, c.Get(atx.TargetEpoch(), atx.ID())) } }) t.Run("no data", func(t *testing.T) { - c, err := Warm(sql.InMemory(), 1) + c, err := Warm(sql.InMemory(), 1, zaptest.NewLogger(t)) require.NoError(t, err) require.NotNil(t, c) }) t.Run("closed db", func(t *testing.T) { db := sql.InMemory() require.NoError(t, db.Close()) - c, err := Warm(db, 1) + c, err := Warm(db, 1, zaptest.NewLogger(t)) require.Error(t, err) require.Nil(t, c) }) @@ -94,7 +95,7 @@ func TestWarmup(t *testing.T) { AnyTimes() for range 3 { c := New() - require.Error(t, Warmup(exec, c, 1)) + require.Error(t, Warmup(exec, c, 1, zaptest.NewLogger(t))) fail++ call = 0 } diff --git a/node/node.go b/node/node.go index cb00f9998b..a2d6573f66 100644 --- a/node/node.go +++ b/node/node.go @@ -1985,24 +1985,28 @@ func (app *App) setupDBs(ctx context.Context, lg log.Log) error { app.Config.DatabaseSizeMeteringInterval, ) } - app.log.Info("starting cache warmup") - applied, err := layers.GetLastApplied(app.db) - if err != nil { - return err - } - start := time.Now() - data, err := atxsdata.Warm( - app.db, - app.Config.Tortoise.WindowSizeEpochs(applied), - ) - if err != nil { - return err + { + warmupLog := app.log.Zap().Named("warmup") + app.log.Info("starting cache warmup") + applied, err := layers.GetLastApplied(app.db) + if err != nil { + return err + } + start := time.Now() + data, err := atxsdata.Warm( + app.db, + app.Config.Tortoise.WindowSizeEpochs(applied), + warmupLog, + ) + if err != nil { + return err + } + app.atxsdata = data + app.log.With().Info("cache warmup", log.Duration("duration", time.Since(start))) } - app.atxsdata = data - app.log.With().Info("cache warmup", log.Duration("duration", time.Since(start))) app.cachedDB = datastore.NewCachedDB(sqlDB, app.addLogger(CachedDBLogger, lg).Zap(), datastore.WithConfig(app.Config.Cache), - datastore.WithConsensusCache(data), + datastore.WithConsensusCache(app.atxsdata), ) migrations, err = sql.LocalMigrations() diff --git a/sql/atxs/atxs.go b/sql/atxs/atxs.go index 361a2da573..248dd99a0d 100644 --- a/sql/atxs/atxs.go +++ b/sql/atxs/atxs.go @@ -727,28 +727,18 @@ func IterateAtxsData( base uint64, height uint64, nonce types.VRFPostIndex, - isMalicious bool, ) bool, ) error { _, err := db.Exec( - `select - a.id, a.pubkey, a.epoch, a.coinbase, a.effective_num_units, - a.base_tick_height, a.tick_count, a.nonce, - iif(idn.proof is null, 0, 1) as is_malicious - from atxs a left join identities idn on a.pubkey = idn.pubkey`, - // 
SQLite happens to process the query much faster if we don't - // filter it by epoch - // where a.epoch between ? and ?`, - // func(stmt *sql.Statement) { - // stmt.BindInt64(1, int64(from.Uint32())) - // stmt.BindInt64(2, int64(to.Uint32())) - // }, - nil, + `SELECT id, pubkey, epoch, coinbase, effective_num_units, base_tick_height, tick_count, nonce FROM atxs + WHERE epoch between ?1 and ?2`, + // filtering in CODE is no longer effective on some machines in epoch 29 + func(stmt *sql.Statement) { + stmt.BindInt64(1, int64(from.Uint32())) + stmt.BindInt64(2, int64(to.Uint32())) + }, func(stmt *sql.Statement) bool { epoch := types.EpochID(uint32(stmt.ColumnInt64(2))) - if epoch < from || epoch > to { - return true - } var id types.ATXID stmt.ColumnBytes(0, id[:]) var node types.NodeID @@ -759,9 +749,7 @@ func IterateAtxsData( baseHeight := uint64(stmt.ColumnInt64(5)) ticks := uint64(stmt.ColumnInt64(6)) nonce := types.VRFPostIndex(stmt.ColumnInt64(7)) - isMalicious := stmt.ColumnInt(8) != 0 - return fn(id, node, epoch, coinbase, effectiveUnits*ticks, - baseHeight, baseHeight+ticks, nonce, isMalicious) + return fn(id, node, epoch, coinbase, effectiveUnits*ticks, baseHeight, baseHeight+ticks, nonce) }, ) if err != nil { diff --git a/tortoise/replay/replay_test.go b/tortoise/replay/replay_test.go index 77e71079ee..04a5bd7e28 100644 --- a/tortoise/replay/replay_test.go +++ b/tortoise/replay/replay_test.go @@ -57,7 +57,7 @@ func TestReplayMainnet(t *testing.T) { require.NoError(t, err) start := time.Now() - atxsdata, err := atxsdata.Warm(db, cfg.Tortoise.WindowSizeEpochs(applied)) + atxsdata, err := atxsdata.Warm(db, cfg.Tortoise.WindowSizeEpochs(applied), logger) require.NoError(t, err) trtl, err := tortoise.Recover( context.Background(), From f1353613677290ca1ec9834bf05d830b0bbe8482 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 08:22:30 +0000 Subject: [PATCH 3/7] build(deps): Bump sigs.k8s.io/controller-runtime from 0.18.4 to 0.18.5 (#6247) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d4b4248087..725fbc25a6 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( k8s.io/api v0.30.3 k8s.io/apimachinery v0.30.3 k8s.io/client-go v0.30.3 - sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/controller-runtime v0.18.5 ) require ( diff --git a/go.sum b/go.sum index 394e1c8c26..d0a9bd2e96 100644 --- a/go.sum +++ b/go.sum @@ -990,8 +990,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= +sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= From 
d21910b80c7ac39be486eee2f12300de6b32b84c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartosz=20R=C3=B3=C5=BCa=C5=84ski?= Date: Tue, 13 Aug 2024 11:54:47 +0000 Subject: [PATCH 4/7] detect invalid previous ATX for V2 ATXs (#6189) ## Motivation Add a check to detect whether the previous ATX of a V2 ATX is the correct one. --- activation/handler_v2.go | 61 +++++++++++++++++++++---- activation/handler_v2_test.go | 44 ++++++++++++++++++ activation/wire/malfeasance.go | 9 ++-- sql/atxs/atxs.go | 65 ++++++++++---------------- sql/atxs/atxs_test.go | 83 ++++++++++++++++++++++++++++------ 5 files changed, 193 insertions(+), 69 deletions(-) diff --git a/activation/handler_v2.go b/activation/handler_v2.go index 6755b9f7e0..99342c43e8 100644 --- a/activation/handler_v2.go +++ b/activation/handler_v2.go @@ -699,6 +699,14 @@ func (h *HandlerV2) checkMalicious(ctx context.Context, tx *sql.Tx, atx *activat return nil } + malicious, err = h.checkPrevAtx(ctx, tx, atx) + if err != nil { + return fmt.Errorf("checking previous ATX: %w", err) + } + if malicious { + return nil + } + // TODO(mafa): contextual validation: // 1. check double-publish = ID contributed post to two ATXs in the same epoch // 2. check previous ATX @@ -762,29 +770,66 @@ func (h *HandlerV2) checkDoublePost(ctx context.Context, tx *sql.Tx, atx *activa return false, nil } -func (h *HandlerV2) checkDoubleMerge(ctx context.Context, tx *sql.Tx, watx *activationTx) (bool, error) { - if watx.MarriageATX == nil { +func (h *HandlerV2) checkDoubleMerge(ctx context.Context, tx *sql.Tx, atx *activationTx) (bool, error) { + if atx.MarriageATX == nil { return false, nil } - ids, err := atxs.MergeConflict(tx, *watx.MarriageATX, watx.PublishEpoch) + ids, err := atxs.MergeConflict(tx, *atx.MarriageATX, atx.PublishEpoch) switch { case errors.Is(err, sql.ErrNotFound): return false, nil case err != nil: return false, fmt.Errorf("searching for ATXs with the same marriage ATX: %w", err) } - otherIndex := slices.IndexFunc(ids, func(id types.ATXID) bool { return id != watx.ID() }) + otherIndex := slices.IndexFunc(ids, func(id types.ATXID) bool { return id != atx.ID() }) other := ids[otherIndex] h.logger.Debug("second merged ATX for single marriage - creating malfeasance proof", - zap.Stringer("marriage_atx", *watx.MarriageATX), - zap.Stringer("atx", watx.ID()), + zap.Stringer("marriage_atx", *atx.MarriageATX), + zap.Stringer("atx", atx.ID()), zap.Stringer("other_atx", other), - zap.Stringer("smesher_id", watx.SmesherID), + zap.Stringer("smesher_id", atx.SmesherID), ) var proof wire.Proof - return true, h.malPublisher.Publish(ctx, watx.SmesherID, proof) + return true, h.malPublisher.Publish(ctx, atx.SmesherID, proof) +} + +func (h *HandlerV2) checkPrevAtx(ctx context.Context, tx *sql.Tx, atx *activationTx) (bool, error) { + for id, data := range atx.ids { + expectedPrevID, err := atxs.PrevIDByNodeID(tx, id, atx.PublishEpoch) + if err != nil && !errors.Is(err, sql.ErrNotFound) { + return false, fmt.Errorf("get last atx by node id: %w", err) + } + if expectedPrevID == data.previous { + continue + } + + h.logger.Debug("atx references a wrong previous ATX", + log.ZShortStringer("smesherID", id), + log.ZShortStringer("actual", data.previous), + log.ZShortStringer("expected", expectedPrevID), + ) + + atx1, atx2, err := atxs.PrevATXCollision(tx, data.previous, id) + switch { + case errors.Is(err, sql.ErrNotFound): + continue + case err != nil: + return false, fmt.Errorf("checking for previous ATX collision: %w", err) + } + + h.logger.Debug("creating a malfeasance proof for 
invalid previous ATX", + log.ZShortStringer("smesherID", id), + log.ZShortStringer("atx1", atx1), + log.ZShortStringer("atx2", atx2), + ) + + // TODO(mafa): finish proof + var proof wire.Proof + return true, h.malPublisher.Publish(ctx, id, proof) + } + return false, nil } // Store an ATX in the DB. diff --git a/activation/handler_v2_test.go b/activation/handler_v2_test.go index 46f6e76834..b596a2ed2c 100644 --- a/activation/handler_v2_test.go +++ b/activation/handler_v2_test.go @@ -1894,6 +1894,50 @@ func Test_CalculatingUnits(t *testing.T) { }) } +func TestContextual_PreviousATX(t *testing.T) { + golden := types.RandomATXID() + atxHndlr := newV2TestHandler(t, golden) + var ( + signers []*signing.EdSigner + eqSet []types.NodeID + ) + for range 3 { + sig, err := signing.NewEdSigner() + require.NoError(t, err) + signers = append(signers, sig) + eqSet = append(eqSet, sig.NodeID()) + } + + mATX, otherAtxs := marryIDs(t, atxHndlr, signers, golden) + + // signer 1 creates a solo ATX + soloAtx := newSoloATXv2(t, mATX.PublishEpoch+1, otherAtxs[0].ID(), mATX.ID()) + soloAtx.Sign(signers[1]) + atxHndlr.expectAtxV2(soloAtx) + err := atxHndlr.processATX(context.Background(), "", soloAtx, time.Now()) + require.NoError(t, err) + + // create a MergedATX for all IDs + merged := newSoloATXv2(t, mATX.PublishEpoch+2, mATX.ID(), mATX.ID()) + post := wire.SubPostV2{ + MarriageIndex: 1, + PrevATXIndex: 1, + NumUnits: soloAtx.TotalNumUnits(), + } + merged.NiPosts[0].Posts = append(merged.NiPosts[0].Posts, post) + // Pass a wrong previous ATX for signer 1. It's already been used for soloATX + // (which should be used for the previous ATX for signer 1). + merged.PreviousATXs = append(merged.PreviousATXs, otherAtxs[0].ID()) + matxID := mATX.ID() + merged.MarriageATX = &matxID + merged.Sign(signers[0]) + + atxHndlr.expectMergedAtxV2(merged, eqSet, []uint64{100}) + atxHndlr.mMalPublish.EXPECT().Publish(gomock.Any(), signers[1].NodeID(), gomock.Any()) + err = atxHndlr.processATX(context.Background(), "", merged, time.Now()) + require.NoError(t, err) +} + func Test_CalculatingWeight(t *testing.T) { t.Parallel() t.Run("total weight must not overflow uint64", func(t *testing.T) { diff --git a/activation/wire/malfeasance.go b/activation/wire/malfeasance.go index 019a52d6cc..c857dd075b 100644 --- a/activation/wire/malfeasance.go +++ b/activation/wire/malfeasance.go @@ -33,10 +33,11 @@ const ( LegacyInvalidPost ProofType = 0x01 LegacyInvalidPrevATX ProofType = 0x02 - DoublePublish ProofType = 0x10 - DoubleMarry ProofType = 0x11 - DoubleMerge ProofType = 0x12 - InvalidPost ProofType = 0x13 + DoublePublish ProofType = 0x10 + DoubleMarry ProofType = 0x11 + DoubleMerge ProofType = 0x12 + InvalidPost ProofType = 0x13 + InvalidPrevious ProofType = 0x14 ) // ProofVersion is an identifier for the version of the proof that is encoded in the ATXProof. diff --git a/sql/atxs/atxs.go b/sql/atxs/atxs.go index 248dd99a0d..9d749a035b 100644 --- a/sql/atxs/atxs.go +++ b/sql/atxs/atxs.go @@ -245,7 +245,8 @@ func GetLastIDByNodeID(db sql.Executor, nodeID types.NodeID) (id types.ATXID, er } // PrevIDByNodeID returns the previous ATX ID for a given node ID and public epoch. -// It returns the newest ATX ID that was published before the given public epoch. +// It returns the newest ATX ID containing PoST of the given node ID +// that was published before the given public epoch. 
func PrevIDByNodeID(db sql.Executor, nodeID types.NodeID, pubEpoch types.EpochID) (id types.ATXID, err error) { enc := func(stmt *sql.Statement) { stmt.BindBytes(1, nodeID.Bytes()) @@ -257,10 +258,10 @@ func PrevIDByNodeID(db sql.Executor, nodeID types.NodeID, pubEpoch types.EpochID } if rows, err := db.Exec(` - select id from atxs - where pubkey = ?1 and epoch < ?2 - order by epoch desc - limit 1;`, enc, dec); err != nil { + SELECT posts.atxid FROM posts JOIN atxs ON posts.atxid = atxs.id + WHERE posts.pubkey = ?1 AND atxs.epoch < ?2 + ORDER BY atxs.epoch DESC + LIMIT 1;`, enc, dec); err != nil { return types.EmptyATXID, fmt.Errorf("exec nodeID %v, epoch %d: %w", nodeID, pubEpoch, err) } else if rows == 0 { return types.EmptyATXID, fmt.Errorf("exec nodeID %s, epoch %d: %w", nodeID, pubEpoch, sql.ErrNotFound) @@ -861,46 +862,26 @@ func IterateAtxIdsWithMalfeasance( return err } -type PrevATXCollision struct { - NodeID1 types.NodeID - ATX1 types.ATXID - - NodeID2 types.NodeID - ATX2 types.ATXID -} - -func PrevATXCollisions(db sql.Executor) ([]PrevATXCollision, error) { - var result []PrevATXCollision - +func PrevATXCollision(db sql.Executor, prev types.ATXID, id types.NodeID) (types.ATXID, types.ATXID, error) { + var atxs []types.ATXID + enc := func(stmt *sql.Statement) { + stmt.BindBytes(1, prev[:]) + stmt.BindBytes(2, id[:]) + } dec := func(stmt *sql.Statement) bool { - var nodeID1, nodeID2 types.NodeID - stmt.ColumnBytes(0, nodeID1[:]) - stmt.ColumnBytes(1, nodeID2[:]) - - var id1, id2 types.ATXID - stmt.ColumnBytes(2, id1[:]) - stmt.ColumnBytes(3, id2[:]) - - result = append(result, PrevATXCollision{ - NodeID1: nodeID1, - ATX1: id1, - - NodeID2: nodeID2, - ATX2: id2, - }) - return true + var id types.ATXID + stmt.ColumnBytes(0, id[:]) + atxs = append(atxs, id) + return len(atxs) < 2 } - // we are joining the table with itself to find ATXs with the same prevATX - // the WHERE clause ensures that we only get the pairs once - if _, err := db.Exec(` - SELECT p1.pubkey, p2.pubkey, p1.atxid, p2.atxid - FROM posts p1 - INNER JOIN posts p2 ON p1.prev_atxid = p2.prev_atxid - WHERE p1.atxid < p2.atxid;`, nil, dec); err != nil { - return nil, fmt.Errorf("error getting ATXs with same prevATX: %w", err) + _, err := db.Exec("SELECT atxid FROM posts WHERE prev_atxid = ?1 AND pubkey = ?2;", enc, dec) + if err != nil { + return types.EmptyATXID, types.EmptyATXID, fmt.Errorf("error getting ATXs with same prevATX: %w", err) } - - return result, nil + if len(atxs) != 2 { + return types.EmptyATXID, types.EmptyATXID, sql.ErrNotFound + } + return atxs[0], atxs[1], nil } func Units(db sql.Executor, atxID types.ATXID, nodeID types.NodeID) (uint32, error) { diff --git a/sql/atxs/atxs_test.go b/sql/atxs/atxs_test.go index eb507fe969..aaebce0755 100644 --- a/sql/atxs/atxs_test.go +++ b/sql/atxs/atxs_test.go @@ -1023,7 +1023,7 @@ func TestLatest(t *testing.T) { } } -func Test_PrevATXCollisions(t *testing.T) { +func Test_PrevATXCollision(t *testing.T) { db := sql.InMemory() sig, err := signing.NewEdSigner() require.NoError(t, err) @@ -1048,29 +1048,29 @@ func Test_PrevATXCollisions(t *testing.T) { require.NoError(t, err) require.Equal(t, atx2, got2) - // add 10 valid ATXs by 10 other smeshers + // add 10 valid ATXs by 10 other smeshers, using the same previous but no collision + var otherIds []types.NodeID for i := 2; i < 6; i++ { otherSig, err := signing.NewEdSigner() require.NoError(t, err) + otherIds = append(otherIds, otherSig.NodeID()) - atx, blob := newAtx(t, otherSig, withPublishEpoch(types.EpochID(i))) - 
require.NoError(t, atxs.Add(db, atx, blob)) - - atx2, blob2 := newAtx(t, otherSig, - withPublishEpoch(types.EpochID(i+1)), - ) + atx2, blob2 := newAtx(t, otherSig, withPublishEpoch(types.EpochID(i+1))) require.NoError(t, atxs.Add(db, atx2, blob2)) - require.NoError(t, atxs.SetPost(db, atx2.ID(), atx.ID(), 0, sig.NodeID(), 10)) + require.NoError(t, atxs.SetPost(db, atx2.ID(), prevATXID, 0, atx2.SmesherID, 10)) } - // get the collisions - got, err := atxs.PrevATXCollisions(db) + collision1, collision2, err := atxs.PrevATXCollision(db, prevATXID, sig.NodeID()) require.NoError(t, err) - require.Len(t, got, 1) + require.ElementsMatch(t, []types.ATXID{atx1.ID(), atx2.ID()}, []types.ATXID{collision1, collision2}) - require.Equal(t, sig.NodeID(), got[0].NodeID1) - require.Equal(t, sig.NodeID(), got[0].NodeID2) - require.ElementsMatch(t, []types.ATXID{atx1.ID(), atx2.ID()}, []types.ATXID{got[0].ATX1, got[0].ATX2}) + _, _, err = atxs.PrevATXCollision(db, types.RandomATXID(), sig.NodeID()) + require.ErrorIs(t, err, sql.ErrNotFound) + + for _, id := range append(otherIds, types.RandomNodeID()) { + _, _, err := atxs.PrevATXCollision(db, prevATXID, id) + require.ErrorIs(t, err, sql.ErrNotFound) + } } func TestCoinbase(t *testing.T) { @@ -1362,3 +1362,56 @@ func Test_Previous(t *testing.T) { require.Equal(t, previousAtxs, got) }) } + +func TestPrevIDByNodeID(t *testing.T) { + t.Run("no previous ATXs", func(t *testing.T) { + db := sql.InMemory() + _, err := atxs.PrevIDByNodeID(db, types.RandomNodeID(), 0) + require.ErrorIs(t, err, sql.ErrNotFound) + }) + t.Run("filters by epoch", func(t *testing.T) { + db := sql.InMemory() + sig, err := signing.NewEdSigner() + require.NoError(t, err) + + atx1, blob1 := newAtx(t, sig, withPublishEpoch(1)) + require.NoError(t, atxs.Add(db, atx1, blob1)) + require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) + + atx2, blob2 := newAtx(t, sig, withPublishEpoch(2)) + require.NoError(t, atxs.Add(db, atx2, blob2)) + require.NoError(t, atxs.SetPost(db, atx2.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) + + _, err = atxs.PrevIDByNodeID(db, sig.NodeID(), 1) + require.ErrorIs(t, err, sql.ErrNotFound) + + prevID, err := atxs.PrevIDByNodeID(db, sig.NodeID(), 2) + require.NoError(t, err) + require.Equal(t, atx1.ID(), prevID) + + prevID, err = atxs.PrevIDByNodeID(db, sig.NodeID(), 3) + require.NoError(t, err) + require.Equal(t, atx2.ID(), prevID) + }) + t.Run("the previous is merged and ID is not the signer", func(t *testing.T) { + db := sql.InMemory() + sig, err := signing.NewEdSigner() + require.NoError(t, err) + id := types.RandomNodeID() + + atx1, blob1 := newAtx(t, sig, withPublishEpoch(1)) + require.NoError(t, atxs.Add(db, atx1, blob1)) + require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) + require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, id, 8)) + require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, types.RandomNodeID(), 12)) + + atx2, blob2 := newAtx(t, sig, withPublishEpoch(2)) + require.NoError(t, atxs.Add(db, atx2, blob2)) + require.NoError(t, atxs.SetPost(db, atx2.ID(), atx1.ID(), 0, sig.NodeID(), 4)) + require.NoError(t, atxs.SetPost(db, atx2.ID(), atx1.ID(), 0, types.RandomNodeID(), 12)) + + prevID, err := atxs.PrevIDByNodeID(db, id, 3) + require.NoError(t, err) + require.Equal(t, atx1.ID(), prevID) + }) +} From 3aae270ce09416588687f5daf5045b02161aa766 Mon Sep 17 00:00:00 2001 From: Matthias Fasching <5011972+fasmat@users.noreply.github.com> Date: Tue, 13 Aug 2024 12:48:39 
+0000 Subject: [PATCH 5/7] chore: remove dependency on `activation/wire` from tests outside `activation` (#6244) ## Motivation Most packages should be unaware of wire types and not depend on packages implementing them (including tests). This removes the dependency on `activation/wire` from most other packages. --- api/grpcserver/v2alpha1/activation_test.go | 25 +-- checkpoint/recovery_collecting_deps_test.go | 8 +- common/fixture/atxs.go | 47 ++-- datastore/store_test.go | 30 +-- fetch/handler_test.go | 39 ++-- fetch/p2p_test.go | 75 +++---- sql/atxs/atxs_test.go | 237 ++++++++++---------- 7 files changed, 204 insertions(+), 257 deletions(-) diff --git a/api/grpcserver/v2alpha1/activation_test.go b/api/grpcserver/v2alpha1/activation_test.go index 183bd11c8b..46a8391338 100644 --- a/api/grpcserver/v2alpha1/activation_test.go +++ b/api/grpcserver/v2alpha1/activation_test.go @@ -28,9 +28,8 @@ func TestActivationService_List(t *testing.T) { activations := make([]types.ActivationTx, 100) for i := range activations { atx := gen.Next() - vAtx := fixture.ToAtx(t, atx) - require.NoError(t, atxs.Add(db, vAtx, atx.Blob())) - activations[i] = *vAtx + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) + activations[i] = *atx } svc := NewActivationService(db) @@ -111,9 +110,8 @@ func TestActivationStreamService_Stream(t *testing.T) { activations := make([]types.ActivationTx, 100) for i := range activations { atx := gen.Next() - vAtx := fixture.ToAtx(t, atx) - require.NoError(t, atxs.Add(db, vAtx, atx.Blob())) - activations[i] = *vAtx + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) + activations[i] = *atx } svc := NewActivationStreamService(db) @@ -153,9 +151,8 @@ func TestActivationStreamService_Stream(t *testing.T) { gen = fixture.NewAtxsGenerator().WithEpochs(start, 10) var streamed []*events.ActivationTx for i := 0; i < n; i++ { - watx := gen.Next() - atx := fixture.ToAtx(t, watx) - require.NoError(t, atxs.Add(db, atx, watx.Blob())) + atx := gen.Next() + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) streamed = append(streamed, &events.ActivationTx{ActivationTx: atx}) } @@ -221,9 +218,8 @@ func TestActivationService_ActivationsCount(t *testing.T) { epoch3ATXs := make([]types.ActivationTx, 30) for i := range epoch3ATXs { atx := genEpoch3.Next() - vatx := fixture.ToAtx(t, atx) - require.NoError(t, atxs.Add(db, vatx, atx.Blob())) - epoch3ATXs[i] = *vatx + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) + epoch3ATXs[i] = *atx } genEpoch5 := fixture.NewAtxsGenerator().WithSeed(time.Now().UnixNano()+1). 
@@ -231,9 +227,8 @@ func TestActivationService_ActivationsCount(t *testing.T) { epoch5ATXs := make([]types.ActivationTx, 10) // ensure the number here is different from above for i := range epoch5ATXs { atx := genEpoch5.Next() - vatx := fixture.ToAtx(t, atx) - require.NoError(t, atxs.Add(db, vatx, atx.Blob())) - epoch5ATXs[i] = *vatx + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) + epoch5ATXs[i] = *atx } svc := NewActivationService(db) diff --git a/checkpoint/recovery_collecting_deps_test.go b/checkpoint/recovery_collecting_deps_test.go index 1df6ca0a9b..53df59841f 100644 --- a/checkpoint/recovery_collecting_deps_test.go +++ b/checkpoint/recovery_collecting_deps_test.go @@ -8,7 +8,6 @@ import ( "golang.org/x/exp/maps" "github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/common/fixture" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/atxs" @@ -29,7 +28,10 @@ func TestCollectingDeps(t *testing.T) { }, SmesherID: types.RandomNodeID(), } - require.NoError(t, atxs.Add(db, fixture.ToAtx(t, marriageATX), marriageATX.Blob())) + atx := wire.ActivationTxFromWireV1(marriageATX) + atx.SetReceived(time.Now().Local()) + atx.TickCount = 1 + require.NoError(t, atxs.Add(db, atx, marriageATX.Blob())) mAtxID := marriageATX.ID() watx := &wire.ActivationTxV2{ @@ -37,7 +39,7 @@ func TestCollectingDeps(t *testing.T) { SmesherID: types.RandomNodeID(), MarriageATX: &mAtxID, } - atx := &types.ActivationTx{ + atx = &types.ActivationTx{ SmesherID: watx.SmesherID, } atx.SetID(watx.ID()) diff --git a/common/fixture/atxs.go b/common/fixture/atxs.go index af3ead08f2..dc61c18b8b 100644 --- a/common/fixture/atxs.go +++ b/common/fixture/atxs.go @@ -2,13 +2,10 @@ package fixture import ( "math/rand" - "testing" "time" - "github.com/spacemeshos/go-spacemesh/activation/wire" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/genvm/sdk/wallet" - "github.com/spacemeshos/go-spacemesh/signing" ) // NewAtxsGenerator with some random parameters. @@ -42,38 +39,20 @@ func (g *AtxsGenerator) WithEpochs(start, n int) *AtxsGenerator { return g } -// Next generates VerifiedActivationTx. -func (g *AtxsGenerator) Next() *wire.ActivationTxV1 { - var prevAtxId types.ATXID - g.rng.Read(prevAtxId[:]) - var posAtxId types.ATXID - g.rng.Read(posAtxId[:]) - - signer, err := signing.NewEdSigner(signing.WithKeyFromRand(g.rng)) - if err != nil { - panic("failed to create signer") - } - - atx := &wire.ActivationTxV1{ - InnerActivationTxV1: wire.InnerActivationTxV1{ - NIPostChallengeV1: wire.NIPostChallengeV1{ - Sequence: g.rng.Uint64(), - PrevATXID: prevAtxId, - PublishEpoch: g.Epochs[g.rng.Intn(len(g.Epochs))], - PositioningATXID: posAtxId, - }, - Coinbase: wallet.Address(signer.PublicKey().Bytes()), - NumUnits: g.rng.Uint32(), - }, +// Next generates ActivationTx. 
+func (g *AtxsGenerator) Next() *types.ActivationTx { + var nodeID types.NodeID + g.rng.Read(nodeID[:]) + + atx := &types.ActivationTx{ + Sequence: g.rng.Uint64(), + PublishEpoch: g.Epochs[g.rng.Intn(len(g.Epochs))], + Coinbase: wallet.Address(nodeID.Bytes()), + NumUnits: g.rng.Uint32(), + TickCount: 1, + SmesherID: nodeID, } - atx.Sign(signer) - return atx -} - -func ToAtx(t testing.TB, watx *wire.ActivationTxV1) *types.ActivationTx { - t.Helper() - atx := wire.ActivationTxFromWireV1(watx) + atx.SetID(types.RandomATXID()) atx.SetReceived(time.Now().Local()) - atx.TickCount = 1 return atx } diff --git a/datastore/store_test.go b/datastore/store_test.go index e928e1b226..b3ae9fe55b 100644 --- a/datastore/store_test.go +++ b/datastore/store_test.go @@ -10,9 +10,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" - "github.com/spacemeshos/go-spacemesh/activation/wire" "github.com/spacemeshos/go-spacemesh/codec" - "github.com/spacemeshos/go-spacemesh/common/fixture" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/datastore" mwire "github.com/spacemeshos/go-spacemesh/malfeasance/wire" @@ -85,19 +83,14 @@ func TestBlobStore_GetATXBlob(t *testing.T) { bs := datastore.NewBlobStore(db, store.New()) ctx := context.Background() - atx := &wire.ActivationTxV1{ - InnerActivationTxV1: wire.InnerActivationTxV1{ - NIPostChallengeV1: wire.NIPostChallengeV1{ - PublishEpoch: types.EpochID(22), - Sequence: 11, - }, - NumUnits: 11, - }, + atx := &types.ActivationTx{ + PublishEpoch: types.EpochID(22), + Sequence: 11, + NumUnits: 11, + SmesherID: types.RandomNodeID(), } - signer, err := signing.NewEdSigner() - require.NoError(t, err) - atx.Sign(signer) - vAtx := fixture.ToAtx(t, atx) + atx.SetID(types.RandomATXID()) + atx.SetReceived(time.Now().Local()) has, err := bs.Has(datastore.ATXDB, atx.ID().Bytes()) require.NoError(t, err) @@ -106,18 +99,15 @@ func TestBlobStore_GetATXBlob(t *testing.T) { _, err = getBytes(ctx, bs, datastore.ATXDB, atx.ID()) require.ErrorIs(t, err, datastore.ErrNotFound) - require.NoError(t, atxs.Add(db, vAtx, atx.Blob())) + blob := types.AtxBlob{Blob: types.RandomBytes(100)} + require.NoError(t, atxs.Add(db, atx, blob)) has, err = bs.Has(datastore.ATXDB, atx.ID().Bytes()) require.NoError(t, err) require.True(t, has) got, err := getBytes(ctx, bs, datastore.ATXDB, atx.ID()) require.NoError(t, err) - - gotA, err := wire.DecodeAtxV1(got) - require.NoError(t, err) - require.Equal(t, atx.ID(), gotA.ID()) - require.Equal(t, atx, gotA) + require.Equal(t, blob.Blob, got) _, err = getBytes(ctx, bs, datastore.BallotDB, atx.ID()) require.ErrorIs(t, err, datastore.ErrNotFound) diff --git a/fetch/handler_test.go b/fetch/handler_test.go index d42900a28a..24749e1a2d 100644 --- a/fetch/handler_test.go +++ b/fetch/handler_test.go @@ -9,9 +9,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" - "github.com/spacemeshos/go-spacemesh/activation/wire" "github.com/spacemeshos/go-spacemesh/codec" - "github.com/spacemeshos/go-spacemesh/common/fixture" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/datastore" "github.com/spacemeshos/go-spacemesh/p2p/server" @@ -262,23 +260,16 @@ func TestHandleMeshHashReq(t *testing.T) { } } -func newAtx(t *testing.T, published types.EpochID) (*types.ActivationTx, types.AtxBlob) { - t.Helper() - nonce := uint64(123) - signer, err := signing.NewEdSigner() - require.NoError(t, err) - atx := &wire.ActivationTxV1{ - InnerActivationTxV1: 
wire.InnerActivationTxV1{ - NIPostChallengeV1: wire.NIPostChallengeV1{ - PublishEpoch: published, - PrevATXID: types.RandomATXID(), - }, - NumUnits: 2, - VRFNonce: &nonce, - }, +func newAtx(t *testing.T, published types.EpochID) *types.ActivationTx { + atx := &types.ActivationTx{ + PublishEpoch: published, + NumUnits: 2, + VRFNonce: types.VRFPostIndex(123), + SmesherID: types.RandomNodeID(), } - atx.Sign(signer) - return fixture.ToAtx(t, atx), atx.Blob() + atx.SetID(types.RandomATXID()) + atx.SetReceived(time.Now().Local()) + return atx } func TestHandleEpochInfoReq(t *testing.T) { @@ -304,8 +295,8 @@ func TestHandleEpochInfoReq(t *testing.T) { var expected EpochData if !tc.missingData { for i := 0; i < 10; i++ { - vatx, blob := newAtx(t, epoch) - require.NoError(t, atxs.Add(th.cdb, vatx, blob)) + vatx := newAtx(t, epoch) + require.NoError(t, atxs.Add(th.cdb, vatx, types.AtxBlob{})) expected.AtxIDs = append(expected.AtxIDs, vatx.ID()) } } @@ -353,8 +344,8 @@ func testHandleEpochInfoReqWithQueryCache( var expected EpochData for i := 0; i < 10; i++ { - vatx, blob := newAtx(t, epoch) - require.NoError(t, atxs.Add(th.cdb, vatx, blob)) + vatx := newAtx(t, epoch) + require.NoError(t, atxs.Add(th.cdb, vatx, types.AtxBlob{})) atxs.AtxAdded(th.cdb, vatx) expected.AtxIDs = append(expected.AtxIDs, vatx.ID()) } @@ -372,8 +363,8 @@ func testHandleEpochInfoReqWithQueryCache( } // Add another ATX which should be appended to the cached slice - vatx, blob := newAtx(t, epoch) - require.NoError(t, atxs.Add(th.cdb, vatx, blob)) + vatx := newAtx(t, epoch) + require.NoError(t, atxs.Add(th.cdb, vatx, types.AtxBlob{})) atxs.AtxAdded(th.cdb, vatx) expected.AtxIDs = append(expected.AtxIDs, vatx.ID()) require.Equal(t, 23, qc.QueryCount()) diff --git a/fetch/p2p_test.go b/fetch/p2p_test.go index a9dc85a825..b76929320c 100644 --- a/fetch/p2p_test.go +++ b/fetch/p2p_test.go @@ -171,8 +171,8 @@ func createP2PFetch( func (tpf *testP2PFetch) createATXs(epoch types.EpochID) []types.ATXID { atxIDs := make([]types.ATXID, 10) for i := range atxIDs { - atx, blob := newAtx(tpf.t, epoch) - require.NoError(tpf.t, atxs.Add(tpf.serverCDB, atx, blob)) + atx := newAtx(tpf.t, epoch) + require.NoError(tpf.t, atxs.Add(tpf.serverCDB, atx, types.AtxBlob{})) atxIDs[i] = atx.ID() } return atxIDs @@ -351,15 +351,14 @@ func TestP2PGetATXs(t *testing.T) { t, "database: no free connection", func(t *testing.T, ctx context.Context, tpf *testP2PFetch, errStr string) { epoch := types.EpochID(11) - atx, blob := newAtx(tpf.t, epoch) + atx := newAtx(tpf.t, epoch) + blob := types.AtxBlob{Blob: types.RandomBytes(100)} require.NoError(tpf.t, atxs.Add(tpf.serverCDB, atx, blob)) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetAtxs( - context.Background(), []types.ATXID{atx.ID()}) - }, + func() error { return tpf.clientFetch.GetAtxs(context.Background(), []types.ATXID{atx.ID()}) }, errStr, "atx", "hs/1", types.Hash32(atx.ID()), atx.ID().Bytes(), - blob.Blob) + blob.Blob, + ) }) } @@ -368,17 +367,13 @@ func TestP2PGetPoet(t *testing.T) { t, "database: no free connection", false, func(t *testing.T, ctx context.Context, tpf *testP2PFetch, errStr string) { ref := types.PoetProofRef{0x42, 0x43} - require.NoError(t, poets.Add( - tpf.serverCDB, ref, - []byte("proof1"), []byte("sid1"), "rid1")) + require.NoError(t, poets.Add(tpf.serverCDB, ref, []byte("proof1"), []byte("sid1"), "rid1")) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetPoetProof( - context.Background(), types.Hash32(ref)) - }, + func() error { return 
tpf.clientFetch.GetPoetProof(context.Background(), types.Hash32(ref)) }, errStr, "poet", "hs/1", types.Hash32(ref), ref[:], - []byte("proof1")) + []byte("proof1"), + ) }) } @@ -397,12 +392,10 @@ func TestP2PGetBallot(t *testing.T) { require.NoError(t, ballots.Add(tpf.serverCDB, b)) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetBallots( - context.Background(), []types.BallotID{b.ID()}) - }, + func() error { return tpf.clientFetch.GetBallots(context.Background(), []types.BallotID{b.ID()}) }, errStr, "ballot", "hs/1", b.ID().AsHash32(), b.ID().Bytes(), - codec.MustEncode(b)) + codec.MustEncode(b), + ) }) } @@ -418,11 +411,10 @@ func TestP2PGetActiveSet(t *testing.T) { require.NoError(tpf.t, activesets.Add(tpf.serverCDB, id, set)) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetActiveSet(context.Background(), id) - }, + func() error { return tpf.clientFetch.GetActiveSet(context.Background(), id) }, errStr, "activeset", "as/1", id, id.Bytes(), - codec.MustEncode(set)) + codec.MustEncode(set), + ) }) } @@ -435,12 +427,10 @@ func TestP2PGetBlock(t *testing.T) { require.NoError(t, blocks.Add(tpf.serverCDB, bk)) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetBlocks( - context.Background(), []types.BlockID{bk.ID()}) - }, + func() error { return tpf.clientFetch.GetBlocks(context.Background(), []types.BlockID{bk.ID()}) }, errStr, "block", "hs/1", bk.ID().AsHash32(), bk.ID().Bytes(), - codec.MustEncode(bk)) + codec.MustEncode(bk), + ) }) } @@ -488,12 +478,10 @@ func TestP2PGetBlockTransactions(t *testing.T) { tx := genTx(t, signer, types.Address{1}, 1, 1, 1) require.NoError(t, transactions.Add(tpf.serverCDB, &tx, time.Now())) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetBlockTxs( - context.Background(), []types.TransactionID{tx.ID}) - }, + func() error { return tpf.clientFetch.GetBlockTxs(context.Background(), []types.TransactionID{tx.ID}) }, errStr, "txBlock", "hs/1", types.Hash32(tx.ID), tx.ID.Bytes(), - tx.Raw) + tx.Raw, + ) }) } @@ -507,11 +495,11 @@ func TestP2PGetProposalTransactions(t *testing.T) { require.NoError(t, transactions.Add(tpf.serverCDB, &tx, time.Now())) tpf.verifyGetHash( func() error { - return tpf.clientFetch.GetProposalTxs( - context.Background(), []types.TransactionID{tx.ID}) + return tpf.clientFetch.GetProposalTxs(context.Background(), []types.TransactionID{tx.ID}) }, errStr, "txProposal", "hs/1", types.Hash32(tx.ID), tx.ID.Bytes(), - tx.Raw) + tx.Raw, + ) }) } @@ -521,14 +509,11 @@ func TestP2PGetMalfeasanceProofs(t *testing.T) { func(t *testing.T, ctx context.Context, tpf *testP2PFetch, errStr string) { nid := types.RandomNodeID() proof := types.RandomBytes(11) - require.NoError(t, identities.SetMalicious( - tpf.serverCDB, nid, proof, time.Now())) + require.NoError(t, identities.SetMalicious(tpf.serverCDB, nid, proof, time.Now())) tpf.verifyGetHash( - func() error { - return tpf.clientFetch.GetMalfeasanceProofs( - context.Background(), []types.NodeID{nid}) - }, + func() error { return tpf.clientFetch.GetMalfeasanceProofs(context.Background(), []types.NodeID{nid}) }, errStr, "mal", "hs/1", types.Hash32(nid), nid.Bytes(), - proof) + proof, + ) }) } diff --git a/sql/atxs/atxs_test.go b/sql/atxs/atxs_test.go index aaebce0755..67c7811715 100644 --- a/sql/atxs/atxs_test.go +++ b/sql/atxs/atxs_test.go @@ -3,6 +3,7 @@ package atxs_test import ( "context" "errors" + "fmt" "os" "slices" "testing" @@ -11,8 +12,6 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/rand" - 
"github.com/spacemeshos/go-spacemesh/activation/wire" - "github.com/spacemeshos/go-spacemesh/common/fixture" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/signing" "github.com/spacemeshos/go-spacemesh/sql" @@ -36,9 +35,9 @@ func TestGet(t *testing.T) { for i := 0; i < 3; i++ { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) + atx := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) atxList = append(atxList, atx) - require.NoError(t, atxs.Add(db, atx, blob)) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) } for _, want := range atxList { @@ -58,8 +57,8 @@ func TestAll(t *testing.T) { for i := 0; i < 3; i++ { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) expected = append(expected, atx.ID()) } @@ -75,8 +74,8 @@ func TestHasID(t *testing.T) { for i := 0; i < 3; i++ { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig, withPublishEpoch(types.EpochID(i))) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) atxList = append(atxList, atx) } @@ -101,8 +100,8 @@ func Test_IdentityExists(t *testing.T) { require.NoError(t, err) require.False(t, yes) - atx, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) yes, err = atxs.IdentityExists(db, sig.NodeID()) require.NoError(t, err) @@ -120,10 +119,10 @@ func TestGetFirstIDByNodeID(t *testing.T) { require.NoError(t, err) // Arrange - atx1, _ := newAtx(t, sig1, withPublishEpoch(1)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(2), withSequence(atx1.Sequence+1)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(3)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(4), withSequence(atx3.Sequence+1)) + atx1 := newAtx(t, sig1, withPublishEpoch(1)) + atx2 := newAtx(t, sig1, withPublishEpoch(2), withSequence(atx1.Sequence+1)) + atx3 := newAtx(t, sig2, withPublishEpoch(3)) + atx4 := newAtx(t, sig2, withPublishEpoch(4), withSequence(atx3.Sequence+1)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -152,12 +151,12 @@ func TestLatestN(t *testing.T) { sig3, err := signing.NewEdSigner() require.NoError(t, err) - atx1, _ := newAtx(t, sig1, withPublishEpoch(1), withSequence(0)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(2), withSequence(1)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(3), withSequence(1)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(4), withSequence(2)) - atx5, _ := newAtx(t, sig2, withPublishEpoch(5), withSequence(3)) - atx6, _ := newAtx(t, sig3, withPublishEpoch(1), withSequence(0)) + atx1 := newAtx(t, sig1, withPublishEpoch(1), withSequence(0)) + atx2 := newAtx(t, sig1, withPublishEpoch(2), withSequence(1)) + atx3 := newAtx(t, sig2, withPublishEpoch(3), withSequence(1)) + atx4 := newAtx(t, sig2, withPublishEpoch(4), withSequence(2)) + atx5 := newAtx(t, sig2, withPublishEpoch(5), withSequence(3)) + atx6 := newAtx(t, sig3, withPublishEpoch(1), withSequence(0)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4, atx5, atx6} { require.NoError(t, atxs.Add(db, atx, 
types.AtxBlob{})) @@ -242,8 +241,8 @@ func TestGetByEpochAndNodeID(t *testing.T) { sig2, err := signing.NewEdSigner() require.NoError(t, err) - atx1, _ := newAtx(t, sig1, withPublishEpoch(1)) - atx2, _ := newAtx(t, sig2, withPublishEpoch(2)) + atx1 := newAtx(t, sig1, withPublishEpoch(1)) + atx2 := newAtx(t, sig2, withPublishEpoch(2)) for _, atx := range []*types.ActivationTx{atx1, atx2} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -277,10 +276,10 @@ func TestGetLastIDByNodeID(t *testing.T) { require.NoError(t, err) // Arrange - atx1, _ := newAtx(t, sig1, withPublishEpoch(1)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(2), withSequence(atx1.Sequence+1)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(3)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(4), withSequence(atx3.Sequence+1)) + atx1 := newAtx(t, sig1, withPublishEpoch(1)) + atx2 := newAtx(t, sig1, withPublishEpoch(2), withSequence(atx1.Sequence+1)) + atx3 := newAtx(t, sig2, withPublishEpoch(3)) + atx4 := newAtx(t, sig2, withPublishEpoch(4), withSequence(atx3.Sequence+1)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -312,10 +311,10 @@ func TestGetIDByEpochAndNodeID(t *testing.T) { e2 := types.EpochID(2) e3 := types.EpochID(3) - atx1, _ := newAtx(t, sig1, withPublishEpoch(e1)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(e2)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(e2)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(e3)) + atx1 := newAtx(t, sig1, withPublishEpoch(e1)) + atx2 := newAtx(t, sig1, withPublishEpoch(e2)) + atx3 := newAtx(t, sig2, withPublishEpoch(e2)) + atx4 := newAtx(t, sig2, withPublishEpoch(e3)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -357,10 +356,10 @@ func TestGetIDsByEpoch(t *testing.T) { e2 := types.EpochID(2) e3 := types.EpochID(3) - atx1, _ := newAtx(t, sig1, withPublishEpoch(e1)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(e2)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(e2)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(e3)) + atx1 := newAtx(t, sig1, withPublishEpoch(e1)) + atx2 := newAtx(t, sig1, withPublishEpoch(e2)) + atx3 := newAtx(t, sig2, withPublishEpoch(e2)) + atx4 := newAtx(t, sig2, withPublishEpoch(e3)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -393,12 +392,12 @@ func TestGetIDsByEpochCached(t *testing.T) { e2 := types.EpochID(2) e3 := types.EpochID(3) - atx1, _ := newAtx(t, sig1, withPublishEpoch(e1)) - atx2, _ := newAtx(t, sig1, withPublishEpoch(e2)) - atx3, _ := newAtx(t, sig2, withPublishEpoch(e2)) - atx4, _ := newAtx(t, sig2, withPublishEpoch(e3)) - atx5, _ := newAtx(t, sig2, withPublishEpoch(e3)) - atx6, _ := newAtx(t, sig2, withPublishEpoch(e3)) + atx1 := newAtx(t, sig1, withPublishEpoch(e1)) + atx2 := newAtx(t, sig1, withPublishEpoch(e2)) + atx3 := newAtx(t, sig2, withPublishEpoch(e2)) + atx4 := newAtx(t, sig2, withPublishEpoch(e3)) + atx5 := newAtx(t, sig2, withPublishEpoch(e3)) + atx6 := newAtx(t, sig2, withPublishEpoch(e3)) for _, atx := range []*types.ActivationTx{atx1, atx2, atx3, atx4} { require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) @@ -462,8 +461,8 @@ func Test_IterateAtxsWithMalfeasance(t *testing.T) { for i := uint32(0); i < 20; i++ { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(types.EpochID(i/4))) - require.NoError(t, atxs.Add(db, atx, blob)) + 
atx := newAtx(t, sig, withPublishEpoch(types.EpochID(i/4))) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) malicious := (i % 2) == 0 m[atx.ID()] = malicious if malicious { @@ -492,8 +491,8 @@ func Test_IterateAtxIdsWithMalfeasance(t *testing.T) { for i := uint32(0); i < 20; i++ { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(types.EpochID(i/4))) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig, withPublishEpoch(types.EpochID(i/4))) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) malicious := (i % 2) == 0 m[atx.ID()] = malicious if malicious { @@ -521,11 +520,11 @@ func TestVRFNonce(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx1, blob := newAtx(t, sig, withPublishEpoch(20), withNonce(333)) - require.NoError(t, atxs.Add(db, atx1, blob)) + atx1 := newAtx(t, sig, withPublishEpoch(20), withNonce(333)) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) - atx2, blob := newAtx(t, sig, withPublishEpoch(50), withNonce(777)) - require.NoError(t, atxs.Add(db, atx2, blob)) + atx2 := newAtx(t, sig, withPublishEpoch(50), withNonce(777)) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) // Act & Assert @@ -554,7 +553,11 @@ func TestLoadBlob(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx1, blob := newAtx(t, sig, withPublishEpoch(1)) + atx1 := newAtx(t, sig, withPublishEpoch(1)) + blob := types.AtxBlob{ + Blob: []byte("blob"), + Version: types.AtxV1, + } require.NoError(t, atxs.Add(db, atx1, blob)) var blob1 sql.Blob @@ -569,9 +572,11 @@ func TestLoadBlob(t *testing.T) { require.Equal(t, []int{len(blob1.Bytes)}, blobSizes) var blob2 sql.Blob - atx2, blob := newAtx(t, sig) - blob.Blob = []byte("blob2 of different size") - blob.Version = types.AtxV2 + atx2 := newAtx(t, sig) + blob = types.AtxBlob{ + Blob: []byte("blob2 of different size"), + Version: types.AtxV2, + } require.NoError(t, atxs.Add(db, atx2, blob)) version, err = atxs.LoadBlob(ctx, db, atx2.ID().Bytes(), &blob2) @@ -605,8 +610,11 @@ func TestLoadBlob_DefaultsToV1(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig) - blob.Version = 0 + atx := newAtx(t, sig) + blob := types.AtxBlob{ + Blob: []byte("blob"), + Version: 0, + } require.NoError(t, atxs.Add(db, atx, blob)) @@ -623,7 +631,8 @@ func TestGetBlobCached(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withPublishEpoch(1)) + atx := newAtx(t, sig, withPublishEpoch(1)) + blob := types.AtxBlob{Blob: []byte("blob")} require.NoError(t, atxs.Add(db, atx, blob)) require.Equal(t, 2, db.QueryCount()) // insert atx + blob @@ -703,7 +712,8 @@ func TestCachedBlobEviction(t *testing.T) { blobs := make([][]byte, 11) var b sql.Blob for n := range addedATXs { - atx, blob := newAtx(t, sig, withPublishEpoch(1)) + atx := newAtx(t, sig, withPublishEpoch(1)) + blob := types.AtxBlob{Blob: []byte(fmt.Sprintf("blob %d", n))} require.NoError(t, atxs.Add(db, atx, blob)) addedATXs[n] = atx blobs[n] = blob.Blob @@ -736,7 +746,7 @@ func TestCheckpointATX(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, _ := newAtx(t, sig, withPublishEpoch(3), withSequence(4)) + atx := newAtx(t, sig, withPublishEpoch(3), withSequence(4)) catx := &atxs.CheckpointAtx{ ID: atx.ID(), Epoch: atx.PublishEpoch, @@ -786,10 +796,10 @@ func TestAdd(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := 
newAtx(t, sig, withPublishEpoch(1)) + atx := newAtx(t, sig, withPublishEpoch(1)) - require.NoError(t, atxs.Add(db, atx, blob)) - require.ErrorIs(t, atxs.Add(db, atx, blob), sql.ErrObjectExists) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) + require.ErrorIs(t, atxs.Add(db, atx, types.AtxBlob{}), sql.ErrObjectExists) got, err := atxs.Get(db, atx.ID()) require.NoError(t, err) @@ -822,24 +832,19 @@ func withCoinbase(addr types.Address) createAtxOpt { } } -func newAtx(t testing.TB, signer *signing.EdSigner, opts ...createAtxOpt) (*types.ActivationTx, types.AtxBlob) { - nonce := uint64(123) - watx := &wire.ActivationTxV1{ - InnerActivationTxV1: wire.InnerActivationTxV1{ - NIPostChallengeV1: wire.NIPostChallengeV1{ - PrevATXID: types.RandomATXID(), - }, - NumUnits: 2, - VRFNonce: &nonce, - }, +func newAtx(t testing.TB, signer *signing.EdSigner, opts ...createAtxOpt) *types.ActivationTx { + atx := &types.ActivationTx{ + NumUnits: 2, + TickCount: 1, + VRFNonce: types.VRFPostIndex(123), + SmesherID: signer.NodeID(), } - watx.Sign(signer) - - atx := fixture.ToAtx(t, watx) + atx.SetID(types.RandomATXID()) + atx.SetReceived(time.Now().Local()) for _, opt := range opts { opt(atx) } - return atx, watx.Blob() + return atx } type header struct { @@ -1031,12 +1036,12 @@ func Test_PrevATXCollision(t *testing.T) { // create two ATXs with the same PrevATXID prevATXID := types.RandomATXID() - atx1, blob1 := newAtx(t, sig, withPublishEpoch(1)) - atx2, blob2 := newAtx(t, sig, withPublishEpoch(2)) + atx1 := newAtx(t, sig, withPublishEpoch(1)) + atx2 := newAtx(t, sig, withPublishEpoch(2)) - require.NoError(t, atxs.Add(db, atx1, blob1)) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), prevATXID, 0, sig.NodeID(), 10)) - require.NoError(t, atxs.Add(db, atx2, blob2)) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx2.ID(), prevATXID, 0, sig.NodeID(), 10)) // verify that the ATXs were added @@ -1055,8 +1060,8 @@ func Test_PrevATXCollision(t *testing.T) { require.NoError(t, err) otherIds = append(otherIds, otherSig.NodeID()) - atx2, blob2 := newAtx(t, otherSig, withPublishEpoch(types.EpochID(i+1))) - require.NoError(t, atxs.Add(db, atx2, blob2)) + atx2 := newAtx(t, otherSig, withPublishEpoch(types.EpochID(i+1))) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx2.ID(), prevATXID, 0, atx2.SmesherID, 10)) } @@ -1086,8 +1091,8 @@ func TestCoinbase(t *testing.T) { db := sql.InMemory() sig, err := signing.NewEdSigner() require.NoError(t, err) - atx, blob := newAtx(t, sig, withCoinbase(types.Address{1, 2, 3})) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig, withCoinbase(types.Address{1, 2, 3})) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) cb, err := atxs.Coinbase(db, sig.NodeID()) require.NoError(t, err) require.Equal(t, atx.Coinbase, cb) @@ -1097,10 +1102,10 @@ func TestCoinbase(t *testing.T) { db := sql.InMemory() sig, err := signing.NewEdSigner() require.NoError(t, err) - atx1, blob1 := newAtx(t, sig, withPublishEpoch(1), withCoinbase(types.Address{1, 2, 3})) - atx2, blob2 := newAtx(t, sig, withPublishEpoch(2), withCoinbase(types.Address{4, 5, 6})) - require.NoError(t, atxs.Add(db, atx1, blob1)) - require.NoError(t, atxs.Add(db, atx2, blob2)) + atx1 := newAtx(t, sig, withPublishEpoch(1), withCoinbase(types.Address{1, 2, 3})) + atx2 := newAtx(t, sig, withPublishEpoch(2), withCoinbase(types.Address{4, 5, 6})) + require.NoError(t, 
atxs.Add(db, atx1, types.AtxBlob{})) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) cb, err := atxs.Coinbase(db, sig.NodeID()) require.NoError(t, err) require.Equal(t, atx2.Coinbase, cb) @@ -1162,8 +1167,8 @@ func Test_AtxWithPrevious(t *testing.T) { db := sql.InMemory() prev := types.RandomATXID() - atx, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx.ID(), prev, 0, sig.NodeID(), 10)) id, err := atxs.AtxWithPrevious(db, prev, sig.NodeID()) @@ -1173,8 +1178,8 @@ func Test_AtxWithPrevious(t *testing.T) { t.Run("finds other ATX with same previous (empty)", func(t *testing.T) { db := sql.InMemory() - atx, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx.ID(), types.EmptyATXID, 0, sig.NodeID(), 10)) id, err := atxs.AtxWithPrevious(db, types.EmptyATXID, sig.NodeID()) @@ -1188,12 +1193,12 @@ func Test_AtxWithPrevious(t *testing.T) { require.NoError(t, err) prev := types.RandomATXID() - atx, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx, blob)) + atx := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx.ID(), prev, 0, sig.NodeID(), 10)) - atx2, blob := newAtx(t, sig2) - require.NoError(t, atxs.Add(db, atx2, blob)) + atx2 := newAtx(t, sig2) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx2.ID(), prev, 0, sig2.NodeID(), 10)) id, err := atxs.AtxWithPrevious(db, prev, sig.NodeID()) @@ -1222,16 +1227,16 @@ func Test_FindDoublePublish(t *testing.T) { db := sql.InMemory() // one atx - atx0, blob := newAtx(t, sig, withPublishEpoch(1)) - require.NoError(t, atxs.Add(db, atx0, blob)) + atx0 := newAtx(t, sig, withPublishEpoch(1)) + require.NoError(t, atxs.Add(db, atx0, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx0.ID(), types.EmptyATXID, 0, atx0.SmesherID, 10)) _, err = atxs.FindDoublePublish(db, atx0.SmesherID, atx0.PublishEpoch) require.ErrorIs(t, err, sql.ErrNotFound) // two atxs in different epochs - atx1, blob := newAtx(t, sig, withPublishEpoch(atx0.PublishEpoch+1)) - require.NoError(t, atxs.Add(db, atx1, blob)) + atx1 := newAtx(t, sig, withPublishEpoch(atx0.PublishEpoch+1)) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, atx0.SmesherID, 10)) _, err = atxs.FindDoublePublish(db, atx0.SmesherID, atx0.PublishEpoch) @@ -1241,17 +1246,17 @@ func Test_FindDoublePublish(t *testing.T) { t.Parallel() db := sql.InMemory() - atx0, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx0, blob)) + atx0 := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx0, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx0.ID(), types.EmptyATXID, 0, atx0.SmesherID, 10)) - atx1, blob := newAtx(t, sig) - require.NoError(t, atxs.Add(db, atx1, blob)) + atx1 := newAtx(t, sig) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, atx0.SmesherID, 10)) - atxids, err := atxs.FindDoublePublish(db, atx0.SmesherID, atx0.PublishEpoch) + atxIDs, err := atxs.FindDoublePublish(db, atx0.SmesherID, atx0.PublishEpoch) require.NoError(t, err) - require.ElementsMatch(t, []types.ATXID{atx0.ID(), atx1.ID()}, atxids) + require.ElementsMatch(t, []types.ATXID{atx0.ID(), 
atx1.ID()}, atxIDs) // filters by epoch _, err = atxs.FindDoublePublish(db, atx0.SmesherID, atx0.PublishEpoch+1) @@ -1264,16 +1269,16 @@ func Test_FindDoublePublish(t *testing.T) { atx0Signer, err := signing.NewEdSigner() require.NoError(t, err) - atx0, blob := newAtx(t, atx0Signer) - require.NoError(t, atxs.Add(db, atx0, blob)) + atx0 := newAtx(t, atx0Signer) + require.NoError(t, atxs.Add(db, atx0, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx0.ID(), types.EmptyATXID, 0, atx0.SmesherID, 10)) require.NoError(t, atxs.SetPost(db, atx0.ID(), types.EmptyATXID, 0, sig.NodeID(), 10)) atx1Signer, err := signing.NewEdSigner() require.NoError(t, err) - atx1, blob := newAtx(t, atx1Signer) - require.NoError(t, atxs.Add(db, atx1, blob)) + atx1 := newAtx(t, atx1Signer) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, atx1.SmesherID, 10)) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, sig.NodeID(), 10)) @@ -1374,12 +1379,12 @@ func TestPrevIDByNodeID(t *testing.T) { sig, err := signing.NewEdSigner() require.NoError(t, err) - atx1, blob1 := newAtx(t, sig, withPublishEpoch(1)) - require.NoError(t, atxs.Add(db, atx1, blob1)) + atx1 := newAtx(t, sig, withPublishEpoch(1)) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) - atx2, blob2 := newAtx(t, sig, withPublishEpoch(2)) - require.NoError(t, atxs.Add(db, atx2, blob2)) + atx2 := newAtx(t, sig, withPublishEpoch(2)) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx2.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) _, err = atxs.PrevIDByNodeID(db, sig.NodeID(), 1) @@ -1399,14 +1404,14 @@ func TestPrevIDByNodeID(t *testing.T) { require.NoError(t, err) id := types.RandomNodeID() - atx1, blob1 := newAtx(t, sig, withPublishEpoch(1)) - require.NoError(t, atxs.Add(db, atx1, blob1)) + atx1 := newAtx(t, sig, withPublishEpoch(1)) + require.NoError(t, atxs.Add(db, atx1, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, sig.NodeID(), 4)) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, id, 8)) require.NoError(t, atxs.SetPost(db, atx1.ID(), types.EmptyATXID, 0, types.RandomNodeID(), 12)) - atx2, blob2 := newAtx(t, sig, withPublishEpoch(2)) - require.NoError(t, atxs.Add(db, atx2, blob2)) + atx2 := newAtx(t, sig, withPublishEpoch(2)) + require.NoError(t, atxs.Add(db, atx2, types.AtxBlob{})) require.NoError(t, atxs.SetPost(db, atx2.ID(), atx1.ID(), 0, sig.NodeID(), 4)) require.NoError(t, atxs.SetPost(db, atx2.ID(), atx1.ID(), 0, types.RandomNodeID(), 12)) From d372ea3081a6dd50b986e3bd51a03643ae84c090 Mon Sep 17 00:00:00 2001 From: Matthias Fasching <5011972+fasmat@users.noreply.github.com> Date: Tue, 13 Aug 2024 12:48:41 +0000 Subject: [PATCH 6/7] Cleanup code and changelog (#6251) ## Motivation This updates the changelog after the release of `v1.6.6-hotfix1` and fixes a few typos in code. 
--- CHANGELOG.md | 7 ++ activation/handler_v2.go | 103 +++++++++--------- .../wire/malfeasance_double_marry_test.go | 16 +-- hare3/hare.go | 32 +++--- hare3/hare_test.go | 6 +- hare4/hare.go | 36 +++--- hare4/hare_test.go | 6 +- p2p/pubsub/mocks/publisher.go | 58 +++++----- p2p/pubsub/pubsub.go | 4 +- 9 files changed, 138 insertions(+), 130 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e237b3aafb..971c69322f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,13 @@ such and the node will continue to scan new ATXs for their validity. ### Improvements +## Release v1.6.6-hotfix1 + +### Improvements + +* [#6248](https://github.com/spacemeshos/go-spacemesh/pull/6248) Fixed node not being able to handle more than 6.55M + ATXs per epoch. + ## Release v1.6.6 ### Improvements diff --git a/activation/handler_v2.go b/activation/handler_v2.go index 99342c43e8..230472e819 100644 --- a/activation/handler_v2.go +++ b/activation/handler_v2.go @@ -32,6 +32,8 @@ import ( "github.com/spacemeshos/go-spacemesh/system" ) +var errAtxNotV2 = errors.New("ATX is not V2") + type nipostValidatorV2 interface { IsVerifyingFullPost() bool VRFNonceV2(smesherID types.NodeID, commitment types.ATXID, vrfNonce uint64, numUnits uint32) error @@ -666,96 +668,101 @@ func (h *HandlerV2) syntacticallyValidateDeps( return &result, nil } -func (h *HandlerV2) checkMalicious(ctx context.Context, tx *sql.Tx, atx *activationTx) error { +func (h *HandlerV2) checkMalicious(ctx context.Context, tx *sql.Tx, atx *activationTx) (bool, error) { malicious, err := identities.IsMalicious(tx, atx.SmesherID) if err != nil { - return fmt.Errorf("checking if node is malicious: %w", err) + return malicious, fmt.Errorf("checking if node is malicious: %w", err) } if malicious { - return nil + return true, nil } malicious, err = h.checkDoubleMarry(ctx, tx, atx) if err != nil { - return fmt.Errorf("checking double marry: %w", err) + return malicious, fmt.Errorf("checking double marry: %w", err) } if malicious { - return nil + return true, nil } malicious, err = h.checkDoublePost(ctx, tx, atx) if err != nil { - return fmt.Errorf("checking double post: %w", err) + return malicious, fmt.Errorf("checking double post: %w", err) } if malicious { - return nil + return true, nil } malicious, err = h.checkDoubleMerge(ctx, tx, atx) if err != nil { - return fmt.Errorf("checking double merge: %w", err) + return malicious, fmt.Errorf("checking double merge: %w", err) } if malicious { - return nil + return true, nil } malicious, err = h.checkPrevAtx(ctx, tx, atx) if err != nil { - return fmt.Errorf("checking previous ATX: %w", err) - } - if malicious { - return nil + return malicious, fmt.Errorf("checking previous ATX: %w", err) } - // TODO(mafa): contextual validation: - // 1. check double-publish = ID contributed post to two ATXs in the same epoch - // 2. check previous ATX - // 3 ID already married (same node ID in multiple marriage certificates) - // 4. 
two ATXs referencing the same marriage certificate in the same epoch - return nil + return malicious, err +} + +func (h *HandlerV2) fetchWireAtx(ctx context.Context, tx *sql.Tx, id types.ATXID) (*wire.ActivationTxV2, error) { + var blob sql.Blob + v, err := atxs.LoadBlob(ctx, tx, id.Bytes(), &blob) + if err != nil { + return nil, fmt.Errorf("get atx blob %s: %w", id.ShortString(), err) + } + if v != types.AtxV2 { + return nil, errAtxNotV2 + } + atx := &wire.ActivationTxV2{} + codec.MustDecode(blob.Bytes, atx) + return atx, nil } func (h *HandlerV2) checkDoubleMarry(ctx context.Context, tx *sql.Tx, atx *activationTx) (bool, error) { for _, m := range atx.marriages { - mATX, err := identities.MarriageATX(tx, m.id) + mATXID, err := identities.MarriageATX(tx, m.id) if err != nil { return false, fmt.Errorf("checking if ID is married: %w", err) } - if mATX != atx.ID() { - var blob sql.Blob - v, err := atxs.LoadBlob(ctx, tx, mATX.Bytes(), &blob) - if err != nil { - return true, fmt.Errorf("creating double marry proof: %w", err) - } - if v != types.AtxV2 { - h.logger.Fatal("Failed to create double marry malfeasance proof: ATX is not v2", - zap.Stringer("atx_id", mATX), - ) - } - var otherAtx wire.ActivationTxV2 - codec.MustDecode(blob.Bytes, &otherAtx) + if mATXID == atx.ID() { + continue + } - proof, err := wire.NewDoubleMarryProof(tx, atx.ActivationTxV2, &otherAtx, m.id) - if err != nil { - return true, fmt.Errorf("creating double marry proof: %w", err) - } - return true, h.malPublisher.Publish(ctx, m.id, proof) + otherAtx, err := h.fetchWireAtx(ctx, tx, mATXID) + switch { + case errors.Is(err, errAtxNotV2): + h.logger.Fatal("Failed to create double marry malfeasance proof: ATX is not v2", + zap.Stringer("atx_id", mATXID), + ) + case err != nil: + return false, fmt.Errorf("fetching other ATX: %w", err) } + + proof, err := wire.NewDoubleMarryProof(tx, atx.ActivationTxV2, otherAtx, m.id) + if err != nil { + return true, fmt.Errorf("creating double marry proof: %w", err) + } + return true, h.malPublisher.Publish(ctx, m.id, proof) } return false, nil } func (h *HandlerV2) checkDoublePost(ctx context.Context, tx *sql.Tx, atx *activationTx) (bool, error) { for id := range atx.ids { - atxids, err := atxs.FindDoublePublish(tx, id, atx.PublishEpoch) + atxIDs, err := atxs.FindDoublePublish(tx, id, atx.PublishEpoch) switch { case errors.Is(err, sql.ErrNotFound): continue case err != nil: return false, fmt.Errorf("searching for double publish: %w", err) } - otherAtxId := slices.IndexFunc(atxids, func(other types.ATXID) bool { return other != atx.ID() }) - otherAtx := atxids[otherAtxId] + otherAtxId := slices.IndexFunc(atxIDs, func(other types.ATXID) bool { return other != atx.ID() }) + otherAtx := atxIDs[otherAtxId] h.logger.Debug( "found ID that has already contributed its PoST in this epoch", zap.Stringer("node_id", id), @@ -866,22 +873,16 @@ func (h *HandlerV2) storeAtx(ctx context.Context, atx *types.ActivationTx, watx atxs.AtxAdded(h.cdb, atx) - var malicious bool + malicious := false err := h.cdb.WithTx(ctx, func(tx *sql.Tx) error { // malfeasance check happens after storing the ATX because storing updates the marriage set // that is needed for the malfeasance proof // TODO(mafa): don't store own ATX if it would mark the node as malicious // this probably needs to be done by validating and storing own ATXs eagerly and skipping validation in // the gossip handler (not sync!) 
- err := h.checkMalicious(ctx, tx, watx) - if err != nil { - return fmt.Errorf("check malicious: %w", err) - } - malicious, err = identities.IsMalicious(tx, watx.SmesherID) - if err != nil { - return fmt.Errorf("checking if identity is malicious: %w", err) - } - return nil + var err error + malicious, err = h.checkMalicious(ctx, tx, watx) + return err }) if err != nil { return fmt.Errorf("check malicious: %w", err) diff --git a/activation/wire/malfeasance_double_marry_test.go b/activation/wire/malfeasance_double_marry_test.go index 351aa265ed..77bd73ce05 100644 --- a/activation/wire/malfeasance_double_marry_test.go +++ b/activation/wire/malfeasance_double_marry_test.go @@ -21,7 +21,7 @@ func Test_DoubleMarryProof(t *testing.T) { require.NoError(t, err) t.Run("valid", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) otherAtx := &types.ActivationTx{} otherAtx.SetID(types.RandomATXID()) otherAtx.SmesherID = otherSig.NodeID() @@ -50,7 +50,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("does not contain same certificate owner", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) atx1 := newActivationTxV2( withMarriageCertificate(sig, types.EmptyATXID, sig.NodeID()), @@ -79,7 +79,7 @@ func Test_DoubleMarryProof(t *testing.T) { atx1 := newActivationTxV2() atx1.Sign(sig) - db := sql.InMemory() + db := sql.InMemoryTest(t) proof, err := NewDoubleMarryProof(db, atx1, atx1, sig.NodeID()) require.ErrorContains(t, err, "ATXs have the same ID") require.Nil(t, proof) @@ -103,7 +103,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("invalid marriage proof", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) otherAtx := &types.ActivationTx{} otherAtx.SetID(types.RandomATXID()) otherAtx.SmesherID = otherSig.NodeID() @@ -150,7 +150,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("invalid certificate proof", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) otherAtx := &types.ActivationTx{} otherAtx.SetID(types.RandomATXID()) otherAtx.SmesherID = otherSig.NodeID() @@ -197,7 +197,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("invalid atx signature", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) otherAtx := &types.ActivationTx{} otherAtx.SetID(types.RandomATXID()) otherAtx.SmesherID = otherSig.NodeID() @@ -233,7 +233,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("invalid certificate signature", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) otherAtx := &types.ActivationTx{} otherAtx.SetID(types.RandomATXID()) otherAtx.SmesherID = otherSig.NodeID() @@ -269,7 +269,7 @@ func Test_DoubleMarryProof(t *testing.T) { }) t.Run("unknown reference ATX", func(t *testing.T) { - db := sql.InMemory() + db := sql.InMemoryTest(t) atx1 := newActivationTxV2( withMarriageCertificate(sig, types.EmptyATXID, sig.NodeID()), diff --git a/hare3/hare.go b/hare3/hare.go index 8538243a18..5943b71e93 100644 --- a/hare3/hare.go +++ b/hare3/hare.go @@ -129,9 +129,9 @@ type WeakCoinOutput struct { type Opt func(*Hare) -func WithWallclock(clock clockwork.Clock) Opt { +func WithWallClock(clock clockwork.Clock) Opt { return func(hr *Hare) { - hr.wallclock = clock + hr.wallClock = clock } } @@ -163,15 +163,15 @@ func WithResultsChan(c chan hare4.ConsensusOutput) Opt { } } -type nodeclock interface { +type nodeClock interface { AwaitLayer(types.LayerID) <-chan struct{} CurrentLayer() types.LayerID LayerToTime(types.LayerID) time.Time } func New( - nodeclock nodeclock, 
- pubsub pubsub.PublishSubsciber, + nodeClock nodeClock, + pubsub pubsub.PublishSubscriber, db *sql.Database, atxsdata *atxsdata.Data, proposals *store.Store, @@ -192,9 +192,9 @@ func New( config: DefaultConfig(), log: zap.NewNop(), - wallclock: clockwork.NewRealClock(), + wallClock: clockwork.NewRealClock(), - nodeclock: nodeclock, + nodeClock: nodeClock, pubsub: pubsub, db: db, atxsdata: atxsdata, @@ -229,11 +229,11 @@ type Hare struct { // options config Config log *zap.Logger - wallclock clockwork.Clock + wallClock clockwork.Clock // dependencies - nodeclock nodeclock - pubsub pubsub.PublishSubsciber + nodeClock nodeClock + pubsub pubsub.PublishSubscriber db *sql.Database atxsdata *atxsdata.Data proposals *store.Store @@ -261,7 +261,7 @@ func (h *Hare) Coins() <-chan hare4.WeakCoinOutput { func (h *Hare) Start() { h.pubsub.Register(h.config.ProtocolName, h.Handler, pubsub.WithValidatorInline(true)) - current := h.nodeclock.CurrentLayer() + 1 + current := h.nodeClock.CurrentLayer() + 1 enabled := max(current, h.config.EnableLayer, types.GetEffectiveGenesis()+1) disabled := types.LayerID(math.MaxUint32) if h.config.DisableLayer > 0 { @@ -275,7 +275,7 @@ func (h *Hare) Start() { h.eg.Go(func() error { for next := enabled; next < disabled; next++ { select { - case <-h.nodeclock.AwaitLayer(next): + case <-h.nodeClock.AwaitLayer(next): h.log.Debug("notified", zap.Uint32("lid", next.Uint32())) h.onLayer(next) case <-h.ctx.Done(): @@ -349,7 +349,7 @@ func (h *Hare) Handler(ctx context.Context, peer p2p.Peer, buf []byte) error { droppedMessages.Inc() return errors.New("dropped by graded gossip") } - expected := h.nodeclock.LayerToTime(msg.Layer).Add(h.config.roundStart(msg.IterRound)) + expected := h.nodeClock.LayerToTime(msg.Layer).Add(h.config.roundStart(msg.IterRound)) metrics.ReportMessageLatency(h.config.ProtocolName, msg.Round.String(), time.Since(expected)) return nil } @@ -426,12 +426,12 @@ func (h *Hare) run(session *session) error { h.tracer.OnActive(session.vrfs) activeLatency.Observe(time.Since(start).Seconds()) - walltime := h.nodeclock.LayerToTime(session.lid).Add(h.config.PreroundDelay) + walltime := h.nodeClock.LayerToTime(session.lid).Add(h.config.PreroundDelay) if active { h.log.Debug("active in preround. 
waiting for preround delay", zap.Uint32("lid", session.lid.Uint32())) // initial set is not needed if node is not active in preround select { - case <-h.wallclock.After(walltime.Sub(h.wallclock.Now())): + case <-h.wallClock.After(walltime.Sub(h.wallClock.Now())): case <-h.ctx.Done(): return h.ctx.Err() } @@ -459,7 +459,7 @@ func (h *Hare) run(session *session) error { activeLatency.Observe(time.Since(start).Seconds()) select { - case <-h.wallclock.After(walltime.Sub(h.wallclock.Now())): + case <-h.wallClock.After(walltime.Sub(h.wallClock.Now())): h.log.Debug("execute round", zap.Uint32("lid", session.lid.Uint32()), zap.Uint8("iter", session.proto.Iter), zap.Stringer("round", session.proto.Round), diff --git a/hare3/hare_test.go b/hare3/hare_test.go index 0030041ead..6ca0b02b91 100644 --- a/hare3/hare_test.go +++ b/hare3/hare_test.go @@ -121,7 +121,7 @@ type node struct { proposals *store.Store ctrl *gomock.Controller - mpublisher *pmocks.MockPublishSubsciber + mpublisher *pmocks.MockPublishSubscriber msyncer *smocks.MockSyncStateProvider patrol *layerpatrol.LayerPatrol tracer *testTracer @@ -203,7 +203,7 @@ func (n *node) withOracle() *node { } func (n *node) withPublisher() *node { - n.mpublisher = pmocks.NewMockPublishSubsciber(n.ctrl) + n.mpublisher = pmocks.NewMockPublishSubscriber(n.ctrl) n.mpublisher.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() return n } @@ -230,7 +230,7 @@ func (n *node) withHare() *node { n.patrol, WithConfig(n.t.cfg), WithLogger(logger), - WithWallclock(n.clock), + WithWallClock(n.clock), WithTracer(tracer), ) n.register(n.signer) diff --git a/hare4/hare.go b/hare4/hare.go index 7789c6c723..9ce948dbd8 100644 --- a/hare4/hare.go +++ b/hare4/hare.go @@ -151,9 +151,9 @@ func WithServer(s streamRequester) Opt { } } -func WithWallclock(clock clockwork.Clock) Opt { +func WithWallClock(clock clockwork.Clock) Opt { return func(hr *Hare) { - hr.wallclock = clock + hr.wallClock = clock } } @@ -185,19 +185,19 @@ func WithResultsChan(c chan ConsensusOutput) Opt { } } -type nodeclock interface { +type nodeClock interface { AwaitLayer(types.LayerID) <-chan struct{} CurrentLayer() types.LayerID LayerToTime(types.LayerID) time.Time } func New( - nodeclock nodeclock, - pubsub pubsub.PublishSubsciber, + nodeClock nodeClock, + pubsub pubsub.PublishSubscriber, db *sql.Database, atxsdata *atxsdata.Data, proposals *store.Store, - verif verifier, + verifier verifier, oracle oracle, sync system.SyncStateProvider, patrol *layerpatrol.LayerPatrol, @@ -216,14 +216,14 @@ func New( config: DefaultConfig(), log: zap.NewNop(), - wallclock: clockwork.NewRealClock(), + wallClock: clockwork.NewRealClock(), - nodeclock: nodeclock, + nodeClock: nodeClock, pubsub: pubsub, db: db, atxsdata: atxsdata, proposals: proposals, - verifier: verif, + verifier: verifier, oracle: &legacyOracle{ log: zap.NewNop(), oracle: oracle, @@ -258,11 +258,11 @@ type Hare struct { // options config Config log *zap.Logger - wallclock clockwork.Clock + wallClock clockwork.Clock // dependencies - nodeclock nodeclock - pubsub pubsub.PublishSubsciber + nodeClock nodeClock + pubsub pubsub.PublishSubscriber db *sql.Database atxsdata *atxsdata.Data proposals *store.Store @@ -291,7 +291,7 @@ func (h *Hare) Coins() <-chan WeakCoinOutput { func (h *Hare) Start() { h.pubsub.Register(h.config.ProtocolName, h.Handler, pubsub.WithValidatorInline(true)) - current := h.nodeclock.CurrentLayer() + 1 + current := h.nodeClock.CurrentLayer() + 1 enabled := max(current, h.config.EnableLayer, 
types.GetEffectiveGenesis()+1) disabled := types.LayerID(math.MaxUint32) if h.config.DisableLayer > 0 { @@ -305,7 +305,7 @@ func (h *Hare) Start() { h.eg.Go(func() error { for next := enabled; next < disabled; next++ { select { - case <-h.nodeclock.AwaitLayer(next): + case <-h.nodeClock.AwaitLayer(next): h.log.Debug("notified", zap.Uint32("layer", next.Uint32())) h.onLayer(next) h.cleanMessageCache(next - 1) @@ -560,7 +560,7 @@ func (h *Hare) Handler(ctx context.Context, peer p2p.Peer, buf []byte) error { droppedMessages.Inc() return errors.New("dropped by graded gossip") } - expected := h.nodeclock.LayerToTime(msg.Layer).Add(h.config.roundStart(msg.IterRound)) + expected := h.nodeClock.LayerToTime(msg.Layer).Add(h.config.roundStart(msg.IterRound)) metrics.ReportMessageLatency(h.config.ProtocolName, msg.Round.String(), time.Since(expected)) return nil } @@ -637,12 +637,12 @@ func (h *Hare) run(session *session) error { h.tracer.OnActive(session.vrfs) activeLatency.Observe(time.Since(start).Seconds()) - walltime := h.nodeclock.LayerToTime(session.lid).Add(h.config.PreroundDelay) + walltime := h.nodeClock.LayerToTime(session.lid).Add(h.config.PreroundDelay) if active { h.log.Debug("active in preround. waiting for preround delay", zap.Uint32("lid", session.lid.Uint32())) // initial set is not needed if node is not active in preround select { - case <-h.wallclock.After(walltime.Sub(h.wallclock.Now())): + case <-h.wallClock.After(walltime.Sub(h.wallClock.Now())): case <-h.ctx.Done(): return h.ctx.Err() } @@ -670,7 +670,7 @@ func (h *Hare) run(session *session) error { activeLatency.Observe(time.Since(start).Seconds()) select { - case <-h.wallclock.After(walltime.Sub(h.wallclock.Now())): + case <-h.wallClock.After(walltime.Sub(h.wallClock.Now())): h.log.Debug("execute round", zap.Uint32("lid", session.lid.Uint32()), zap.Uint8("iter", session.proto.Iter), zap.Stringer("round", session.proto.Round), diff --git a/hare4/hare_test.go b/hare4/hare_test.go index be619142a0..e798a39f51 100644 --- a/hare4/hare_test.go +++ b/hare4/hare_test.go @@ -130,7 +130,7 @@ type node struct { proposals *store.Store ctrl *gomock.Controller - mpublisher *pmocks.MockPublishSubsciber + mpublisher *pmocks.MockPublishSubscriber msyncer *smocks.MockSyncStateProvider mverifier *hmock.Mockverifier mockStreamRequester *hmock.MockstreamRequester @@ -219,7 +219,7 @@ func (n *node) withOracle() *node { } func (n *node) withPublisher() *node { - n.mpublisher = pmocks.NewMockPublishSubsciber(n.ctrl) + n.mpublisher = pmocks.NewMockPublishSubscriber(n.ctrl) n.mpublisher.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() return n } @@ -258,7 +258,7 @@ func (n *node) withHare() *node { nil, WithConfig(n.t.cfg), WithLogger(logger), - WithWallclock(n.clock), + WithWallClock(n.clock), WithTracer(tracer), WithServer(n.mockStreamRequester), WithLogger(z), diff --git a/p2p/pubsub/mocks/publisher.go b/p2p/pubsub/mocks/publisher.go index 57b67d9db6..a94e468ae0 100644 --- a/p2p/pubsub/mocks/publisher.go +++ b/p2p/pubsub/mocks/publisher.go @@ -142,31 +142,31 @@ func (c *MockSubscriberRegisterCall) DoAndReturn(f func(string, pubsub.GossipHan return c } -// MockPublishSubsciber is a mock of PublishSubsciber interface. -type MockPublishSubsciber struct { +// MockPublishSubscriber is a mock of PublishSubscriber interface. 
+type MockPublishSubscriber struct { ctrl *gomock.Controller - recorder *MockPublishSubsciberMockRecorder + recorder *MockPublishSubscriberMockRecorder } -// MockPublishSubsciberMockRecorder is the mock recorder for MockPublishSubsciber. -type MockPublishSubsciberMockRecorder struct { - mock *MockPublishSubsciber +// MockPublishSubscriberMockRecorder is the mock recorder for MockPublishSubscriber. +type MockPublishSubscriberMockRecorder struct { + mock *MockPublishSubscriber } -// NewMockPublishSubsciber creates a new mock instance. -func NewMockPublishSubsciber(ctrl *gomock.Controller) *MockPublishSubsciber { - mock := &MockPublishSubsciber{ctrl: ctrl} - mock.recorder = &MockPublishSubsciberMockRecorder{mock} +// NewMockPublishSubscriber creates a new mock instance. +func NewMockPublishSubscriber(ctrl *gomock.Controller) *MockPublishSubscriber { + mock := &MockPublishSubscriber{ctrl: ctrl} + mock.recorder = &MockPublishSubscriberMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPublishSubsciber) EXPECT() *MockPublishSubsciberMockRecorder { +func (m *MockPublishSubscriber) EXPECT() *MockPublishSubscriberMockRecorder { return m.recorder } // Publish mocks base method. -func (m *MockPublishSubsciber) Publish(arg0 context.Context, arg1 string, arg2 []byte) error { +func (m *MockPublishSubscriber) Publish(arg0 context.Context, arg1 string, arg2 []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Publish", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -174,37 +174,37 @@ func (m *MockPublishSubsciber) Publish(arg0 context.Context, arg1 string, arg2 [ } // Publish indicates an expected call of Publish. -func (mr *MockPublishSubsciberMockRecorder) Publish(arg0, arg1, arg2 any) *MockPublishSubsciberPublishCall { +func (mr *MockPublishSubscriberMockRecorder) Publish(arg0, arg1, arg2 any) *MockPublishSubscriberPublishCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockPublishSubsciber)(nil).Publish), arg0, arg1, arg2) - return &MockPublishSubsciberPublishCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockPublishSubscriber)(nil).Publish), arg0, arg1, arg2) + return &MockPublishSubscriberPublishCall{Call: call} } -// MockPublishSubsciberPublishCall wrap *gomock.Call -type MockPublishSubsciberPublishCall struct { +// MockPublishSubscriberPublishCall wrap *gomock.Call +type MockPublishSubscriberPublishCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockPublishSubsciberPublishCall) Return(arg0 error) *MockPublishSubsciberPublishCall { +func (c *MockPublishSubscriberPublishCall) Return(arg0 error) *MockPublishSubscriberPublishCall { c.Call = c.Call.Return(arg0) return c } // Do rewrite *gomock.Call.Do -func (c *MockPublishSubsciberPublishCall) Do(f func(context.Context, string, []byte) error) *MockPublishSubsciberPublishCall { +func (c *MockPublishSubscriberPublishCall) Do(f func(context.Context, string, []byte) error) *MockPublishSubscriberPublishCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockPublishSubsciberPublishCall) DoAndReturn(f func(context.Context, string, []byte) error) *MockPublishSubsciberPublishCall { +func (c *MockPublishSubscriberPublishCall) DoAndReturn(f func(context.Context, string, []byte) error) *MockPublishSubscriberPublishCall { c.Call = c.Call.DoAndReturn(f) return c } // Register mocks base method. 
-func (m *MockPublishSubsciber) Register(arg0 string, arg1 pubsub.GossipHandler, arg2 ...pubsub.ValidatorOpt) { +func (m *MockPublishSubscriber) Register(arg0 string, arg1 pubsub.GossipHandler, arg2 ...pubsub.ValidatorOpt) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { @@ -214,32 +214,32 @@ func (m *MockPublishSubsciber) Register(arg0 string, arg1 pubsub.GossipHandler, } // Register indicates an expected call of Register. -func (mr *MockPublishSubsciberMockRecorder) Register(arg0, arg1 any, arg2 ...any) *MockPublishSubsciberRegisterCall { +func (mr *MockPublishSubscriberMockRecorder) Register(arg0, arg1 any, arg2 ...any) *MockPublishSubscriberRegisterCall { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1}, arg2...) - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockPublishSubsciber)(nil).Register), varargs...) - return &MockPublishSubsciberRegisterCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockPublishSubscriber)(nil).Register), varargs...) + return &MockPublishSubscriberRegisterCall{Call: call} } -// MockPublishSubsciberRegisterCall wrap *gomock.Call -type MockPublishSubsciberRegisterCall struct { +// MockPublishSubscriberRegisterCall wrap *gomock.Call +type MockPublishSubscriberRegisterCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockPublishSubsciberRegisterCall) Return() *MockPublishSubsciberRegisterCall { +func (c *MockPublishSubscriberRegisterCall) Return() *MockPublishSubscriberRegisterCall { c.Call = c.Call.Return() return c } // Do rewrite *gomock.Call.Do -func (c *MockPublishSubsciberRegisterCall) Do(f func(string, pubsub.GossipHandler, ...pubsub.ValidatorOpt)) *MockPublishSubsciberRegisterCall { +func (c *MockPublishSubscriberRegisterCall) Do(f func(string, pubsub.GossipHandler, ...pubsub.ValidatorOpt)) *MockPublishSubscriberRegisterCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockPublishSubsciberRegisterCall) DoAndReturn(f func(string, pubsub.GossipHandler, ...pubsub.ValidatorOpt)) *MockPublishSubsciberRegisterCall { +func (c *MockPublishSubscriberRegisterCall) DoAndReturn(f func(string, pubsub.GossipHandler, ...pubsub.ValidatorOpt)) *MockPublishSubscriberRegisterCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/p2p/pubsub/pubsub.go b/p2p/pubsub/pubsub.go index c27d33ae37..b64dd2f859 100644 --- a/p2p/pubsub/pubsub.go +++ b/p2p/pubsub/pubsub.go @@ -133,8 +133,8 @@ var ( WithValidatorConcurrency = pubsub.WithValidatorConcurrency ) -// PublishSubsciber common interface for publisher and subscribing. -type PublishSubsciber interface { +// PublishSubscriber common interface for publisher and subscribing. 
+type PublishSubscriber interface { Publisher Subscriber } From dbcaf5c8daa19da062fc5c6e9b1cbcb251c986d5 Mon Sep 17 00:00:00 2001 From: Matthias Fasching <5011972+fasmat@users.noreply.github.com> Date: Tue, 13 Aug 2024 16:02:45 +0000 Subject: [PATCH 7/7] Fix response data slice too small (#6248) ## Motivation The response message object needs to be increased in size to allow 8.0 million ATXs to be processed by the node. --- fetch/wire_types.go | 3 ++- p2p/server/server.go | 2 +- p2p/server/server_scale.go | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fetch/wire_types.go b/fetch/wire_types.go index 725b6af613..5ac39fb23b 100644 --- a/fetch/wire_types.go +++ b/fetch/wire_types.go @@ -121,7 +121,8 @@ type MaliciousIDs struct { type EpochData struct { // When changing this value also check - // - the size of `ResponseMessage` above + // - the size of `ResponseMessage.Data` above + // - the size of `Response.Data` in `p2p/server/server.go` // - the size of `NodeIDs` in `MaliciousIDs` above // - the size of `Set` in `EpochActiveSet` in common/types/activation.go // - the size of `EligibilityProofs` in the type `Ballot` in common/types/ballot.go diff --git a/p2p/server/server.go b/p2p/server/server.go index a1ead5c8d9..e64c7ce925 100644 --- a/p2p/server/server.go +++ b/p2p/server/server.go @@ -139,7 +139,7 @@ func (err *ServerError) Error() string { // Response is a server response. type Response struct { // keep in line with limit of ResponseMessage.Data in `fetch/wire_types.go` - Data []byte `scale:"max=209715200"` // 200 MiB > 6.0 mio ATX * 32 bytes per ID + Data []byte `scale:"max=272629760"` // 260 MiB > 8.0 mio ATX * 32 bytes per ID Error string `scale:"max=1024"` // TODO(mafa): make error code instead of string } diff --git a/p2p/server/server_scale.go b/p2p/server/server_scale.go index 03cce911d5..a66d669414 100644 --- a/p2p/server/server_scale.go +++ b/p2p/server/server_scale.go @@ -9,7 +9,7 @@ import ( func (t *Response) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 209715200) + n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 272629760) if err != nil { return total, err } @@ -27,7 +27,7 @@ func (t *Response) EncodeScale(enc *scale.Encoder) (total int, err error) { func (t *Response) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeByteSliceWithLimit(dec, 209715200) + field, n, err := scale.DecodeByteSliceWithLimit(dec, 272629760) if err != nil { return total, err }
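The constants above can be sanity-checked against the ATX counts cited in the changelog entry and the motivation: the `scale` limit caps how many 32-byte ATX IDs fit into a single response. Below is a minimal Go sketch of that arithmetic, added as an illustrative aside; it uses only the constants visible in the diff and is not part of the patch itself.

```go
// Sketch: how the scale limits on Response.Data translate into the maximum
// number of 32-byte ATX IDs that fit into a single fetch response.
package main

import "fmt"

func main() {
	const (
		oldLimit   = 209715200 // 200 MiB, previous `scale:"max=..."` value
		newLimit   = 272629760 // 260 MiB, value introduced by this patch
		atxIDBytes = 32        // size of one ATX ID
	)
	fmt.Println(oldLimit/atxIDBytes, "IDs fit under the old limit") // 6553600 (~6.55M)
	fmt.Println(newLimit/atxIDBytes, "IDs fit under the new limit") // 8519680 (>8M)
}
```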