Commit

build(golangci): add unparam linter

acud committed Aug 15, 2024
1 parent bc0848d commit 3e3263b
Showing 19 changed files with 80 additions and 92 deletions.
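For context: unparam reports function parameters (and results) that are never read in the function body, which is exactly the cleanup this commit applies across the 19 files; the linter itself is presumably enabled via the linters list in .golangci.yml, one of the changed files not shown in this view. A minimal illustrative sketch of what unparam catches (not code from this repository):

package main

import "fmt"

// Greet never reads its ctx parameter. This compiles fine in Go, but
// unparam flags the parameter as unused, prompting its removal — the
// same refactoring applied to the functions in the diff below.
func Greet(ctx string, name string) string {
	return fmt.Sprintf("hello, %s", name)
}

func main() {
	fmt.Println(Greet("never used", "world"))
}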
18 changes: 8 additions & 10 deletions api/grpcserver/post_service_test.go
@@ -29,7 +29,6 @@ import (
func launchPostSupervisor(
tb testing.TB,
log *zap.Logger,
cfg Config,
serviceCfg activation.PostSupervisorConfig,
postOpts activation.PostSetupOpts,
) (types.NodeID, func()) {
@@ -74,7 +73,6 @@ func launchPostSupervisor(
func launchPostSupervisorTLS(
tb testing.TB,
log *zap.Logger,
cfg Config,
serviceCfg activation.PostSupervisorConfig,
postOpts activation.PostSetupOpts,
) (types.NodeID, func()) {
@@ -102,7 +100,7 @@ func launchPostSupervisorTLS(
close(ch)
return ch
})
db := sql.InMemory()
db := sql.InMemoryTest(t)

Check failure on line 103 in api/grpcserver/post_service_test.go (GitHub Actions: coverage, lint, and all unittests jobs): undefined: t
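All of the failures above have the same cause: launchPostSupervisorTLS receives its testing handle as tb (a testing.TB), so the identifier t is undefined in that scope. A likely fix, assuming sql.InMemoryTest accepts a testing.TB, is:

db := sql.InMemoryTest(tb) // use the function's tb parameter; t does not exist here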
logger := log.Named("post supervisor")
mgr, err := activation.NewPostSetupManager(postCfg, logger, db, atxsdata.New(), goldenATXID, syncer, validator)
require.NoError(tb, err)
@@ -130,7 +128,7 @@ func Test_GenerateProof(t *testing.T) {
serviceCfg := activation.DefaultTestPostServiceConfig()
serviceCfg.NodeAddress = fmt.Sprintf("http://%s", cfg.PublicListener)

id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), cfg, serviceCfg, opts)
id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), serviceCfg, opts)
t.Cleanup(postCleanup)

var client activation.PostClient
@@ -181,7 +179,7 @@ func Test_GenerateProof_TLS(t *testing.T) {
serviceCfg.Cert = filepath.Join(certDir, clientCertName)
serviceCfg.Key = filepath.Join(certDir, clientKeyName)

id, postCleanup := launchPostSupervisorTLS(t, log.Named("supervisor"), cfg, serviceCfg, opts)
id, postCleanup := launchPostSupervisorTLS(t, log.Named("supervisor"), serviceCfg, opts)
t.Cleanup(postCleanup)

var client activation.PostClient
@@ -228,7 +226,7 @@ func Test_GenerateProof_Cancel(t *testing.T) {
serviceCfg := activation.DefaultTestPostServiceConfig()
serviceCfg.NodeAddress = fmt.Sprintf("http://%s", cfg.PublicListener)

id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), cfg, serviceCfg, opts)
id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), serviceCfg, opts)
t.Cleanup(postCleanup)

var client activation.PostClient
@@ -268,7 +266,7 @@ func Test_Metadata(t *testing.T) {
serviceCfg := activation.DefaultTestPostServiceConfig()
serviceCfg.NodeAddress = fmt.Sprintf("http://%s", cfg.PublicListener)

id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), cfg, serviceCfg, opts)
id, postCleanup := launchPostSupervisor(t, log.Named("supervisor"), serviceCfg, opts)
t.Cleanup(postCleanup)

var client activation.PostClient
@@ -313,15 +311,15 @@ func Test_GenerateProof_MultipleServices(t *testing.T) {
serviceCfg.NodeAddress = fmt.Sprintf("http://%s", cfg.PublicListener)

// all but one should not be able to register to the node (i.e. open a stream to it).
id, postCleanup := launchPostSupervisor(t, log.Named("supervisor1"), cfg, serviceCfg, opts)
id, postCleanup := launchPostSupervisor(t, log.Named("supervisor1"), serviceCfg, opts)
t.Cleanup(postCleanup)

opts.DataDir = t.TempDir()
_, postCleanup = launchPostSupervisor(t, log.Named("supervisor2"), cfg, serviceCfg, opts)
_, postCleanup = launchPostSupervisor(t, log.Named("supervisor2"), serviceCfg, opts)
t.Cleanup(postCleanup)

opts.DataDir = t.TempDir()
_, postCleanup = launchPostSupervisor(t, log.Named("supervisor3"), cfg, serviceCfg, opts)
_, postCleanup = launchPostSupervisor(t, log.Named("supervisor3"), serviceCfg, opts)
t.Cleanup(postCleanup)

var client activation.PostClient
16 changes: 5 additions & 11 deletions api/grpcserver/v2alpha1/layer.go
@@ -63,10 +63,7 @@ func (s *LayerStreamService) Stream(
dbChan := make(chan *spacemeshv2alpha1.Layer, 100)
errChan := make(chan error, 1)

ops, err := toLayerOperations(toLayerRequest(request))
if err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
ops := toLayerOperations(toLayerRequest(request))
// send db data to chan to avoid buffer overflow
go func() {
defer close(dbChan)
@@ -201,10 +198,7 @@ func (s *LayerService) List(
return nil, status.Error(codes.InvalidArgument, "limit must be set to <= 100")
}

ops, err := toLayerOperations(request)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ops := toLayerOperations(request)

rst := make([]*spacemeshv2alpha1.Layer, 0, request.Limit)
if err := layers.IterateLayersWithBlockOps(s.db, ops, func(layer *layers.Layer) bool {
@@ -217,10 +211,10 @@
return &spacemeshv2alpha1.LayerList{Layers: rst}, nil
}

func toLayerOperations(filter *spacemeshv2alpha1.LayerRequest) (builder.Operations, error) {
func toLayerOperations(filter *spacemeshv2alpha1.LayerRequest) builder.Operations {
ops := builder.Operations{}
if filter == nil {
return ops, nil
return ops
}

if filter.StartLayer != 0 {
@@ -259,7 +253,7 @@ })
})
}

return ops, nil
return ops
}

func toLayer(layer *layers.Layer) *spacemeshv2alpha1.Layer {
18 changes: 9 additions & 9 deletions atxsdata/warmup_test.go
@@ -16,11 +16,12 @@ import (
"github.com/spacemeshos/go-spacemesh/sql/mocks"
)

var nonce = types.VRFPostIndex(1)

func gatx(
id types.ATXID,
epoch types.EpochID,
smesher types.NodeID,
nonce types.VRFPostIndex,
) types.ActivationTx {
atx := &types.ActivationTx{
NumUnits: 1,
@@ -39,14 +40,13 @@ func TestWarmup(t *testing.T) {
t.Run("sanity", func(t *testing.T) {
db := sql.InMemory()
applied := types.LayerID(10)
nonce := types.VRFPostIndex(1)
data := []types.ActivationTx{
gatx(types.ATXID{1, 1}, 1, types.NodeID{1}, nonce),
gatx(types.ATXID{1, 2}, 1, types.NodeID{2}, nonce),
gatx(types.ATXID{2, 1}, 2, types.NodeID{1}, nonce),
gatx(types.ATXID{2, 2}, 2, types.NodeID{2}, nonce),
gatx(types.ATXID{3, 2}, 3, types.NodeID{2}, nonce),
gatx(types.ATXID{3, 3}, 3, types.NodeID{3}, nonce),
gatx(types.ATXID{1, 1}, 1, types.NodeID{1}),
gatx(types.ATXID{1, 2}, 1, types.NodeID{2}),
gatx(types.ATXID{2, 1}, 2, types.NodeID{1}),
gatx(types.ATXID{2, 2}, 2, types.NodeID{2}),
gatx(types.ATXID{3, 2}, 3, types.NodeID{2}),
gatx(types.ATXID{3, 3}, 3, types.NodeID{3}),
}
for i := range data {
require.NoError(t, atxs.Add(db, &data[i], types.AtxBlob{}))
@@ -74,7 +74,7 @@
t.Run("db failures", func(t *testing.T) {
db := sql.InMemory()
nonce := types.VRFPostIndex(1)
data := gatx(types.ATXID{1, 1}, 1, types.NodeID{1}, nonce)
data := gatx(types.ATXID{1, 1}, 1, types.NodeID{1})

Check failure on line 77 in atxsdata/warmup_test.go (GitHub Actions: coverage, lint, and all unittests jobs): nonce declared and not used
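These failures are fallout from gatx losing its nonce parameter: with the package-level var nonce now supplying the value, the subtest's local nonce declaration is dead code. The presumable fix is to drop that line, leaving:

db := sql.InMemory()
data := gatx(types.ATXID{1, 1}, 1, types.NodeID{1}) // local nonce := ... deleted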
require.NoError(t, atxs.Add(db, &data, types.AtxBlob{}))

exec := mocks.NewMockExecutor(gomock.NewController(t))
4 changes: 2 additions & 2 deletions beacon/handlers.go
@@ -409,7 +409,7 @@ func (pd *ProtocolDriver) HandleFollowingVotes(ctx context.Context, peer p2p.Pee
return errUntimelyMessage
}

nodeID, err := pd.verifyFollowingVotes(ctx, m)
nodeID, err := pd.verifyFollowingVotes(m)
if err != nil {
return err
}
@@ -434,7 +434,7 @@ func (pd *ProtocolDriver) HandleFollowingVotes(ctx context.Context, peer p2p.Pee
return nil
}

func (pd *ProtocolDriver) verifyFollowingVotes(ctx context.Context, m FollowingVotingMessage) (types.NodeID, error) {
func (pd *ProtocolDriver) verifyFollowingVotes(m FollowingVotingMessage) (types.NodeID, error) {
messageBytes := codec.MustEncode(&m.FollowingVotingMessageBody)
if !pd.edVerifier.Verify(signing.BEACON_FOLLOWUP_MSG, m.SmesherID, messageBytes, m.Signature) {
return types.EmptyNodeID, fmt.Errorf("[round %v] verify signature %s: failed", types.FirstRound, m.Signature)
3 changes: 1 addition & 2 deletions fetch/fetch.go
@@ -786,7 +786,7 @@ func (f *Fetch) streamBatch(peer p2p.Peer, batch *batchInfo) error {
batchMap := batch.toMap()

n, err := server.ReadResponse(s, func(respLen uint32) (n int, err error) {
return f.receiveStreamedBatch(ctx, s, batch, batchMap)
return f.receiveStreamedBatch(s, batch, batchMap)
})
if err != nil {
return n, err
@@ -819,7 +819,6 @@ func (f *Fetch) streamBatch(peer p2p.Peer, batch *batchInfo) error {
}

func (f *Fetch) receiveStreamedBatch(
ctx context.Context,
s io.ReadWriter,
batch *batchInfo,
batchMap map[types.Hash32]RequestMessage,
4 changes: 2 additions & 2 deletions hare3/eligibility/oracle.go
@@ -178,7 +178,7 @@ func (o *Oracle) resetCacheOnSynced(ctx context.Context) {
}

// buildVRFMessage builds the VRF message used as input for hare eligibility validation.
func (o *Oracle) buildVRFMessage(ctx context.Context, layer types.LayerID, round uint32) ([]byte, error) {
func (o *Oracle) buildVRFMessage(layer types.LayerID, round uint32) ([]byte, error) {
beacon, err := o.beacons.GetBeacon(layer.GetEpoch())
if err != nil {
return nil, fmt.Errorf("get beacon: %w", err)
@@ -240,7 +240,7 @@ func (o *Oracle) prepareEligibilityCheck(
return 0, fixed.Fixed{}, fixed.Fixed{}, true, err
}

msg, err := o.buildVRFMessage(ctx, layer, round)
msg, err := o.buildVRFMessage(layer, round)
if err != nil {
logger.Warn("could not build vrf message", zap.Error(err))
return 0, fixed.Fixed{}, fixed.Fixed{}, true, err
2 changes: 1 addition & 1 deletion hare4/eligibility/oracle.go
@@ -178,7 +178,7 @@ func (o *Oracle) resetCacheOnSynced(ctx context.Context) {
}

// buildVRFMessage builds the VRF message used as input for hare eligibility validation.
func (o *Oracle) buildVRFMessage(ctx context.Context, layer types.LayerID, round uint32) ([]byte, error) {
func (o *Oracle) buildVRFMessage(layer types.LayerID, round uint32) ([]byte, error) {
beacon, err := o.beacons.GetBeacon(layer.GetEpoch())
if err != nil {
return nil, fmt.Errorf("get beacon: %w", err)
10 changes: 4 additions & 6 deletions hare4/hare.go
@@ -390,12 +390,12 @@ func (h *Hare) handleProposalsStream(ctx context.Context, msg []byte, s io.ReadW

// reconstructProposals tries to reconstruct the full list of proposals from a peer based on a delivered
// set of compact IDs.
func (h *Hare) reconstructProposals(ctx context.Context, peer p2p.Peer, msgId types.Hash32, msg *Message) error {
func (h *Hare) reconstructProposals(peer p2p.Peer, msgId types.Hash32, msg *Message) error {
proposals := h.proposals.GetForLayer(msg.Layer)
if len(proposals) == 0 {
return errNoLayerProposals
}
compacted := h.compactProposals(msg.Layer, proposals)
compacted := h.compactProposals(proposals)
proposalIds := make([]proposalTuple, len(proposals))
for i := range proposals {
proposalIds[i] = proposalTuple{id: proposals[i].ID(), compact: compacted[i]}
@@ -476,7 +476,7 @@ func (h *Hare) Handler(ctx context.Context, peer p2p.Peer, buf []byte) error {
// original sent message for signature validation to occur
compacts = msg.Value.CompactProposals
messageCompactsCounter.Add(float64(len(compacts)))
err := h.reconstructProposals(ctx, peer, msgId, msg)
err := h.reconstructProposals(peer, msgId, msg)
switch {
case errors.Is(err, errCannotMatchProposals):
msg.Value.Proposals, err = h.fetchFull(ctx, peer, msgId)
@@ -885,9 +885,7 @@ type session struct {
vrfs []*types.HareEligibility
}

func (h *Hare) compactProposals(layer types.LayerID,
proposals []*types.Proposal,
) []types.CompactProposalID {
func (h *Hare) compactProposals(proposals []*types.Proposal) []types.CompactProposalID {
compactProposals := make([]types.CompactProposalID, len(proposals))
for i, prop := range proposals {
vrf := prop.EligibilityProofs[0].Sig
3 changes: 1 addition & 2 deletions miner/proposal_builder.go
@@ -480,7 +480,6 @@ func (pb *ProposalBuilder) initSharedData(ctx context.Context, current types.Lay
}

func (pb *ProposalBuilder) initSignerData(
ctx context.Context,
ss *signerSession,
lid types.LayerID,
) error {
@@ -573,7 +572,7 @@ func (pb *ProposalBuilder) build(ctx context.Context, lid types.LayerID) error {
for _, ss := range signers {
ss.latency.start = start
eg.Go(func() error {
if err := pb.initSignerData(ctx, ss, lid); err != nil {
if err := pb.initSignerData(ss, lid); err != nil {
if errors.Is(err, errAtxNotAvailable) {
ss.log.Debug("smesher doesn't have atx that targets this epoch",
log.ZContext(ctx),
4 changes: 2 additions & 2 deletions node/node.go
@@ -1589,7 +1589,7 @@ func (app *App) grpcService(svc grpcserver.Service, lg log.Log) (grpcserver.Serv
return nil, fmt.Errorf("unknown service %s", svc)
}

func (app *App) startAPIServices(ctx context.Context) error {
func (app *App) startAPIServices() error {
logger := app.addLogger(GRPCLogger, app.log)
grpczap.SetGrpcLoggerV2(grpclog, logger.Zap())

@@ -2194,7 +2194,7 @@ func (app *App) startSynchronous(ctx context.Context) (err error) {
app.log.Info("no need to preserve data after recovery")
}

if err := app.startAPIServices(ctx); err != nil {
if err := app.startAPIServices(); err != nil {
return err
}

6 changes: 3 additions & 3 deletions sql/atxs/atxs.go
@@ -358,7 +358,7 @@ func LoadBlob(ctx context.Context, db sql.Executor, id []byte, blob *sql.Blob) (
// We don't use the provided blob in this case to avoid
// caching references to the underlying slice (subsequent calls would modify it).
var blob sql.Blob
v, err := getBlob(ctx, db, id, &blob)
v, err := getBlob(db, id, &blob)
if err != nil {
return nil, err
}
@@ -372,10 +372,10 @@ func LoadBlob(ctx context.Context, db sql.Executor, id []byte, blob *sql.Blob) (
return cached.version, nil
}

return getBlob(ctx, db, id, blob)
return getBlob(db, id, blob)
}

func getBlob(ctx context.Context, db sql.Executor, id []byte, blob *sql.Blob) (types.AtxVersion, error) {
func getBlob(db sql.Executor, id []byte, blob *sql.Blob) (types.AtxVersion, error) {
var version types.AtxVersion
rows, err := db.Exec("select atx, version from atx_blobs where id = ?1",
func(stmt *sql.Statement) {
6 changes: 3 additions & 3 deletions syncer/state_syncer.go
@@ -161,7 +161,7 @@ func (s *Syncer) processLayerOpinions(ctx context.Context, lid types.LayerID, re
}
}

func (s *Syncer) needCert(ctx context.Context, lid types.LayerID) (bool, error) {
func (s *Syncer) needCert(lid types.LayerID) (bool, error) {
cutoff := s.certCutoffLayer()
if !lid.After(cutoff) {
return false, nil
@@ -183,7 +183,7 @@ func (s *Syncer) layerOpinions(
}

v2OpnPoll.Inc()
needCert, err := s.needCert(ctx, lid)
needCert, err := s.needCert(lid)
if err != nil {
return nil, nil, err
}
@@ -233,7 +233,7 @@ func (s *Syncer) checkMeshAgreement(
}

func (s *Syncer) adopt(ctx context.Context, lid types.LayerID, certs []*types.Certificate) error {
needCert, err := s.needCert(ctx, lid)
needCert, err := s.needCert(lid)
if err != nil {
return err
}
3 changes: 1 addition & 2 deletions systest/cluster/cluster.go
@@ -742,8 +742,7 @@ func (c *Cluster) Wait(tctx *testcontext.Context, i int) error {
func (c *Cluster) WaitAll(ctx context.Context) error {
var eg errgroup.Group
wait := func(clients []*NodeClient) {
for i := range c.clients {
client := c.clients[i]
for _, client := range clients {
eg.Go(func() error {
_, err := client.Resolve(ctx)
return err
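Worth noting: unparam surfaced a real bug in this hunk, not just dead weight. The old closure ignored its clients argument and always ranged over c.clients, so the rewritten loop is a behavior fix for any caller that passes a different slice.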
2 changes: 1 addition & 1 deletion tortoise/algorithm.go
@@ -308,7 +308,7 @@ func (t *Tortoise) TallyVotes(ctx context.Context, lid types.LayerID) {
defer t.mu.Unlock()
waitTallyVotes.Observe(float64(time.Since(start).Nanoseconds()))
start = time.Now()
t.trtl.tallyVotes(ctx, lid)
t.trtl.tallyVotes(lid)
executeTallyVotes.Observe(float64(time.Since(start).Nanoseconds()))
if t.tracer != nil {
t.tracer.On(&TallyTrace{Layer: lid})
2 changes: 1 addition & 1 deletion tortoise/state.go
@@ -434,7 +434,7 @@ type headerWithSign struct {
sign sign
}

func decodeVotes(evicted, blid types.LayerID, base *ballotInfo, exceptions types.Votes) (votes, types.LayerID, error) {
func decodeVotes(blid types.LayerID, base *ballotInfo, exceptions types.Votes) (votes, types.LayerID, error) {
from := base.layer
diff := map[types.LayerID]map[types.BlockID]headerWithSign{}
for _, header := range exceptions.Against {