Commit

Merge commit from fork
* no danger, baby

* comments for all gosec

* im going through changes

* changes

* message

* final changes

* final changes
ItamarYuran authored Feb 20, 2025
1 parent 7db5bf3 commit 3a62575
Showing 26 changed files with 125 additions and 79 deletions.
1 change: 1 addition & 0 deletions cmd/lakectl/cmd/abuse_random_delete.go
@@ -39,6 +39,7 @@ var abuseRandomDeletesCmd = &cobra.Command{
// generate randomly selected keys as input
generator.Setup(func(add stress.GeneratorAddFn) {
for i := 0; i < amount; i++ {
// rand.Intn is good enough for abuse.
//nolint:gosec
add(keys[rand.Intn(len(keys))])
}
1 change: 1 addition & 0 deletions cmd/lakectl/cmd/abuse_random_read.go
@@ -40,6 +40,7 @@ var abuseRandomReadsCmd = &cobra.Command{
// generate randomly selected keys as input
generator.Setup(func(add stress.GeneratorAddFn) {
for i := 0; i < amount; i++ {
// rand.Intn is good enough for abuse.
//nolint:gosec
add(keys[rand.Intn(len(keys))])
}
5 changes: 4 additions & 1 deletion pkg/api/controller.go
@@ -203,7 +203,9 @@ func (c *Controller) CreatePresignMultipartUpload(w http.ResponseWriter, r *http

// check valid number of parts
if params.Parts != nil {
if *params.Parts < 0 || int32(*params.Parts) > manager.MaxUploadParts { //nolint:gosec
// Casting MaxUploadParts from int32 to int is safe: int is either
// 32 or 64 bits wide, so no information is lost.
if *params.Parts < 0 || *params.Parts > int(manager.MaxUploadParts) {
writeError(w, r, http.StatusBadRequest, fmt.Sprintf("parts can be between 0 and %d", manager.MaxUploadParts))
return
}
@@ -5336,6 +5338,7 @@ func (c *Controller) PostStatsEvents(w http.ResponseWriter, r *http.Request, bod
UserID: user.Username,
Client: client,
}
// Count of stats; invalid values can be filtered on the receiving side.
c.Collector.CollectEvents(ev, uint64(statsEv.Count)) //nolint:gosec

c.Logger.WithContext(ctx).WithFields(logging.Fields{
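The parts check above is the interesting change: the old line silenced gosec while narrowing the untrusted value with int32(*params.Parts); the new line widens the trusted constant instead, so no cast of user input happens at all. A minimal standalone sketch of the distinction, with a hypothetical maxParts standing in for manager.MaxUploadParts:

package main

import (
	"fmt"
	"math"
)

// maxParts stands in for a library-defined int32 limit such as
// manager.MaxUploadParts in the AWS SDK.
const maxParts int32 = 10000

// validParts mirrors the rewritten check: widen the trusted constant,
// never narrow the untrusted input.
func validParts(parts int) bool {
	// int is at least 32 bits on every Go platform, so int(maxParts)
	// can never lose information.
	return parts >= 0 && parts <= int(maxParts)
}

func main() {
	fmt.Println(validParts(500))   // true
	fmt.Println(validParts(20000)) // false

	// The old form, int32(parts) > maxParts, is what gosec warned about:
	// on 64-bit platforms a large int silently wraps when narrowed.
	huge := math.MaxInt32 + 1
	fmt.Println(int32(huge)) // -2147483648: wrapped, so the comparison would lie
}

Widening the constant rather than narrowing the input removes the overflow case entirely, which is why this call site no longer needs a nolint at all.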
6 changes: 4 additions & 2 deletions pkg/api/etag_middleware.go
@@ -1,7 +1,9 @@
package api

import (
"crypto/md5" //nolint:gosec
// MD5 required for ETag computation.
//nolint:gosec
"crypto/md5"
"encoding/hex"
"io"
"io/fs"
@@ -62,7 +64,7 @@ func scanFSEtags(fSys fs.FS) (map[string]string, error) {
return err
}
defer func() { _ = f.Close() }()

// MD5 required for ETag computation.
h := md5.New() //nolint:gosec
if _, err := io.Copy(h, f); err != nil {
return err
5 changes: 4 additions & 1 deletion pkg/block/hashing_reader.go
@@ -1,7 +1,9 @@
package block

import (
"crypto/md5" //nolint:gosec
// MD5 required for ETag computation.
//nolint:gosec
"crypto/md5"
"crypto/sha256"
"hash"
"io"
@@ -45,6 +47,7 @@ func NewHashingReader(body io.Reader, hashTypes ...HashFunction) *HashingReader
switch hashType {
case HashFunctionMD5:
if s.Md5 == nil {
// MD5 required for ETag computation.
s.Md5 = md5.New() //nolint:gosec
}
case HashFunctionSHA256:
2 changes: 2 additions & 0 deletions pkg/block/local/adapter.go
@@ -2,6 +2,7 @@ package local

import (
"context"
// MD5 required for ETag computation.
"crypto/md5" //nolint:gosec
"encoding/hex"
"errors"
@@ -463,6 +464,7 @@ func computeETag(parts []block.MultipartPart) string {
}
s := strings.Join(etagHex, "")
b, _ := hex.DecodeString(s)
// MD5 required for ETag computation.
md5res := md5.Sum(b) //nolint:gosec
csm := hex.EncodeToString(md5res[:])
return csm
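For context on why MD5 keeps appearing in this commit: S3-compatible ETags are an MD5-based integrity convention, not a security mechanism, which is what each of these nolint comments is asserting. For multipart objects, S3's published scheme is MD5 over the concatenation of the per-part MD5 digests, suffixed with the part count. A self-contained sketch of that convention (a hypothetical helper, not the adapter's exact code):

package main

import (
	// MD5 is an integrity checksum here, not a security primitive.
	"crypto/md5" //nolint:gosec
	"encoding/hex"
	"fmt"
)

// multipartETag computes an S3-style multipart ETag: MD5 over the
// concatenated per-part MD5 digests, suffixed with the part count.
func multipartETag(parts [][]byte) string {
	var concat []byte
	for _, p := range parts {
		sum := md5.Sum(p) //nolint:gosec
		concat = append(concat, sum[:]...)
	}
	final := md5.Sum(concat) //nolint:gosec
	return fmt.Sprintf("%s-%d", hex.EncodeToString(final[:]), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{
		[]byte("part one"),
		[]byte("part two"),
	}))
}

The adapter's computeETag above has the same digest-of-digests shape: it hex-decodes the parts' ETag strings back into digest bytes before the final MD5, and in the hunk shown it returns only the hex digest.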
2 changes: 2 additions & 0 deletions pkg/block/local/walker.go
@@ -2,6 +2,7 @@ package local

import (
"context"
// MD5 required for ETag computation.
"crypto/md5" //nolint:gosec
"encoding/hex"
"encoding/json"
@@ -173,6 +174,7 @@ func calcFileETag(ent block.ObjectStoreEntry) (string, error) {
return "", err
}
defer func() { _ = f.Close() }()
// MD5 required for ETag computation.
hash := md5.New() //nolint:gosec
_, err = io.Copy(hash, f)
if err != nil {
20 changes: 12 additions & 8 deletions pkg/block/s3/adapter.go
@@ -314,8 +314,9 @@ func (a *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, sizeB
}

uploadPartInput := &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
Bucket: aws.String(bucket),
Key: aws.String(key),
// partNumber can only be as big as 10,000
PartNumber: aws.Int32(int32(partNumber)), //nolint:gosec
UploadId: aws.String(uploadID),
Body: reader,
@@ -480,9 +481,10 @@ func (a *Adapter) GetPresignUploadPartURL(ctx context.Context, obj block.ObjectP
presigner := a.presignerClient(ctx, bucket)

uploadInput := &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
UploadId: aws.String(uploadID),
Bucket: aws.String(bucket),
Key: aws.String(key),
UploadId: aws.String(uploadID),
// partNumber can only be as big as 10,000
PartNumber: aws.Int32(int32(partNumber)), //nolint:gosec
}
uploadPart, err := presigner.PresignUploadPart(ctx, uploadInput)
@@ -609,8 +611,9 @@ func (a *Adapter) copyPart(ctx context.Context, sourceObj, destinationObj block.
}

uploadPartCopyObject := s3.UploadPartCopyInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
Bucket: aws.String(bucket),
Key: aws.String(key),
// partNumber can only be as big as 10,000
PartNumber: aws.Int32(int32(partNumber)), //nolint:gosec
UploadId: aws.String(uploadID),
CopySource: aws.String(fmt.Sprintf("%s/%s", srcKey.GetStorageNamespace(), srcKey.GetKey())),
@@ -753,7 +756,8 @@ func convertFromBlockMultipartUploadCompletion(multipartList *block.MultipartUpl
parts := make([]types.CompletedPart, 0, len(multipartList.Part))
for _, p := range multipartList.Part {
parts = append(parts, types.CompletedPart{
ETag: aws.String(p.ETag),
ETag: aws.String(p.ETag),
// partNumber can only be as big as 10,000
PartNumber: aws.Int32(int32(p.PartNumber)), //nolint:gosec
})
}
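Every PartNumber cast in this file leans on the same S3 invariant stated in the comments: a multipart upload allows at most 10,000 parts, so any valid part number fits easily in an int32. A hedged sketch of a check-then-cast guard that would make the conversion locally provable (illustrative only; the adapter relies on upstream validation plus the nolint annotations instead):

package main

import (
	"errors"
	"fmt"
)

// maxPartsPerUpload is S3's documented multipart upload limit.
const maxPartsPerUpload = 10000

var errInvalidPartNumber = errors.New("part number out of range")

// checkedPartNumber narrows an int part number to the int32 the SDK
// expects, after validating the S3 range so the cast cannot overflow.
func checkedPartNumber(n int) (int32, error) {
	if n < 1 || n > maxPartsPerUpload {
		return 0, errInvalidPartNumber
	}
	return int32(n), nil
}

func main() {
	pn, err := checkedPartNumber(42)
	fmt.Println(pn, err) // 42 <nil>

	_, err = checkedPartNumber(10001)
	fmt.Println(err) // part number out of range
}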
3 changes: 2 additions & 1 deletion pkg/cache/cache.go
@@ -69,7 +69,8 @@ func NewJitterFn(jitter time.Duration) JitterFn {
}
}
return func() time.Duration {
n := rand.Intn(int(jitter)) //nolint:gosec
// Safe cast: jitter is a time.Duration, whose underlying type is int64
n := rand.Int63n(int64(jitter)) //nolint:gosec
return time.Duration(n)
}
}
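This hunk is a real fix rather than just an annotation: time.Duration's underlying type is int64, so on a 32-bit platform the old int(jitter) would overflow for any jitter above math.MaxInt32 nanoseconds, about 2.1 seconds. A sketch of the corrected pattern, assuming a positive jitter (rand.Int63n panics on n <= 0, which the surrounding function guards against):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// newJitterFn returns a function yielding a uniform random duration in
// [0, jitter); jitter must be positive.
func newJitterFn(jitter time.Duration) func() time.Duration {
	return func() time.Duration {
		// time.Duration is defined as int64, so int64(jitter) is
		// lossless on every platform, unlike int(jitter).
		n := rand.Int63n(int64(jitter)) //nolint:gosec // jitter, not crypto
		return time.Duration(n)
	}
}

func main() {
	jitterFn := newJitterFn(5 * time.Second) // > math.MaxInt32 ns: the old code would overflow on 32-bit
	fmt.Println(jitterFn())
}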
4 changes: 3 additions & 1 deletion pkg/catalog/catalog.go
@@ -1329,14 +1329,16 @@ func (c *Catalog) CreateCommitRecord(ctx context.Context, repositoryID string, c
commitParents[i] = graveler.CommitID(parent)
}
commit := graveler.Commit{
// cast from int32 to int; no danger of information loss
Version: graveler.CommitVersion(version), //nolint:gosec
Committer: committer,
Message: message,
MetaRangeID: graveler.MetaRangeID(metaRangeID),
CreationDate: time.Unix(creationDate, 0).UTC(),
Parents: commitParents,
Metadata: metadata,
Generation: graveler.CommitGeneration(generation), //nolint:gosec
// cast from int32 to CommitGeneration; no information is lost
Generation: graveler.CommitGeneration(generation), //nolint:gosec
}
return c.Store.CreateCommitRecord(ctx, repository, graveler.CommitID(commitID), commit, opts...)
}
1 change: 1 addition & 0 deletions pkg/catalog/testutils/testutils.go
@@ -150,6 +150,7 @@ func (w *FakeWalker) createEntries(count int) {
// For example, setting "5" here will cause the test to constantly fail.
// Fix Bug #3384
const seed = 6
// tests, safe
//nolint:gosec
randGen := rand.New(rand.NewSource(seed))
for i := 0; i < count; i++ {
11 changes: 6 additions & 5 deletions pkg/config/defaults.go
@@ -7,11 +7,12 @@ import (
)

const (
DefaultListenAddress = "0.0.0.0:8000"
DefaultLoggingLevel = "INFO"
DefaultLoggingAuditLogLevel = "DEBUG"
BlockstoreTypeKey = "blockstore.type"
DefaultQuickstartUsername = "quickstart"
DefaultListenAddress = "0.0.0.0:8000"
DefaultLoggingLevel = "INFO"
DefaultLoggingAuditLogLevel = "DEBUG"
BlockstoreTypeKey = "blockstore.type"
DefaultQuickstartUsername = "quickstart"
// quickstart creds, safe
DefaultQuickstartKeyID = "AKIAIOSFOLQUICKSTART" //nolint:gosec
DefaultQuickstartSecretKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" // nolint:gosec
DefaultAuthAPIHealthCheckTimeout = 20 * time.Second
1 change: 1 addition & 0 deletions pkg/gateway/operations/getobject.go
@@ -223,6 +223,7 @@ func handleListParts(w http.ResponseWriter, req *http.Request, o *PathOperation)
parts := make([]serde.MultipartUploadPart, len(partsResp.Parts))
for i, part := range partsResp.Parts {
parts[i] = serde.MultipartUploadPart{
// PartNumber <= 10000, safe
PartNumber: int32(part.PartNumber), //nolint:gosec
ETag: part.ETag,
LastModified: serde.Timestamp(part.LastModified),
2 changes: 2 additions & 0 deletions pkg/gateway/sig/v4_streaming_reader.go
@@ -35,6 +35,7 @@

// Streaming AWS Signature Version '4' constants.
const (
// These constants are part of the AWS SigV4 spec, so they are safe.
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" //nolint:gosec
signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" //nolint:gosec
SlashSeparator = "/"
Expand Down Expand Up @@ -226,6 +227,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
n += n0
buf = buf[n0:]
// Update bytes to be read of the current chunk before verifying chunk's signature.
// n >= n0 >= 0, cast is safe
cr.n -= uint64(n0) //nolint:gosec

// If we're at the end of a chunk.
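The first constant is straightforward to verify: it is the SHA-256 digest of an empty payload, which SigV4 uses when a request carries no body. A quick check:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // SHA-256 of zero bytes
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}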
1 change: 1 addition & 0 deletions pkg/graveler/graveler.go
@@ -3678,6 +3678,7 @@ func (c *commitValueIterator) setValue() bool {
}
commit := c.src.Value()
data, err := proto.Marshal(&CommitData{
// version is already int32, safe
Version: int32(commit.Version), //nolint:gosec
Id: string(commit.CommitID),
Committer: commit.Committer,
1 change: 1 addition & 0 deletions pkg/ident/ident.go
@@ -61,6 +61,7 @@ func MarshalInt64(h hash.Hash, v int64) {
marshalType(h, AddressTypeInt64)
_, _ = h.Write([]byte{int64Bytes})
bytes := make([]byte, int64Bytes)
// safe as we are only interested in v's bytes value
binary.BigEndian.PutUint64(bytes, uint64(v)) //nolint:gosec
_, _ = h.Write(bytes)
}
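The uint64(v) cast is lossless because a same-width integer conversion in Go reinterprets the two's-complement bits without changing them, so the hash sees exactly v's byte representation, sign included. A small round-trip demonstration:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	v := int64(-42)
	// Same-width conversion: the bit pattern is preserved as-is.
	u := uint64(v) //nolint:gosec // only the byte representation matters
	fmt.Printf("%#x\n", u) // 0xffffffffffffffd6

	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, u)

	// Converting back recovers the original value exactly.
	back := int64(binary.BigEndian.Uint64(buf)) //nolint:gosec
	fmt.Println(back == v) // true
}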
1 change: 1 addition & 0 deletions pkg/kv/cosmosdb/main_test.go
@@ -77,6 +77,7 @@ func TestMain(m *testing.M) {
Key: "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==",
Database: "test-db",
Client: &http.Client{Timeout: 30 * time.Second, Transport: &http.Transport{
// tests, safe
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // ignore self-signed cert for local testing using the emulator
}},
StrongConsistency: false,
7 changes: 6 additions & 1 deletion pkg/kv/cosmosdb/store.go
@@ -7,6 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"sort"

@@ -355,6 +356,9 @@ func (s *Store) Scan(ctx context.Context, partitionKey []byte, options kv.ScanOp
if len(partitionKey) == 0 {
return nil, kv.ErrMissingPartitionKey
}
if options.BatchSize > math.MaxInt32 {
return nil, kv.ErrBatchSizeTooBig
}
it := &EntriesIterator{
store: s,
partitionKey: partitionKey,
@@ -517,7 +521,8 @@ func (e *EntriesIterator) runQuery(includeStartKey bool) error {
pk := azcosmos.NewPartitionKeyString(encoding.EncodeToString(e.partitionKey))
e.queryPager = e.store.containerClient.NewQueryItemsPager(query, pk, &azcosmos.QueryOptions{
ConsistencyLevel: e.store.consistencyLevel.ToPtr(),
PageSizeHint: int32(e.batchSize), //nolint:gosec
// batchSize checked, safe cast
PageSizeHint: int32(e.batchSize), //nolint:gosec
QueryParameters: []azcosmos.QueryParameter{{
Name: "@start",
Value: encoding.EncodeToString(e.startKey),
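Unlike most files in this commit, store.go adds a genuine guard: the new math.MaxInt32 check in Scan is what makes the later int32(e.batchSize) cast provably safe rather than merely asserted safe. A standalone sketch of the same check-then-cast idea (hypothetical names, not the store's API):

package main

import (
	"errors"
	"fmt"
	"math"
)

var errBatchSizeTooBig = errors.New("batch size too big")

// pageSizeHint narrows a caller-supplied batch size to the int32 the
// SDK expects, rejecting values that cannot be represented.
func pageSizeHint(batchSize int) (int32, error) {
	if batchSize > math.MaxInt32 {
		return 0, errBatchSizeTooBig
	}
	return int32(batchSize), nil //nolint:gosec // bounds-checked above
}

func main() {
	hint, err := pageSizeHint(1000)
	fmt.Println(hint, err) // 1000 <nil>

	_, err = pageSizeHint(math.MaxInt32 + 1)
	fmt.Println(err) // batch size too big
}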
1 change: 1 addition & 0 deletions pkg/kv/store.go
@@ -32,6 +32,7 @@
ErrConnectFailed = errors.New("connect failed")
ErrDriverConfiguration = errors.New("driver configuration")
ErrMissingPartitionKey = errors.New("missing partition key")
ErrBatchSizeTooBig = errors.New("batch size too big")
ErrMissingKey = errors.New("missing key")
ErrMissingValue = errors.New("missing value")
ErrNotFound = errors.New("not found")
2 changes: 2 additions & 0 deletions pkg/loadtest/target_generator.go
@@ -24,6 +24,7 @@ func randomFilepath(basename string) string {
const maxDirSuffixes = 3
depth := rand.Intn(maxDepthLevel) //nolint:gosec
for i := 0; i < depth; i++ {
// tests, safe
dirSuffix := rand.Intn(maxDirSuffixes) //nolint:gosec
sb.WriteString(fmt.Sprintf("dir%d/", dirSuffix))
}
@@ -42,6 +43,7 @@ func (t *TargetGenerator) GenerateCreateFileTargets(repo, branch string, num int
now := time.Now().UnixNano()
result := make([]vegeta.Target, num)
for i := 0; i < num; i++ {
// tests, safe
randomContent := rand.Int() //nolint:gosec
fileContent := "--" + boundary + "\n" +
"Content-Disposition: form-data; name=\"content\"; filename=\"file\"\n" +
11 changes: 6 additions & 5 deletions pkg/metastore/glue/converter.go
@@ -235,11 +235,12 @@ func SDLocalToGlue(sd *metastore.StorageDescriptor) *types.StorageDescriptor {
}
schemaRef, _ := sd.AWSSchemaReference.(*types.SchemaReference)
return &types.StorageDescriptor{
BucketColumns: sd.BucketCols,
Columns: columnsLocalToGlue(sd.Cols),
Compressed: sd.Compressed,
InputFormat: aws.String(sd.InputFormat),
Location: aws.String(sd.Location),
BucketColumns: sd.BucketCols,
Columns: columnsLocalToGlue(sd.Cols),
Compressed: sd.Compressed,
InputFormat: aws.String(sd.InputFormat),
Location: aws.String(sd.Location),
// NumBuckets fits in int32
NumberOfBuckets: int32(sd.NumBuckets), //nolint:gosec
OutputFormat: aws.String(sd.OutputFormat),
Parameters: sd.Parameters,
3 changes: 2 additions & 1 deletion pkg/metastore/glue/metastore_client.go
@@ -64,7 +64,8 @@ func (g *MSClient) GetDatabase(ctx context.Context, name string) (*metastore.Dat

func (g *MSClient) getDatabaseFromGlue(ctx context.Context, token *string, parts int) (*glue.GetDatabasesOutput, error) {
return g.client.GetDatabases(ctx, &glue.GetDatabasesInput{
CatalogId: aws.String(g.catalogID),
CatalogId: aws.String(g.catalogID),
// parts <= 10000, safe
MaxResults: aws.Int32(int32(parts)), //nolint:gosec
NextToken: token,
})
22 changes: 13 additions & 9 deletions pkg/metastore/hive/converter.go
@@ -97,7 +97,8 @@ func sortColumnsLocalToHive(columns []*metastore.Order) []*hive_metastore.Order
res := make([]*hive_metastore.Order, len(columns))
for i, column := range columns {
res[i] = &hive_metastore.Order{
Col: column.Col,
Col: column.Col,
// safe: there are fewer than MaxInt32 distinct columns
Order: int32(column.Order), //nolint:gosec
}
}
@@ -121,9 +122,11 @@ func TableLocalToHive(table *metastore.Table) *hive_metastore.Table {
privileges, _ := table.Privileges.(*hive_metastore.PrincipalPrivilegeSet)

ht := &hive_metastore.Table{
DbName: table.DBName,
TableName: table.TableName,
Owner: table.Owner,
DbName: table.DBName,
TableName: table.TableName,
Owner: table.Owner,
// Hive spec stores int32 creation times and is susceptible to Y2K38;
// we cannot do anything about that. See hive_metastore.thrift.
CreateTime: int32(table.CreateTime), //nolint:gosec
LastAccessTime: int32(table.LastAccessTime), //nolint:gosec
Retention: int32(table.Retention), //nolint:gosec
@@ -206,11 +209,12 @@ func SDLocalToHive(sd *metastore.StorageDescriptor) *hive_metastore.StorageDescr
return nil
}
return &hive_metastore.StorageDescriptor{
Cols: columnsLocalToHive(sd.Cols),
Location: sd.Location,
InputFormat: sd.InputFormat,
OutputFormat: sd.OutputFormat,
Compressed: sd.Compressed,
Cols: columnsLocalToHive(sd.Cols),
Location: sd.Location,
InputFormat: sd.InputFormat,
OutputFormat: sd.OutputFormat,
Compressed: sd.Compressed,
// NumBuckets fits in int32
NumBuckets: int32(sd.NumBuckets), //nolint:gosec
SerdeInfo: serDeLocalToHive(sd.SerdeInfo),
BucketCols: sd.BucketCols,
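The Y2K38 remark is easy to make concrete: an int32 Unix timestamp tops out in January 2038, and one second past the limit wraps to 1901. For example:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// The last instant an int32 Unix timestamp can represent.
	last := time.Unix(math.MaxInt32, 0).UTC()
	fmt.Println(last) // 2038-01-19 03:14:07 +0000 UTC

	// One second later, squeezed through an int32, wraps to the past.
	overflow := int64(math.MaxInt32) + 1
	wrapped := time.Unix(int64(int32(overflow)), 0).UTC() //nolint:gosec // deliberate wrap
	fmt.Println(wrapped) // 1901-12-13 20:45:52 +0000 UTC
}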