diff --git a/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml b/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml
new file mode 100644
index 0000000000..c76273d5cd
--- /dev/null
+++ b/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml
@@ -0,0 +1,32 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: enhancement
+
+# Change summary; an 80ish-character description of the change.
+summary: Make file storage size configurable
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodates a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
+#description:
+
+# Affected component; a word indicating the component this changeset affects.
+component: fleet-server
+
+# PR URL; optional; the PR number that added the changeset.
+# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+#pr: https://github.com/owner/repo/1234
+
+# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+#issue: https://github.com/owner/repo/1234
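A hedged configuration sketch for context: only the `max_file_storage_size` key under `server.limits` comes from this change; the surrounding nesting is assumed from the usual fleet-server input configuration and may differ per deployment.

```yaml
inputs:
  - type: fleet-server
    server:
      limits:
        # absent = no cap (the new default), 0 = file transfer disabled,
        # any positive value = maximum accepted file size in bytes
        max_file_storage_size: 104857600 # the 100 MiB value that was previously hardcoded
```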
diff --git a/internal/pkg/api/error.go b/internal/pkg/api/error.go
index 5b859fea97..52379c5978 100644
--- a/internal/pkg/api/error.go
+++ b/internal/pkg/api/error.go
@@ -340,6 +340,15 @@ func NewHTTPErrResp(err error) HTTPErrResp {
             zerolog.InfoLevel,
         },
     },
+    {
+        uploader.ErrFeatureDisabled,
+        HTTPErrResp{
+            http.StatusForbidden,
+            "ErrFileFeatureDisabled",
+            "File Transfer is disabled in Fleet Server configuration",
+            zerolog.WarnLevel,
+        },
+    },
     {
         uploader.ErrMissingChunks,
         HTTPErrResp{
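With this mapping in place, a begin-upload request against a server configured with `max_file_storage_size: 0` would produce a response along these lines (a sketch — the envelope field names are assumed from fleet-server's usual `HTTPErrResp` JSON tags, which are not shown in this diff):

```json
{
  "statusCode": 403,
  "error": "ErrFileFeatureDisabled",
  "message": "File Transfer is disabled in Fleet Server configuration"
}
```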
"123", + "src": "agent" + }`, size) + } + + // now test various body contents + tests := []struct { + Name string + MaxSize *uint64 + ExpectStatus int + InputSize int64 + }{ + {"MaxSize nil allows uploads", nil, http.StatusOK, 1000}, + {"MaxSize nil allows large uploads", nil, http.StatusOK, 1024 * 1024 * 1024 * 2}, + {"MaxSize nil does not allow 0-length files", nil, http.StatusBadRequest, 0}, + {"MaxSize 0 does not allow uploads", size_ptr(0), http.StatusForbidden, 1000}, + {"MaxSize 0 does not allow 0-sized uploads", size_ptr(0), http.StatusForbidden, 0}, + {"Sizes larger than MaxSize are denied", size_ptr(1024), http.StatusBadRequest, 2048}, + {"Sizes smaller than MaxSize are allowed", size_ptr(1024), http.StatusOK, 900}, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + + hr, _, _, _ := configureUploaderMock(t, tc.MaxSize) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, RouteUploadBegin, strings.NewReader(mockFile(tc.InputSize))) + hr.ServeHTTP(rec, req) + assert.Equal(t, tc.ExpectStatus, rec.Code) + }) + } + +} + /* Chunk data upload route */ @@ -377,7 +410,7 @@ func TestChunkUploadRouteParams(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: file.MaxChunkSize + 1, Count: 2, // this is a 2-chunk "file" based on size above Start: time.Now(), @@ -410,7 +443,7 @@ func TestChunkUploadRequiresChunkHashHeader(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -458,7 +491,7 @@ func TestChunkUploadStatus(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -509,7 +542,7 @@ func TestChunkUploadExpiry(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: tc.StartTime, @@ -547,7 +580,7 @@ func TestChunkUploadWritesTimestamp(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -597,7 +630,7 @@ func TestUploadCompleteRequiresMatchingAuth(t *testing.T) { mockInfo := file.Info{ DocID: "bar." 
diff --git a/internal/pkg/config/env_defaults.go b/internal/pkg/config/env_defaults.go
index a0bf3df9dd..7374d7dcd9 100644
--- a/internal/pkg/config/env_defaults.go
+++ b/internal/pkg/config/env_defaults.go
@@ -70,7 +70,7 @@ const (
     defaultUploadChunkInterval = time.Millisecond * 3
     defaultUploadChunkBurst    = 5
     defaultUploadChunkMax      = 10
-    defaultUploadChunkMaxBody  = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MAY NOT be larger than 4 MiB
+    defaultUploadChunkMaxBody  = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MUST NOT be larger than 4 MiB
 
     defaultFileDelivInterval = time.Millisecond * 100
     defaultFileDelivBurst    = 5
@@ -141,8 +141,9 @@ type limit struct {
 }
 
 type serverLimitDefaults struct {
-    PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
-    MaxConnections int           `config:"max_connections"`
+    PolicyThrottle         time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
+    MaxConnections         int           `config:"max_connections"`
+    MaxFileStorageByteSize *uint64       `config:"max_file_storage_size"`
 
     ActionLimit limit `config:"action_limit"`
     PolicyLimit limit `config:"policy_limit"`
diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go
index 8e6dbf391c..db3baaa202 100644
--- a/internal/pkg/config/limits.go
+++ b/internal/pkg/config/limits.go
@@ -16,9 +16,10 @@ type Limit struct {
 }
 
 type ServerLimits struct {
-    MaxAgents         int `config:"max_agents"`
-    MaxHeaderByteSize int `config:"max_header_byte_size"`
-    MaxConnections    int `config:"max_connections"`
+    MaxAgents              int     `config:"max_agents"`
+    MaxHeaderByteSize      int     `config:"max_header_byte_size"`
+    MaxConnections         int     `config:"max_connections"`
+    MaxFileStorageByteSize *uint64 `config:"max_file_storage_size"`
 
     ActionLimit Limit `config:"action_limit"`
     PolicyLimit Limit `config:"policy_limit"`
@@ -47,6 +48,7 @@ func (c *ServerLimits) LoadLimits(limits *envLimits) {
     if c.MaxConnections == 0 {
         c.MaxConnections = l.MaxConnections
     }
+    c.MaxFileStorageByteSize = l.MaxFileStorageByteSize
 
     c.ActionLimit = mergeEnvLimit(c.ActionLimit, l.ActionLimit)
     c.PolicyLimit = mergeEnvLimit(c.PolicyLimit, l.PolicyLimit)
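The new field is a `*uint64` rather than a plain integer so an absent key can be distinguished from an explicit `0`. A minimal, self-contained sketch of the three states (`describe` is a hypothetical helper for illustration, not part of this change):

```go
package main

import "fmt"

// describe mirrors the semantics MaxFileStorageByteSize gets in this diff:
// nil means the key was never set (no size cap), 0 disables file transfer,
// and any positive value is an upper bound in bytes.
func describe(limit *uint64) string {
	switch {
	case limit == nil:
		return "unset: file transfer enabled, no size cap"
	case *limit == 0:
		return "zero: file transfer disabled (requests get 403)"
	default:
		return fmt.Sprintf("capped at %d bytes", *limit)
	}
}

func main() {
	var unset *uint64
	zero, hundredMiB := uint64(0), uint64(104857600)
	fmt.Println(describe(unset))       // unset: file transfer enabled, no size cap
	fmt.Println(describe(&zero))       // zero: file transfer disabled (requests get 403)
	fmt.Println(describe(&hundredMiB)) // capped at 104857600 bytes
}
```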
diff --git a/internal/pkg/file/delivery/delivery.go b/internal/pkg/file/delivery/delivery.go
index f6d3c1ccc0..6956f5d2c7 100644
--- a/internal/pkg/file/delivery/delivery.go
+++ b/internal/pkg/file/delivery/delivery.go
@@ -25,13 +25,13 @@ var (
 )
 
 type Deliverer struct {
-    sizeLimit int64
+    sizeLimit *uint64
 
     client *elasticsearch.Client
     bulker bulk.Bulk
 }
 
-func New(client *elasticsearch.Client, bulker bulk.Bulk, sizeLimit int64) *Deliverer {
+func New(client *elasticsearch.Client, bulker bulk.Bulk, sizeLimit *uint64) *Deliverer {
     return &Deliverer{
         client: client,
         bulker: bulker,
diff --git a/internal/pkg/file/delivery/delivery_test.go b/internal/pkg/file/delivery/delivery_test.go
index 895cb36ca6..62d544ab3f 100644
--- a/internal/pkg/file/delivery/delivery_test.go
+++ b/internal/pkg/file/delivery/delivery_test.go
@@ -68,7 +68,7 @@ func TestFindFile(t *testing.T) {
         },
     }, nil)
 
-    d := New(nil, fakeBulk, -1)
+    d := New(nil, fakeBulk, nil)
 
     info, err := d.FindFileForAgent(context.Background(), fileID, agentID)
     require.NoError(t, err)
@@ -92,7 +92,7 @@ func TestFindFileHandlesNoResults(t *testing.T) {
         },
     }, nil)
 
-    d := New(nil, fakeBulk, -1)
+    d := New(nil, fakeBulk, nil)
 
     _, err := d.FindFileForAgent(context.Background(), "somefile", "anyagent")
     assert.ErrorIs(t, ErrNoFile, err)
@@ -132,7 +132,7 @@ func TestLocateChunks(t *testing.T) {
         },
     }, nil)
 
-    d := New(nil, fakeBulk, -1)
+    d := New(nil, fakeBulk, nil)
 
     chunks, err := d.LocateChunks(context.Background(), zerolog.Logger{}, baseID)
     require.NoError(t, err)
@@ -154,7 +154,7 @@ func TestLocateChunksEmpty(t *testing.T) {
         },
     }, nil)
 
-    d := New(nil, fakeBulk, -1)
+    d := New(nil, fakeBulk, nil)
 
     _, err := d.LocateChunks(context.Background(), zerolog.Logger{}, "afile")
     assert.Error(t, err)
@@ -172,7 +172,7 @@ func TestSendFile(t *testing.T) {
     }
     // Chunk data from a tiny PNG, as a full CBOR document
     esMock.Response = sendBodyBytes(hexDecode("bf665f696e64657878212e666c6565742d66696c6564656c69766572792d646174612d656e64706f696e74635f6964654142432e30685f76657273696f6e02675f7365715f6e6f016d5f7072696d6172795f7465726d0165666f756e64f5666669656c6473bf64646174619f586789504e470d0a1a0a0000000d494844520000010000000100010300000066bc3a2500000003504c5445b5d0d0630416ea0000001f494441546881edc1010d000000c2a0f74f6d0e37a00000000000000000be0d210000019a60e1d50000000049454e44ae426082ffffff")) //nolint:bodyclose // nopcloser is used, linter does not see it
 
-    d := New(esClient, fakeBulk, -1)
+    d := New(esClient, fakeBulk, nil)
 
     err := d.SendFile(context.Background(), zerolog.Logger{}, buf, chunks, fileID)
     require.NoError(t, err)
@@ -208,7 +208,7 @@ func TestSendFileMultipleChunks(t *testing.T) {
         }
     }
 
-    d := New(esClient, fakeBulk, -1)
+    d := New(esClient, fakeBulk, nil)
 
     err := d.SendFile(context.Background(), zerolog.Logger{}, buf, chunks, fileID)
     require.NoError(t, err)
@@ -237,18 +237,19 @@ func TestSendFileMultipleChunksUsesBackingIndex(t *testing.T) {
     esMock.RoundTripFn = func(req *http.Request) (*http.Response, error) {
         parts := strings.Split(req.URL.Path, "/") // ["", ".fleet-filedelivery-data-endpoint-0001", "_doc", "xyz.1"]
-        if parts[3] == fileID+".0" {
+        switch parts[3] {
+        case fileID + ".0":
             assert.Equal(t, idx1, parts[1])
-        } else if parts[3] == fileID+".1" {
+        case fileID + ".1":
             assert.Equal(t, idx2, parts[1])
-        } else {
+        default:
             return nil, errors.New("invalid chunk index!")
         }
         return sendBodyBytes(mockData), nil
     }
 
-    d := New(esClient, fakeBulk, -1)
+    d := New(esClient, fakeBulk, nil)
 
     err := d.SendFile(context.Background(), zerolog.Logger{}, buf, chunks, fileID)
     require.NoError(t, err)
 }
@@ -306,7 +307,7 @@ func TestSendFileHandlesDisorderedChunks(t *testing.T) {
         return sendBodyBytes(sampleDocBody), nil
     }
 
-    d := New(esClient, fakeBulk, -1)
+    d := New(esClient, fakeBulk, nil)
 
     err := d.SendFile(context.Background(), zerolog.Logger{}, buf, chunks, fileID)
     require.NoError(t, err)
 }
diff --git a/internal/pkg/file/uploader/upload.go b/internal/pkg/file/uploader/upload.go
index 3ce35a4c96..fadd4e65c3 100644
--- a/internal/pkg/file/uploader/upload.go
+++ b/internal/pkg/file/uploader/upload.go
@@ -23,6 +23,7 @@ import (
 var (
     ErrInvalidUploadID  = errors.New("active upload not found with this ID, it may be expired")
     ErrFileSizeTooLarge = errors.New("this file exceeds the maximum allowed file size")
+    ErrFeatureDisabled  = errors.New("feature is disabled via fleet server configuration")
     ErrMissingChunks    = errors.New("file data incomplete, not all chunks were uploaded")
     ErrHashMismatch     = errors.New("hash does not match")
     ErrUploadExpired    = errors.New("upload has expired")
@@ -37,14 +38,14 @@ var (
 type Uploader struct {
     cache cache.Cache // cache of file metadata doc info
 
-    sizeLimit int64
+    sizeLimit *uint64
     timeLimit time.Duration
 
     chunkClient *elasticsearch.Client
     bulker      bulk.Bulk
 }
 
-func New(chunkClient *elasticsearch.Client, bulker bulk.Bulk, cache cache.Cache, sizeLimit int64, timeLimit time.Duration) *Uploader {
+func New(chunkClient *elasticsearch.Client, bulker bulk.Bulk, cache cache.Cache, sizeLimit *uint64, timeLimit time.Duration) *Uploader {
     return &Uploader{
         chunkClient: chunkClient,
         bulker:      bulker,
@@ -56,6 +57,10 @@ func New(chunkClient *elasticsearch.Client, bulker bulk.Bulk, cache cache.Cache,
 
 // Start an upload operation
 func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict) (file.Info, error) {
+    if u.sizeLimit != nil && *u.sizeLimit == 0 {
+        return file.Info{}, ErrFeatureDisabled
+    }
+
     vSpan, _ := apm.StartSpan(ctx, "validateFileInfo", "validate")
     if data == nil {
         vSpan.End()
@@ -73,7 +78,7 @@ func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict)
     }
 
     size, _ := data.Int64("file", "size")
-    if size > u.sizeLimit {
+    if u.sizeLimit != nil && uint64(size) > *u.sizeLimit { //nolint:gosec // disable G115 - negatives are checked for in payload validation
         vSpan.End()
         return file.Info{}, ErrFileSizeTooLarge
     }
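Note the ordering: the disabled check runs before payload validation, so with a configured limit of `0` even an otherwise-invalid request (for example, a declared size of 0) surfaces `ErrFeatureDisabled` (403) rather than a validation error (400) — which is what `TestUploadBeginFileSize` earlier in this diff asserts. A standalone sketch of that ordering (`checkBegin` and its local sentinel errors are hypothetical, condensed from `Begin`):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errFeatureDisabled  = errors.New("feature is disabled via fleet server configuration")
	errInvalidSize      = errors.New("file size must be a positive integer") // stands in for payload validation
	errFileSizeTooLarge = errors.New("this file exceeds the maximum allowed file size")
)

// checkBegin condenses the order of checks in Uploader.Begin after this diff:
// an explicit 0 limit short-circuits everything, payload validation rejects
// non-positive sizes, and a non-nil limit caps the declared size.
func checkBegin(sizeLimit *uint64, size int64) error {
	if sizeLimit != nil && *sizeLimit == 0 {
		return errFeatureDisabled // mapped to 403 in error.go
	}
	if size <= 0 {
		return errInvalidSize // mapped to 400
	}
	if sizeLimit != nil && uint64(size) > *sizeLimit {
		return errFileSizeTooLarge // mapped to 400
	}
	return nil
}

func main() {
	zero, limit := uint64(0), uint64(1024)
	fmt.Println(checkBegin(&zero, 0))     // disabled wins over size validation
	fmt.Println(checkBegin(nil, 1<<40))   // <nil>: no limit configured
	fmt.Println(checkBegin(&limit, 2048)) // file exceeds the maximum
}
```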
diff --git a/internal/pkg/file/uploader/upload_test.go b/internal/pkg/file/uploader/upload_test.go
index 8f9483ba27..c09103b8d0 100644
--- a/internal/pkg/file/uploader/upload_test.go
+++ b/internal/pkg/file/uploader/upload_test.go
@@ -82,8 +82,8 @@ func TestUploadBeginReturnsCorrectInfo(t *testing.T) {
     c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
     require.NoError(t, err)
 
-    u := New(nil, fakeBulk, c, int64(size), time.Hour)
-    info, err := u.Begin(context.Background(), []string{}, data)
+    u := New(nil, fakeBulk, c, size_ptr(size), time.Hour)
+    info, err := u.Begin(t.Context(), []string{}, data)
     assert.NoError(t, err)
 
     assert.Equal(t, int64(size), info.Total)
@@ -126,8 +126,8 @@ func TestUploadBeginWritesDocumentFromInputs(t *testing.T) {
     c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
     require.NoError(t, err)
 
-    u := New(nil, fakeBulk, c, int64(size), time.Hour)
-    _, err = u.Begin(context.Background(), []string{}, data)
+    u := New(nil, fakeBulk, c, size_ptr(size), time.Hour)
+    _, err = u.Begin(t.Context(), []string{}, data)
     assert.NoError(t, err)
 
     payload, ok := fakeBulk.Calls[0].Arguments[3].([]byte)
@@ -164,14 +164,14 @@ func TestUploadBeginCalculatesCorrectChunkCount(t *testing.T) {
     c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
     require.NoError(t, err)
 
-    u := New(nil, fakeBulk, c, file.MaxChunkSize*3000, time.Hour)
+    u := New(nil, fakeBulk, c, size_ptr(file.MaxChunkSize*3000), time.Hour)
 
     for _, tc := range tests {
         t.Run(tc.Name, func(t *testing.T) {
             data := makeUploadRequestDict(map[string]interface{}{
                 "file.size": tc.FileSize,
             })
-            info, err := u.Begin(context.Background(), []string{}, data)
+            info, err := u.Begin(t.Context(), []string{}, data)
             assert.NoError(t, err)
             assert.Equal(t, tc.ExpectedCount, info.Count)
         })
@@ -179,17 +179,21 @@
 }
 
 func TestUploadBeginMaxFileSize(t *testing.T) {
+
     tests := []struct {
-        UploadSizeLimit int64
+        UploadSizeLimit *uint64
         FileSize        int64
         ShouldError     bool
         Name            string
     }{
-        {500, 800, true, "800 is too large"},
-        {800, 500, false, "file within limits"},
-        {1024, 1023, false, "1-less than limit"},
-        {1024, 1024, false, "file is exactly limit"},
-        {1024, 1025, true, "file is 1 over limit"},
+        {size_ptr(0), 4096, true, "0 in config disables feature"},
+        {size_ptr(10), 5, false, "any positive value should keep feature enabled"},
+        {size_ptr(500), 800, true, "file of 800 is larger than limit 500"},
+        {size_ptr(800), 500, false, "file within limits"},
+        {size_ptr(1024), 1023, false, "1-less than limit"},
+        {size_ptr(1024), 1024, false, "file is exactly limit"},
+        {size_ptr(1024), 1025, true, "file is 1 over limit"},
+        {nil, 1024 * 1024 * 300, false, "nil as limit is unlimited"},
     }
 
     fakeBulk := itesting.NewMockBulk()
@@ -210,9 +214,13 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
             data := makeUploadRequestDict(map[string]interface{}{
                 "file.size": tc.FileSize,
             })
-            _, err := u.Begin(context.Background(), []string{}, data)
+            _, err := u.Begin(t.Context(), []string{}, data)
             if tc.ShouldError {
-                assert.ErrorIs(t, err, ErrFileSizeTooLarge)
+                if tc.UploadSizeLimit != nil && *tc.UploadSizeLimit == 0 {
+                    assert.ErrorIs(t, err, ErrFeatureDisabled)
+                } else {
+                    assert.ErrorIs(t, err, ErrFileSizeTooLarge)
+                }
             } else {
                 assert.NoError(t, err)
             }
@@ -243,7 +251,7 @@ func TestUploadRejectsMissingRequiredFields(t *testing.T) {
     c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
     require.NoError(t, err)
 
-    u := New(nil, fakeBulk, c, 2048, time.Hour)
+    u := New(nil, fakeBulk, c, size_ptr(2048), time.Hour)
 
     var ok bool
     for _, field := range tests {
@@ -264,7 +272,7 @@ func TestUploadRejectsMissingRequiredFields(t *testing.T) {
                 }
             }
 
-            _, err = u.Begin(context.Background(), []string{}, data)
+            _, err = u.Begin(t.Context(), []string{}, data)
             assert.Errorf(t, err, "%s is a required field and should error if not provided", field)
         })
@@ -336,19 +344,19 @@ func TestChunkMarksFinal(t *testing.T) {
             c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
             require.NoError(t, err)
 
-            u := New(nil, fakeBulk, c, 8388608000, time.Hour)
+            u := New(nil, fakeBulk, c, size_ptr(8388608000), time.Hour)
 
             data := makeUploadRequestDict(map[string]interface{}{
                 "file.size": tc.FileSize,
             })
 
-            info, err := u.Begin(context.Background(), []string{}, data)
+            info, err := u.Begin(t.Context(), []string{}, data)
             assert.NoError(t, err)
 
             // for anything larger than 1-chunk, check for off-by-ones
             if tc.FinalChunk > 0 {
                 mockUploadInfoResult(fakeBulk, info)
-                _, prev, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk-1, "")
+                _, prev, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk-1, "")
                 assert.NoError(t, err)
                 assert.Falsef(t, prev.Last, "penultimate chunk number (%d) should not be marked final", tc.FinalChunk-1)
             }
 
@@ -356,9 +364,14 @@ func TestChunkMarksFinal(t *testing.T) {
             mockUploadInfoResult(fakeBulk, info)
 
             // make sure the final chunk is marked as such
-            _, chunk, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk, "")
+            _, chunk, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk, "")
             assert.NoError(t, err)
             assert.Truef(t, chunk.Last, "chunk number %d should be marked as Last", tc.FinalChunk)
         })
     }
 }
+
+func size_ptr(x int) *uint64 {
+    y := uint64(x) //nolint:gosec // disable G115
+    return &y
+}