@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user’s deployment
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: enhancement

# Change summary; an 80ish-character description of the change.
summary: Makes file storage size configurable

# Long description; in case the summary is not enough to describe the change,
# this field accommodates a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; a word indicating the component this changeset affects.
component: fleet-server

# PR URL; optional; the PR number that added the changeset.
# If not present, it is automatically filled by the tooling by finding the PR where this changelog fragment was added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
#pr: https://github.com/owner/repo/1234


Suggested change:
-#pr: https://github.com/owner/repo/1234
+pr: https://github.com/elastic/fleet-server/pull/5478


# Issue URL; optional; the GitHub issue related to this changeset (one it either closes or is part of).
# If not present, it is automatically filled by the tooling with the issue linked to the PR.
#issue: https://github.com/owner/repo/1234
2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery.go
@@ -30,7 +30,7 @@ func NewFileDeliveryT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elastic
return &FileDeliveryT{
bulker: bulker,
cache: cache,
-deliverer: delivery.New(chunkClient, bulker, maxFileSize),
+deliverer: delivery.New(chunkClient, bulker, cfg.Limits.MaxFileStorageByteSize),
authAgent: authAgent,
}
}
2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery_test.go
@@ -405,7 +405,7 @@ func prepareFileDeliveryMock(t *testing.T) (http.Handler, apiServer, *MockTransp
ft: &FileDeliveryT{
bulker: fakebulk,
cache: c,
-deliverer: delivery.New(mockES, fakebulk, maxFileSize),
+deliverer: delivery.New(mockES, fakebulk, 0),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
4 changes: 1 addition & 3 deletions internal/pkg/api/handleUpload.go
@@ -31,8 +31,6 @@ import (
)

const (
-// TODO: move to a config
-maxFileSize = 104857600 // 100 MiB
maxUploadTimer = 24 * time.Hour
)

@@ -58,7 +56,7 @@ func NewUploadT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elasticsearch
chunkClient: chunkClient,
bulker: bulker,
cache: cache,
-uploader: uploader.New(chunkClient, bulker, cache, maxFileSize, maxUploadTimer),
+uploader: uploader.New(chunkClient, bulker, cache, cfg.Limits.MaxFileStorageByteSize, maxUploadTimer),
authAgent: authAgent,
authAPIKey: authAPIKey,
}
73 changes: 54 additions & 19 deletions internal/pkg/api/handleUpload_test.go
@@ -11,6 +11,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
@@ -71,18 +72,6 @@ func TestUploadBeginValidation(t *testing.T) {
"src": "agent"
}`,
},
{"Oversized file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
"size": ` + strconv.Itoa(maxFileSize+1024) + `,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent"
}`,
},
{"zero size file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
@@ -346,6 +335,48 @@ func TestUploadBeginBadRequest(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, rec.Code)
}

+func TestUploadBeginFileSize(t *testing.T) {
+
+	mockFile := func(size int64) string {
+		return fmt.Sprintf(`{
+			"file": {
+				"size": %d,
+				"name": "foo.png",
+				"mime_type": "image/png"
+			},
+			"agent_id": "foo",
+			"action_id": "123",
+			"src": "agent"
+		}`, size)
+	}
+
+	// now test various body contents
+	tests := []struct {
+		Name         string
+		MaxSize      int64
+		ExpectStatus int
+		InputSize    int64
+	}{
+		{"MaxSize 0 allows uploads", 0, http.StatusOK, 1000},
+		{"MaxSize 0 allows large uploads", 0, http.StatusOK, 1024 * 1024 * 1024 * 2},
+		{"MaxSize 0 does not allow 0-length files", 0, http.StatusBadRequest, 0},
+		{"Sizes larger than MaxSize are denied", 1024, http.StatusBadRequest, 2048},
+		{"Sizes smaller than MaxSize are allowed", 1024, http.StatusOK, 900},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.Name, func(t *testing.T) {
+
+			hr, _, _, _ := configureUploaderMock(t, tc.MaxSize)
+			rec := httptest.NewRecorder()
+			req := httptest.NewRequest(http.MethodPost, RouteUploadBegin, strings.NewReader(mockFile(tc.InputSize)))
+			hr.ServeHTTP(rec, req)
+			assert.Equal(t, tc.ExpectStatus, rec.Code)
+		})
+	}
+
+}
+
/*
Chunk data upload route
*/
@@ -377,7 +408,7 @@ func TestChunkUploadRouteParams(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: file.MaxChunkSize + 1,
Count: 2, // this is a 2-chunk "file" based on size above
Start: time.Now(),
@@ -410,7 +441,7 @@ func TestChunkUploadRequiresChunkHashHeader(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -458,7 +489,7 @@ func TestChunkUploadStatus(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -509,7 +540,7 @@ func TestChunkUploadExpiry(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: tc.StartTime,
@@ -547,7 +578,7 @@ func TestChunkUploadWritesTimestamp(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -597,7 +628,7 @@ func TestUploadCompleteRequiresMatchingAuth(t *testing.T) {
mockInfo := file.Info{
DocID: "bar." + tc.AgentInFileRecord,
ID: mockUploadID,
-ChunkSize: maxFileSize,
+ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now().Add(-time.Minute),
@@ -998,6 +1029,10 @@ func TestUploadCompleteBadRequests(t *testing.T) {

// prepareUploaderMock sets up common dependencies and registers upload routes to a returned router
func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
+	return configureUploaderMock(t, 0)
+}
+
+func configureUploaderMock(t *testing.T, fileSize int64) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
// chunk index operations skip the bulker in order to send binary docs directly
// so a mock *elasticsearch.Client needs to be prepared
es, tx := mockESClient(t)
@@ -1034,7 +1069,7 @@ func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockB
bulker: fakebulk,
chunkClient: es,
cache: c,
-uploader: uploader.New(es, fakebulk, c, maxFileSize, maxUploadTimer),
+uploader: uploader.New(es, fakebulk, c, fileSize, maxUploadTimer),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
12 changes: 8 additions & 4 deletions internal/pkg/config/env_defaults.go
@@ -57,6 +57,8 @@ const (
defaultStatusMax = 50
defaultStatusMaxBody = 0

+defaultFileStorageByteSize = 0 // no limit
+
defaultUploadStartInterval = time.Second * 2
defaultUploadStartBurst = 5
defaultUploadStartMax = 10
@@ -70,7 +72,7 @@ const (
defaultUploadChunkInterval = time.Millisecond * 3
defaultUploadChunkBurst = 5
defaultUploadChunkMax = 10
-defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MAY NOT be larger than 4 MiB
+defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MUST NOT be larger than 4 MiB

defaultFileDelivInterval = time.Millisecond * 100
defaultFileDelivBurst = 5
@@ -141,8 +143,9 @@ type limit struct {
}

type serverLimitDefaults struct {
-PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
-MaxConnections int           `config:"max_connections"`
+PolicyThrottle         time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
+MaxConnections         int           `config:"max_connections"`
+MaxFileStorageByteSize int64         `config:"max_file_storage_size"`

ActionLimit limit `config:"action_limit"`
PolicyLimit limit `config:"policy_limit"`
@@ -161,7 +164,8 @@ type serverLimitDefaults struct {

func defaultserverLimitDefaults() *serverLimitDefaults {
return &serverLimitDefaults{
-MaxConnections: defaultMaxConnections,
+MaxConnections:         defaultMaxConnections,
+MaxFileStorageByteSize: defaultFileStorageByteSize,
ActionLimit: limit{
Interval: defaultActionInterval,
Burst: defaultActionBurst,
8 changes: 5 additions & 3 deletions internal/pkg/config/limits.go
@@ -16,9 +16,10 @@ type Limit struct {
}

type ServerLimits struct {
-MaxAgents         int `config:"max_agents"`
-MaxHeaderByteSize int `config:"max_header_byte_size"`
-MaxConnections    int `config:"max_connections"`
+MaxAgents              int   `config:"max_agents"`
+MaxHeaderByteSize      int   `config:"max_header_byte_size"`
+MaxConnections         int   `config:"max_connections"`
+MaxFileStorageByteSize int64 `config:"max_file_storage_size"`

ActionLimit Limit `config:"action_limit"`
PolicyLimit Limit `config:"policy_limit"`
@@ -47,6 +48,7 @@ func (c *ServerLimits) LoadLimits(limits *envLimits) {
if c.MaxConnections == 0 {
c.MaxConnections = l.MaxConnections
}
+c.MaxFileStorageByteSize = l.MaxFileStorageByteSize

c.ActionLimit = mergeEnvLimit(c.ActionLimit, l.ActionLimit)
c.PolicyLimit = mergeEnvLimit(c.PolicyLimit, l.PolicyLimit)
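With `MaxFileStorageByteSize` wired into `ServerLimits`, the limit can presumably be set through the server's limits configuration block. As a rough illustration of how the `max_file_storage_size` key maps onto the `config:` struct tag, here is a minimal sketch using go-ucfg, the library behind those tags; the trimmed-down struct is a stand-in for the real `ServerLimits`, and the exact key path in a full fleet-server config may differ:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-ucfg/yaml"
)

// serverLimits is a trimmed stand-in for config.ServerLimits,
// showing only the field added by this PR.
type serverLimits struct {
	MaxFileStorageByteSize int64 `config:"max_file_storage_size"`
}

func main() {
	// 100 MiB, matching the previously hard-coded maxFileSize constant.
	raw := []byte("max_file_storage_size: 104857600\n")

	cfg, err := yaml.NewConfig(raw)
	if err != nil {
		panic(err)
	}

	var limits serverLimits
	if err := cfg.Unpack(&limits); err != nil {
		panic(err)
	}
	fmt.Println(limits.MaxFileStorageByteSize) // 104857600
}
```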
2 changes: 1 addition & 1 deletion internal/pkg/file/uploader/upload.go
@@ -73,7 +73,7 @@ func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict)
}

size, _ := data.Int64("file", "size")
-if size > u.sizeLimit {
+if u.sizeLimit != 0 && size > u.sizeLimit {
vSpan.End()
return file.Info{}, ErrFileSizeTooLarge
}
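The guard above is the core behavior change: a `sizeLimit` of 0 now means "no limit", and only a positive limit rejects oversized uploads (zero-byte files are still rejected by separate validation in the handler). A minimal standalone sketch of the rule, using a hypothetical `checkSize` helper and error value rather than the uploader's real API:

```go
package main

import (
	"errors"
	"fmt"
)

// errFileSizeTooLarge is a stand-in for the uploader's ErrFileSizeTooLarge.
var errFileSizeTooLarge = errors.New("file size is too large")

// checkSize applies the PR's rule: a limit of 0 disables the upper
// bound, while any positive limit rejects larger files.
func checkSize(size, limit int64) error {
	if limit != 0 && size > limit {
		return errFileSizeTooLarge
	}
	return nil
}

func main() {
	fmt.Println(checkSize(2048, 1024)) // file size is too large
	fmt.Println(checkSize(900, 1024))  // <nil>
	fmt.Println(checkSize(1<<31, 0))   // <nil>: 0 means unlimited
}
```

This matches the new `TestUploadBeginFileSize` cases: with `MaxSize` 0, even a 2 GiB upload is accepted, while a 2048-byte file is denied under a 1024-byte limit.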
17 changes: 9 additions & 8 deletions internal/pkg/file/uploader/upload_test.go
@@ -83,7 +83,7 @@ func TestUploadBeginReturnsCorrectInfo(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
-info, err := u.Begin(context.Background(), []string{}, data)
+info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

assert.Equal(t, int64(size), info.Total)
@@ -127,7 +127,7 @@ func TestUploadBeginWritesDocumentFromInputs(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
-_, err = u.Begin(context.Background(), []string{}, data)
+_, err = u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

payload, ok := fakeBulk.Calls[0].Arguments[3].([]byte)
@@ -171,7 +171,7 @@ func TestUploadBeginCalculatesCorrectChunkCount(t *testing.T) {
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
-info, err := u.Begin(context.Background(), []string{}, data)
+info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)
assert.Equal(t, tc.ExpectedCount, info.Count)
})
@@ -185,6 +185,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
ShouldError bool
Name string
}{
+{0, 1024 * 1024 * 300, false, "0 limit is unlimited"},
{500, 800, true, "800 is too large"},
{800, 500, false, "file within limits"},
{1024, 1023, false, "1-less than limit"},
@@ -210,7 +211,7 @@
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
-_, err := u.Begin(context.Background(), []string{}, data)
+_, err := u.Begin(t.Context(), []string{}, data)
if tc.ShouldError {
assert.ErrorIs(t, err, ErrFileSizeTooLarge)
} else {
@@ -264,7 +265,7 @@
}
}

-_, err = u.Begin(context.Background(), []string{}, data)
+_, err = u.Begin(t.Context(), []string{}, data)
assert.Errorf(t, err, "%s is a required field and should error if not provided", field)
})

@@ -342,21 +343,21 @@ func TestChunkMarksFinal(t *testing.T) {
"file.size": tc.FileSize,
})

-info, err := u.Begin(context.Background(), []string{}, data)
+info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

// for anything larger than 1-chunk, check for off-by-ones
if tc.FinalChunk > 0 {
mockUploadInfoResult(fakeBulk, info)
-_, prev, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk-1, "")
+_, prev, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk-1, "")
assert.NoError(t, err)
assert.Falsef(t, prev.Last, "penultimate chunk number (%d) should not be marked final", tc.FinalChunk-1)
}

mockUploadInfoResult(fakeBulk, info)

// make sure the final chunk is marked as such
-_, chunk, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk, "")
+_, chunk, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk, "")
assert.NoError(t, err)
assert.Truef(t, chunk.Last, "chunk number %d should be marked as Last", tc.FinalChunk)
})