From 6b81984b8a81d485cd6b3876ee4cd549fe20cdb5 Mon Sep 17 00:00:00 2001 From: benito Date: Thu, 27 May 2021 15:57:32 +0800 Subject: [PATCH 01/16] initial changes --- cmd/tusd/cli/hooks.go | 8 ++++++++ cmd/tusd/cli/hooks/hooks.go | 3 ++- cmd/tusd/cli/hooks/plugin.go | 3 +++ docs/hooks.md | 8 +++++++- examples/hooks/pre-get | 7 +++++++ pkg/handler/config.go | 5 +++++ pkg/handler/unrouted_handler.go | 7 +++++++ 7 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 examples/hooks/pre-get diff --git a/cmd/tusd/cli/hooks.go b/cmd/tusd/cli/hooks.go index 1bb9210a6..27ea5e63d 100644 --- a/cmd/tusd/cli/hooks.go +++ b/cmd/tusd/cli/hooks.go @@ -43,6 +43,10 @@ func preFinishCallback(info handler.HookEvent) error { return hookCallback(hooks.HookPreFinish, info) } +func preGetCallback(info handler.HookEvent) error { + return hookCallback(hooks.HookPreGet, info) +} + func SetupHookMetrics() { MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostFinish)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostTerminate)).Add(0) @@ -50,6 +54,7 @@ func SetupHookMetrics() { MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0) + MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreGet)).Add(0) } func SetupPreHooks(config *handler.Config) error { @@ -99,6 +104,7 @@ func SetupPreHooks(config *handler.Config) error { config.PreUploadCreateCallback = preCreateCallback config.PreFinishResponseCallback = preFinishCallback + config.PreGetResponseCallback = preGetCallback return nil } @@ -115,6 +121,8 @@ func SetupPostHooks(handler *handler.Handler) { invokeHookAsync(hooks.HookPostReceive, info) case info := <-handler.CreatedUploads: invokeHookAsync(hooks.HookPostCreate, info) + case info := <-handler.GetUploads: + invokeHookAsync(hooks.HookPreGet, info) } } }() diff --git 
a/cmd/tusd/cli/hooks/hooks.go b/cmd/tusd/cli/hooks/hooks.go index b72e3c3dc..be1a0e495 100644 --- a/cmd/tusd/cli/hooks/hooks.go +++ b/cmd/tusd/cli/hooks/hooks.go @@ -18,9 +18,10 @@ const ( HookPostCreate HookType = "post-create" HookPreCreate HookType = "pre-create" HookPreFinish HookType = "pre-finish" + HookPreGet HookType = "pre-get" ) -var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish} +var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish, HookPreGet} type hookDataStore struct { handler.DataStore diff --git a/cmd/tusd/cli/hooks/plugin.go b/cmd/tusd/cli/hooks/plugin.go index 8821527f0..9794d72c3 100644 --- a/cmd/tusd/cli/hooks/plugin.go +++ b/cmd/tusd/cli/hooks/plugin.go @@ -14,6 +14,7 @@ type PluginHookHandler interface { PostFinish(info handler.HookEvent) error PostTerminate(info handler.HookEvent) error PreFinish(info handler.HookEvent) error + PreGet(info handler.HookEvent) error } type PluginHook struct { @@ -57,6 +58,8 @@ func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutp err = h.handler.PreCreate(info) case HookPreFinish: err = h.handler.PreFinish(info) + case HookPreGet: + err = h.handler.PreGet(info) default: err = fmt.Errorf("hooks: unknown hook named %s", typ) } diff --git a/docs/hooks.md b/docs/hooks.md index 2bdd6c663..91660f40a 100644 --- a/docs/hooks.md +++ b/docs/hooks.md @@ -13,7 +13,7 @@ If not otherwise noted, all hooks are invoked in a *non-blocking* way, meaning t ## Blocking Hooks -On the other hand, there are a few *blocking* hooks, such as caused by the `pre-create` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. 
Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks. +On the other hand, there are a few *blocking* hooks, such as caused by the `pre-create`, `pre-get` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks. ### Blocking File Hooks @@ -39,6 +39,12 @@ This event will be triggered after an upload is fully finished but before a response has been returned to the client. This is a blocking hook, as such it can be used to validate or post-process an uploaded file. A non-zero exit code or HTTP response greater than `400` will return a HTTP 500 error to the client. +### pre-get + +This event will be triggered before an upload is read and returned to the client. +This is a blocking hook, as such it can be used to validate access limits. +A non-zero exit code or HTTP response greater than `400` will return a HTTP 500 error to the client. + ### post-finish This event will be triggered after an upload is fully finished, meaning that all chunks have been transfered and saved in the storage. After this point, no further modifications, except possible deletion, can be made to the upload entity and it may be desirable to use the file for further processing or notify other applications of the completions of this upload. 
diff --git a/examples/hooks/pre-get b/examples/hooks/pre-get new file mode 100644 index 000000000..1d822ae8a --- /dev/null +++ b/examples/hooks/pre-get @@ -0,0 +1,7 @@ +#!/bin/sh + +filename=$(cat /dev/stdin | jq -r .MetaData.filename) +if [ "$filename" != "public" ]; then + echo "Error: access unauthorized" + exit 1 +fi \ No newline at end of file diff --git a/pkg/handler/config.go b/pkg/handler/config.go index ae9676bc8..83a4df675 100644 --- a/pkg/handler/config.go +++ b/pkg/handler/config.go @@ -40,6 +40,11 @@ type Config struct { // potentially set by proxies when generating an absolute URL in the // response to POST requests. RespectForwardedHeaders bool + // PreGetCallback will be invoked before an upload is read, if the + // property is supplied. If the callback returns nil, the upload will be sent. + // Otherwise the HTTP request will be aborted. This can be used to implement + // access limits. + PreGetCallback func(hook HookEvent) error // PreUploadCreateCallback will be invoked before a new upload is created, if the // property is supplied. If the callback returns nil, the upload will be created. // Otherwise the HTTP request will be aborted. 
This can be used to implement diff --git a/pkg/handler/unrouted_handler.go b/pkg/handler/unrouted_handler.go index d9fadcdf6..3806e55f4 100644 --- a/pkg/handler/unrouted_handler.go +++ b/pkg/handler/unrouted_handler.go @@ -740,6 +740,13 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) return } + if handler.config.PreGetCallback != nil { + if err := handler.config.PreGetCallback(newHookEvent(info, r)); err != nil { + handler.sendError(w, r, err) + return + } + } + // Set headers before sending responses w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10)) From 61bfce1ee4ce33afb56ed08c8cad1b78eb77065f Mon Sep 17 00:00:00 2001 From: benito Date: Thu, 27 May 2021 16:45:07 +0800 Subject: [PATCH 02/16] remove post hook case --- cmd/tusd/cli/hooks.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/tusd/cli/hooks.go b/cmd/tusd/cli/hooks.go index 27ea5e63d..dec2f1bee 100644 --- a/cmd/tusd/cli/hooks.go +++ b/cmd/tusd/cli/hooks.go @@ -121,8 +121,6 @@ func SetupPostHooks(handler *handler.Handler) { invokeHookAsync(hooks.HookPostReceive, info) case info := <-handler.CreatedUploads: invokeHookAsync(hooks.HookPostCreate, info) - case info := <-handler.GetUploads: - invokeHookAsync(hooks.HookPreGet, info) } } }() From b50c2936a1f4094c74df3d8fbd85d30063d04c1c Mon Sep 17 00:00:00 2001 From: Juanjo Rodriguez Date: Mon, 14 Jun 2021 14:16:39 +0200 Subject: [PATCH 03/16] cli: Change timestamp to microseconds precision in log (#489) --- cmd/tusd/cli/log.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tusd/cli/log.go b/cmd/tusd/cli/log.go index 547568802..254a3b409 100644 --- a/cmd/tusd/cli/log.go +++ b/cmd/tusd/cli/log.go @@ -7,8 +7,8 @@ import ( "github.com/tus/tusd/pkg/handler" ) -var stdout = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Ltime) -var stderr = log.New(os.Stderr, "[tusd] ", log.Ldate|log.Ltime) +var stdout = log.New(os.Stdout, "[tusd] ", log.LstdFlags|log.Lmicroseconds) +var stderr = 
log.New(os.Stderr, "[tusd] ", log.LstdFlags|log.Lmicroseconds) func logEv(logOutput *log.Logger, eventName string, details ...string) { handler.LogEvent(logOutput, eventName, details...) From 781324986d1619b80219f0d82ace1ce8f41acc22 Mon Sep 17 00:00:00 2001 From: Marius Date: Thu, 1 Jul 2021 18:48:58 +0200 Subject: [PATCH 04/16] s3store: Return 400 instead of 500 if upload cannot be streamed Closes https://github.com/tus/tusd/issues/490 --- pkg/s3store/s3store.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/s3store/s3store.go b/pkg/s3store/s3store.go index 4d5e900bc..c5431c02e 100644 --- a/pkg/s3store/s3store.go +++ b/pkg/s3store/s3store.go @@ -558,7 +558,7 @@ func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) { }) if err == nil { // The multipart upload still exists, which means we cannot download it yet - return nil, errors.New("cannot stream non-finished upload") + return nil, handler.NewHTTPError(errors.New("cannot stream non-finished upload"), http.StatusBadRequest) } if isAwsError(err, "NoSuchUpload") { From 1b118858236a65c8ff6ee0a2184b8546589ad103 Mon Sep 17 00:00:00 2001 From: Ole-Martin Bratteng <1681525+omBratteng@users.noreply.github.com> Date: Thu, 29 Jul 2021 01:14:50 +0200 Subject: [PATCH 05/16] azurestore: Add implementation * Add azure-storage-blob-go dependency * Implement Azure BlobStorage store * Add AzureStore Mock test * Refactor Blob interfaces to use uppercase fields * Refactor and remove the Create function When getting the offset, and we get the status code BlobNotFound, we can say the offset is 0, and start from the beginning * Update the mock * Refactor error checking of GetOffset to actually check the service code * Begin testing azurestore * Write more tests * New feature allows to set access type on new containers and blob access tier * Write more docs * Upgrade azure-storage-blob-go to v0.13.0 * Remove AzError, not needed * Update link to container access type information * Remove ?toc 
from link in comments * Remove trailing spaces from workflow * Run tests with go1.15 and 1.16 * Don't fail fast This lets all other tests complete, and makes it easier to see if it's just a one-off fail, or on different OSes and versions * Remove darwin 386 from `build_all.sh` script Removed in go1.15 https://github.com/golang/go/issues/37610 * Update go version in `Dockerfile` * Compile for Apple Silicone (darwin arm64) Only go1.16 supports it --- .github/workflows/main.yml | 13 +- Dockerfile | 2 +- cmd/tusd/cli/composer.go | 45 +++ cmd/tusd/cli/flags.go | 10 + go.mod | 1 + go.sum | 29 ++ pkg/azurestore/azureservice.go | 310 ++++++++++++++++++ pkg/azurestore/azurestore.go | 232 ++++++++++++++ pkg/azurestore/azurestore_mock_test.go | 146 +++++++++ pkg/azurestore/azurestore_test.go | 426 +++++++++++++++++++++++++ scripts/build_all.sh | 4 +- 11 files changed, 1209 insertions(+), 9 deletions(-) create mode 100644 pkg/azurestore/azureservice.go create mode 100644 pkg/azurestore/azurestore.go create mode 100644 pkg/azurestore/azurestore_mock_test.go create mode 100644 pkg/azurestore/azurestore_test.go diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2aee0d267..f992c84cc 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -5,8 +5,9 @@ name: Test jobs: test: strategy: + fail-fast: false matrix: - go-version: [1.12.x, 1.13.x] + go-version: [1.15.x, 1.16.x] platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: @@ -31,9 +32,9 @@ jobs: with: go-version: '1.13.1' - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v2 - name: Build TUSD - if: startsWith(github.ref, 'refs/tags/') + if: startsWith(github.ref, 'refs/tags/') env: GO111MODULE: on run: ./scripts/build_all.sh @@ -43,7 +44,7 @@ jobs: with: files: tusd_*.* env: - GITHUB_TOKEN: ${{ secrets.GH_RELEASE_TOKEN }} + GITHUB_TOKEN: ${{ secrets.GH_RELEASE_TOKEN }} - name: Deploy to heroku uses: 
akhileshns/heroku-deploy@v3.4.6 with: @@ -53,8 +54,8 @@ jobs: - uses: azure/docker-login@v1 with: username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} + password: ${{ secrets.DOCKER_PASSWORD }} - name: Build and push docker image run: | docker build -t tusproject/tusd:$GITHUB_SHA . - docker push tusproject/tusd:$GITHUB_SHA + docker push tusproject/tusd:$GITHUB_SHA diff --git a/Dockerfile b/Dockerfile index 17ae86203..a2b8f9022 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.16-alpine AS builder # Copy in the git repo from the build context COPY . /go/src/github.com/tus/tusd/ diff --git a/cmd/tusd/cli/composer.go b/cmd/tusd/cli/composer.go index 356cf8f07..914bb7051 100644 --- a/cmd/tusd/cli/composer.go +++ b/cmd/tusd/cli/composer.go @@ -1,10 +1,12 @@ package cli import ( + "fmt" "os" "path/filepath" "strings" + "github.com/tus/tusd/pkg/azurestore" "github.com/tus/tusd/pkg/filelocker" "github.com/tus/tusd/pkg/filestore" "github.com/tus/tusd/pkg/gcsstore" @@ -91,6 +93,49 @@ func CreateComposer() { locker := memorylocker.New() locker.UseIn(Composer) + } else if Flags.AzStorage != "" { + + accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") + if accountName == "" { + stderr.Fatalf("No service account name for Azure BlockBlob Storage using the AZURE_STORAGE_ACCOUNT environment variable.\n") + } + + accountKey := os.Getenv("AZURE_STORAGE_KEY") + if accountKey == "" { + stderr.Fatalf("No service account key for Azure BlockBlob Storage using the AZURE_STORAGE_KEY environment variable.\n") + } + + azureEndpoint := Flags.AzEndpoint + // Enables support for using Azurite as a storage emulator without messing with proxies and stuff + // e.g. 
http://127.0.0.1:10000/devstoreaccount1 + if azureEndpoint == "" { + azureEndpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName) + stdout.Printf("Custom Azure Endpoint not specified in flag variable azure-endpoint.\n"+ + "Using endpoint %s\n", azureEndpoint) + } else { + stdout.Printf("Using Azure endpoint %s\n", azureEndpoint) + } + + azConfig := &azurestore.AzConfig{ + AccountName: accountName, + AccountKey: accountKey, + ContainerName: Flags.AzStorage, + ContainerAccessType: Flags.AzContainerAccessType, + BlobAccessTier: Flags.AzBlobAccessTier, + Endpoint: azureEndpoint, + } + + azService, err := azurestore.NewAzureService(azConfig) + if err != nil { + stderr.Fatalf(err.Error()) + } + + store := azurestore.New(azService) + store.ObjectPrefix = Flags.AzObjectPrefix + store.Container = Flags.AzStorage + + store.UseIn(Composer) + } else { dir, err := filepath.Abs(Flags.UploadDir) if err != nil { diff --git a/cmd/tusd/cli/flags.go b/cmd/tusd/cli/flags.go index fdf9da976..296928fac 100644 --- a/cmd/tusd/cli/flags.go +++ b/cmd/tusd/cli/flags.go @@ -29,6 +29,11 @@ var Flags struct { S3DisableSSL bool GCSBucket string GCSObjectPrefix string + AzStorage string + AzContainerAccessType string + AzBlobAccessTier string + AzObjectPrefix string + AzEndpoint string EnabledHooksString string FileHooksDir string HttpHooksEndpoint string @@ -70,6 +75,11 @@ func ParseFlags() { flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)") flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)") flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names (can't contain underscore character)") + flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a 
storage backend (requires the AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY environment variable to be set)") + flag.StringVar(&Flags.AzContainerAccessType, "azure-container-access-type", "", "Access type when creating a new container if it does not exist (possible values: blob, container, '')") + flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')") + flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names") + flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)") flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events") flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts") flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to") diff --git a/go.mod b/go.mod index 736ec3748..cd9de17a4 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.12 require ( cloud.google.com/go v0.40.0 + github.com/Azure/azure-storage-blob-go v0.13.0 github.com/aws/aws-sdk-go v1.20.1 github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 github.com/golang/mock v1.3.1 diff --git a/go.sum b/go.sum index 4fe38a640..cee0659a4 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,17 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk= cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro= +github.com/Azure/azure-pipeline-go v0.2.3 
h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.12.0 h1:7bFXA1QB+lOK2/ASWHhp6/vnxjaeeZq6t8w1Jyp0Iaw= +github.com/Azure/azure-storage-blob-go v0.12.0/go.mod h1:A0u4VjtpgZJ7Y7um/+ix2DHBuEKFC6sEIlj0xc13a4Q= +github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= +github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -20,6 +31,7 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -51,6 +63,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= @@ -71,6 +85,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -80,10 +98,12 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -122,6 +142,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -134,8 +155,11 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -149,9 +173,13 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4 h1:kCCpuwSAoYJPkNc6x0xT9yTtV4oKtARo4RGBQWOfg9E= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -196,6 +224,7 @@ gopkg.in/Acconut/lockfile.v1 v1.1.0/go.mod h1:6UCz3wJ8tSFUsPR6uP/j8uegEtDuEEqFxl gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM= gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= diff --git a/pkg/azurestore/azureservice.go b/pkg/azurestore/azureservice.go new file mode 100644 index 000000000..ffdbb8f61 --- /dev/null +++ b/pkg/azurestore/azureservice.go @@ -0,0 +1,310 @@ +// Package azurestore provides a Azure Blob Storage 
based backend + +// AzureStore is a storage backend that uses the AzService interface in order to store uploads in Azure Blob Storage. +// It stores the uploads in a container specified in two different BlockBlob: The `[id].info` blobs are used to store the fileinfo in JSON format. The `[id]` blobs without an extension contain the raw binary data uploaded. +// If the upload is not finished within a week, the uncommited blocks will be discarded. + +// Support for setting the default Continaer access type and Blob access tier varies on your Azure Storage Account and its limits. +// More information about Container access types and limts +// https://docs.microsoft.com/en-us/azure/storage/blobs/anonymous-read-access-configure?tabs=portal + +// More information about Blob access tiers and limits +// https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-performance-tiers +// https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#access-tiers-for-block-blob-data + +package azurestore + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "net/url" + "sort" + "strings" + + "github.com/Azure/azure-storage-blob-go/azblob" +) + +const ( + InfoBlobSuffix string = ".info" + MaxBlockBlobSize int64 = azblob.BlockBlobMaxBlocks * azblob.BlockBlobMaxStageBlockBytes + MaxBlockBlobChunkSize int64 = azblob.BlockBlobMaxStageBlockBytes +) + +type azService struct { + BlobAccessTier azblob.AccessTierType + ContainerURL *azblob.ContainerURL + ContainerName string +} + +type AzService interface { + NewBlob(ctx context.Context, name string) (AzBlob, error) +} + +type AzConfig struct { + AccountName string + AccountKey string + BlobAccessTier string + ContainerName string + ContainerAccessType string + Endpoint string +} + +type AzBlob interface { + // Delete the blob + Delete(ctx context.Context) error + // Upload the blob + Upload(ctx context.Context, body io.ReadSeeker) error + // Download the contents of the blob + 
Download(ctx context.Context) ([]byte, error) + // Get the offset of the blob and its indexes + GetOffset(ctx context.Context) (int64, error) + // Commit the uploaded blocks to the BlockBlob + Commit(ctx context.Context) error +} + +type BlockBlob struct { + Blob *azblob.BlockBlobURL + AccessTier azblob.AccessTierType + Indexes []int +} + +type InfoBlob struct { + Blob *azblob.BlockBlobURL +} + +// New Azure service for communication to Azure BlockBlob Storage API +func NewAzureService(config *AzConfig) (AzService, error) { + // struct to store your credentials. + credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey) + if err != nil { + return nil, err + } + + // Might be limited by the storage account + // "" or default inherits the access type from the Storage Account + var containerAccessType azblob.PublicAccessType + switch config.ContainerAccessType { + case "container": + containerAccessType = azblob.PublicAccessContainer + case "blob": + containerAccessType = azblob.PublicAccessBlob + case "": + default: + containerAccessType = azblob.PublicAccessNone + } + + // Does not support the premium access tiers + var blobAccessTierType azblob.AccessTierType + switch config.BlobAccessTier { + case "archive": + blobAccessTierType = azblob.AccessTierArchive + case "cool": + blobAccessTierType = azblob.AccessTierCool + case "hot": + blobAccessTierType = azblob.AccessTierHot + case "": + default: + blobAccessTierType = azblob.DefaultAccessTier + } + + // The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. + p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) + cURL, _ := url.Parse(fmt.Sprintf("%s/%s", config.Endpoint, config.ContainerName)) + + // Get the ContainerURL URL + containerURL := azblob.NewContainerURL(*cURL, p) + // Do not care about response since it will fail if container exists and create if it does not. 
+ _, _ = containerURL.Create(context.Background(), azblob.Metadata{}, containerAccessType) + + return &azService{ + BlobAccessTier: blobAccessTierType, + ContainerURL: &containerURL, + ContainerName: config.ContainerName, + }, nil +} + +// Determine if we return a InfoBlob or BlockBlob, based on the name +func (service *azService) NewBlob(ctx context.Context, name string) (AzBlob, error) { + var fileBlob AzBlob + bb := service.ContainerURL.NewBlockBlobURL(name) + if strings.HasSuffix(name, InfoBlobSuffix) { + fileBlob = &InfoBlob{ + Blob: &bb, + } + } else { + fileBlob = &BlockBlob{ + Blob: &bb, + Indexes: []int{}, + AccessTier: service.BlobAccessTier, + } + } + return fileBlob, nil +} + +// Delete the blockBlob from Azure Blob Storage +func (blockBlob *BlockBlob) Delete(ctx context.Context) error { + _, err := blockBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}) + return err +} + +// Upload a block to Azure Blob Storage and add it to the indexes to be after upload is finished +func (blockBlob *BlockBlob) Upload(ctx context.Context, body io.ReadSeeker) error { + // Keep track of the indexes + var index int + if len(blockBlob.Indexes) == 0 { + index = 0 + } else { + index = blockBlob.Indexes[len(blockBlob.Indexes)-1] + 1 + } + blockBlob.Indexes = append(blockBlob.Indexes, index) + + _, err := blockBlob.Blob.StageBlock(ctx, blockIDIntToBase64(index), body, azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}) + if err != nil { + return err + } + return nil +} + +// Download the blockBlob from Azure Blob Storage +func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err error) { + downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + + // If the file does not exist, it will not return an error, but a 404 status and body + if downloadResponse != nil && downloadResponse.StatusCode() == 404 { + 
return nil, fmt.Errorf("File %s does not exist", blockBlob.Blob.ToBlockBlobURL()) + } + if err != nil { + return nil, err + } + + bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}) + downloadedData := bytes.Buffer{} + + _, err = downloadedData.ReadFrom(bodyStream) + if err != nil { + return nil, err + } + + return downloadedData.Bytes(), nil +} + +func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) { + // Get the offset of the file from azure storage + // For the blob, show each block (ID and size) that is a committed part of it. + var indexes []int + var offset int64 + + getBlock, err := blockBlob.Blob.GetBlockList(ctx, azblob.BlockListAll, azblob.LeaseAccessConditions{}) + if err != nil { + if err.(azblob.StorageError).ServiceCode() == azblob.ServiceCodeBlobNotFound { + return 0, nil + } + + return 0, err + } + + // Need committed blocks to be added to offset to know how big the file really is + for _, block := range getBlock.CommittedBlocks { + offset += block.Size + indexes = append(indexes, blockIDBase64ToInt(block.Name)) + } + + // Need to get the uncommitted blocks so that we can commit them + for _, block := range getBlock.UncommittedBlocks { + offset += block.Size + indexes = append(indexes, blockIDBase64ToInt(block.Name)) + } + + // Sort the block IDs in ascending order. This is required as Azure returns the block lists alphabetically + // and we store the indexes as base64 encoded ints. 
+ sort.Ints(indexes) + blockBlob.Indexes = indexes + + return offset, nil +} + +// After all the blocks have been uploaded, we commit the unstaged blocks by sending a Block List +func (blockBlob *BlockBlob) Commit(ctx context.Context) error { + base64BlockIDs := make([]string, len(blockBlob.Indexes)) + for index, id := range blockBlob.Indexes { + base64BlockIDs[index] = blockIDIntToBase64(id) + } + + _, err := blockBlob.Blob.CommitBlockList(ctx, base64BlockIDs, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, blockBlob.AccessTier, nil, azblob.ClientProvidedKeyOptions{}) + return err +} + +// Delete the infoBlob from Azure Blob Storage +func (infoBlob *InfoBlob) Delete(ctx context.Context) error { + _, err := infoBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}) + return err +} + +// Upload the infoBlob to Azure Blob Storage +// Because the info file is presumed to be smaller than azblob.BlockBlobMaxUploadBlobBytes (256MiB), we can upload it all in one go +// New uploaded data will create a new, or overwrite the existing block blob +func (infoBlob *InfoBlob) Upload(ctx context.Context, body io.ReadSeeker) error { + _, err := infoBlob.Blob.Upload(ctx, body, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}) + return err +} + +// Download the infoBlob from Azure Blob Storage +func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) { + downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + + // If the file does not exist, it will not return an error, but a 404 status and body + if downloadResponse != nil && downloadResponse.StatusCode() == 404 { + return nil, fmt.Errorf("File %s does not exist", infoBlob.Blob.ToBlockBlobURL()) + } + if err != nil { + return nil, err + } + + bodyStream := 
downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}) + downloadedData := bytes.Buffer{} + + _, err = downloadedData.ReadFrom(bodyStream) + if err != nil { + return nil, err + } + + return downloadedData.Bytes(), nil +} + +// infoBlob does not utilise offset, so just return 0, nil +func (infoBlob *InfoBlob) GetOffset(ctx context.Context) (int64, error) { + return 0, nil +} + +// infoBlob does not have uncommitted blocks, so just return nil +func (infoBlob *InfoBlob) Commit(ctx context.Context) error { + return nil +} + +// === Helper Functions === +// These helper functions convert a binary block ID to a base-64 string and vice versa +// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length +func blockIDBinaryToBase64(blockID []byte) string { + return base64.StdEncoding.EncodeToString(blockID) +} + +func blockIDBase64ToBinary(blockID string) []byte { + binary, _ := base64.StdEncoding.DecodeString(blockID) + return binary +} + +// These helper functions convert an int block ID to a base-64 string and vice versa +func blockIDIntToBase64(blockID int) string { + binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long + binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID)) + return blockIDBinaryToBase64(binaryBlockID) +} + +func blockIDBase64ToInt(blockID string) int { + blockIDBase64ToBinary(blockID) + return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID))) +} diff --git a/pkg/azurestore/azurestore.go b/pkg/azurestore/azurestore.go new file mode 100644 index 000000000..8447e744b --- /dev/null +++ b/pkg/azurestore/azurestore.go @@ -0,0 +1,232 @@ +package azurestore + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/tus/tusd/internal/uid" + "github.com/tus/tusd/pkg/handler" +) + +type AzureStore struct { + Service AzService + ObjectPrefix string + Container string +} + +type AzUpload struct { + ID string + 
InfoBlob AzBlob + BlockBlob AzBlob + InfoHandler *handler.FileInfo +} + +func New(service AzService) *AzureStore { + return &AzureStore{ + Service: service, + } +} + +// UseIn sets this store as the core data store in the passed composer and adds +// all possible extension to it. +func (store AzureStore) UseIn(composer *handler.StoreComposer) { + composer.UseCore(store) + composer.UseTerminater(store) + composer.UseLengthDeferrer(store) +} + +func (store AzureStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) { + if info.ID == "" { + info.ID = uid.Uid() + } + + if info.Size > MaxBlockBlobSize { + return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes", + info.Size, MaxBlockBlobSize) + } + + blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID)) + if err != nil { + return nil, err + } + + infoFile := store.keyWithPrefix(store.infoPath(info.ID)) + infoBlob, err := store.Service.NewBlob(ctx, infoFile) + if err != nil { + return nil, err + } + + info.Storage = map[string]string{ + "Type": "azurestore", + "Container": store.Container, + "Key": store.keyWithPrefix(info.ID), + } + + azUpload := &AzUpload{ + ID: info.ID, + InfoHandler: &info, + InfoBlob: infoBlob, + BlockBlob: blockBlob, + } + + err = azUpload.writeInfo(ctx) + if err != nil { + return nil, fmt.Errorf("azurestore: unable to create InfoHandler file:\n%s", err) + } + + return azUpload, nil +} + +func (store AzureStore) GetUpload(ctx context.Context, id string) (handle handler.Upload, err error) { + info := handler.FileInfo{} + infoFile := store.keyWithPrefix(store.infoPath(id)) + infoBlob, err := store.Service.NewBlob(ctx, infoFile) + if err != nil { + return nil, err + } + + // Download the info file from Azure Storage + data, err := infoBlob.Download(ctx) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &info); err != nil { + return nil, err + } + + if info.Size > MaxBlockBlobSize { + 
return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes", + info.Size, MaxBlockBlobSize) + } + + blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID)) + if err != nil { + return nil, err + } + + offset, err := blockBlob.GetOffset(ctx) + if err != nil { + return nil, err + } + + info.Offset = offset + + return &AzUpload{ + ID: id, + InfoHandler: &info, + InfoBlob: infoBlob, + BlockBlob: blockBlob, + }, nil +} + +func (store AzureStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload { + return upload.(*AzUpload) +} + +func (store AzureStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload { + return upload.(*AzUpload) +} + +func (upload *AzUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { + r := bufio.NewReader(src) + buf := new(bytes.Buffer) + n, err := r.WriteTo(buf) + if err != nil { + return 0, err + } + + chunkSize := int64(binary.Size(buf.Bytes())) + if chunkSize > MaxBlockBlobChunkSize { + return 0, fmt.Errorf("azurestore: Chunk of size %v too large. 
Max chunk size is %v", chunkSize, MaxBlockBlobChunkSize) + } + + re := bytes.NewReader(buf.Bytes()) + err = upload.BlockBlob.Upload(ctx, re) + if err != nil { + return 0, err + } + + upload.InfoHandler.Offset += n + return n, nil +} + +func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) { + info := handler.FileInfo{} + + if upload.InfoHandler != nil { + return *upload.InfoHandler, nil + } + + data, err := upload.InfoBlob.Download(ctx) + if err != nil { + return info, err + } + + if err := json.Unmarshal(data, &info); err != nil { + return info, err + } + + upload.InfoHandler = &info + return info, nil +} + +// Get the uploaded file from the Azure storage +func (upload *AzUpload) GetReader(ctx context.Context) (io.Reader, error) { + b, err := upload.BlockBlob.Download(ctx) + if err != nil { + return nil, err + } + return bytes.NewReader(b), nil +} + +// Finish the file upload and commit the block list +func (upload *AzUpload) FinishUpload(ctx context.Context) error { + return upload.BlockBlob.Commit(ctx) +} + +func (upload *AzUpload) Terminate(ctx context.Context) error { + // Delete info file + err := upload.InfoBlob.Delete(ctx) + if err != nil { + return err + } + + // Delete file + return upload.BlockBlob.Delete(ctx) +} + +func (upload *AzUpload) DeclareLength(ctx context.Context, length int64) error { + upload.InfoHandler.Size = length + upload.InfoHandler.SizeIsDeferred = false + return upload.writeInfo(ctx) +} + +func (store AzureStore) infoPath(id string) string { + return id + InfoBlobSuffix +} + +func (upload *AzUpload) writeInfo(ctx context.Context) error { + data, err := json.Marshal(upload.InfoHandler) + if err != nil { + return err + } + + reader := bytes.NewReader(data) + return upload.InfoBlob.Upload(ctx, reader) +} + +func (store *AzureStore) keyWithPrefix(key string) string { + prefix := store.ObjectPrefix + if prefix != "" && !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + return prefix + key +} diff --git 
a/pkg/azurestore/azurestore_mock_test.go b/pkg/azurestore/azurestore_mock_test.go new file mode 100644 index 000000000..0af7cba57 --- /dev/null +++ b/pkg/azurestore/azurestore_mock_test.go @@ -0,0 +1,146 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/tus/tusd/pkg/azurestore (interfaces: AzService,AzBlob) + +// Package azurestore_test is a generated GoMock package. +package azurestore_test + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + azurestore "github.com/tus/tusd/pkg/azurestore" + io "io" + reflect "reflect" +) + +// MockAzService is a mock of AzService interface +type MockAzService struct { + ctrl *gomock.Controller + recorder *MockAzServiceMockRecorder +} + +// MockAzServiceMockRecorder is the mock recorder for MockAzService +type MockAzServiceMockRecorder struct { + mock *MockAzService +} + +// NewMockAzService creates a new mock instance +func NewMockAzService(ctrl *gomock.Controller) *MockAzService { + mock := &MockAzService{ctrl: ctrl} + mock.recorder = &MockAzServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder { + return m.recorder +} + +// NewBlob mocks base method +func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBlob", arg0, arg1) + ret0, _ := ret[0].(azurestore.AzBlob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewBlob indicates an expected call of NewBlob +func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1) +} + +// MockAzBlob is a mock of AzBlob interface +type MockAzBlob struct { + ctrl *gomock.Controller + recorder *MockAzBlobMockRecorder +} + +// MockAzBlobMockRecorder is the mock 
recorder for MockAzBlob +type MockAzBlobMockRecorder struct { + mock *MockAzBlob +} + +// NewMockAzBlob creates a new mock instance +func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob { + mock := &MockAzBlob{ctrl: ctrl} + mock.recorder = &MockAzBlobMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder { + return m.recorder +} + +// Commit mocks base method +func (m *MockAzBlob) Commit(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Commit indicates an expected call of Commit +func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0) +} + +// Delete mocks base method +func (m *MockAzBlob) Delete(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0) +} + +// Download mocks base method +func (m *MockAzBlob) Download(arg0 context.Context) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Download", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Download indicates an expected call of Download +func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0) +} + +// GetOffset mocks base method +func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, 
error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOffset", arg0) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetOffset indicates an expected call of GetOffset +func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0) +} + +// Upload mocks base method +func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Upload", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Upload indicates an expected call of Upload +func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1) +} diff --git a/pkg/azurestore/azurestore_test.go b/pkg/azurestore/azurestore_test.go new file mode 100644 index 000000000..abd09db82 --- /dev/null +++ b/pkg/azurestore/azurestore_test.go @@ -0,0 +1,426 @@ +package azurestore_test + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "testing" + + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/tus/tusd/pkg/azurestore" + "github.com/tus/tusd/pkg/handler" +) + +//go:generate mockgen -destination=./azurestore_mock_test.go -package=azurestore_test github.com/tus/tusd/pkg/azurestore AzService,AzBlob + +// Test interface implementations +var _ handler.DataStore = azurestore.AzureStore{} +var _ handler.TerminaterDataStore = azurestore.AzureStore{} +var _ handler.LengthDeferrerDataStore = azurestore.AzureStore{} + +const mockID = "123456789abcdefghijklmnopqrstuvwxyz" +const mockContainer = "tusd" +const mockSize int64 = 4096 +const mockReaderData = "Hello World" + +var mockTusdInfo = handler.FileInfo{ + ID: 
mockID, + Size: mockSize, + MetaData: map[string]string{ + "foo": "bar", + }, + Storage: map[string]string{ + "Type": "azurestore", + "Container": mockContainer, + "Key": mockID, + }, +} + +func TestNewUpload(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + ctx := context.Background() + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + r := bytes.NewReader(data) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1), + ) + + upload, err := store.NewUpload(context.Background(), mockTusdInfo) + assert.Nil(err) + assert.NotNil(upload) +} + +func TestNewUploadWithPrefix(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + ctx := context.Background() + + objectPrefix := "/path/to/file/" + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + store.ObjectPrefix = objectPrefix + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + info := mockTusdInfo + info.Storage = map[string]string{ + "Type": "azurestore", + "Container": mockContainer, + "Key": objectPrefix + mockID, + } + + data, err := json.Marshal(info) + assert.Nil(err) + + r := bytes.NewReader(data) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, objectPrefix+mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1), + service.EXPECT().NewBlob(ctx, objectPrefix+mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1), + ) + + upload, err := store.NewUpload(context.Background(), mockTusdInfo) + assert.Nil(err) + 
assert.NotNil(upload) +} + +func TestNewUploadTooLargeBlob(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + ctx := context.Background() + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + info := mockTusdInfo + info.Size = azurestore.MaxBlockBlobSize + 1 + + upload, err := store.NewUpload(ctx, info) + assert.Nil(upload) + assert.NotNil(err) + assert.Contains(err.Error(), "exceeded MaxBlockBlobSize") + assert.Contains(err.Error(), "209715200000001") +} + +func TestGetUpload(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + info, err := upload.GetInfo(ctx) + assert.Nil(err) + assert.NotNil(info) + cancel() +} + +func TestGetUploadTooLargeBlob(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + infoBlob := NewMockAzBlob(mockCtrl) + 
assert.NotNil(infoBlob) + + info := mockTusdInfo + info.Size = azurestore.MaxBlockBlobSize + 1 + data, err := json.Marshal(info) + assert.Nil(err) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(upload) + assert.NotNil(err) + assert.Contains(err.Error(), "exceeded MaxBlockBlobSize") + assert.Contains(err.Error(), "209715200000001") + cancel() +} + +func TestGetUploadNotFound(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + ctx := context.Background() + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(nil, errors.New(string(azblob.StorageErrorCodeBlobNotFound))).Times(1), + ) + + _, err := store.GetUpload(context.Background(), mockID) + assert.NotNil(err) + assert.Equal(err.Error(), "BlobNotFound") +} + +func TestGetReader(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + 
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1), + blockBlob.EXPECT().Download(ctx).Return([]byte(mockReaderData), nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + reader, err := upload.GetReader(ctx) + assert.Nil(err) + assert.NotNil(reader) + cancel() +} + +func TestWriteChunk(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + var offset int64 = mockSize / 2 + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1), + blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + _, err = upload.WriteChunk(ctx, offset, bytes.NewReader([]byte(mockReaderData))) + assert.Nil(err) + cancel() +} + +func TestFinishUpload(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + var offset int64 = mockSize / 2 + + 
gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1), + blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + err = upload.FinishUpload(ctx) + assert.Nil(err) + cancel() +} + +func TestTerminate(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(infoBlob) + + data, err := json.Marshal(mockTusdInfo) + assert.Nil(err) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1), + infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1), + blockBlob.EXPECT().Delete(ctx).Return(nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + err = store.AsTerminatableUpload(upload).Terminate(ctx) + assert.Nil(err) + cancel() +} + +func TestDeclareLength(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + assert := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + + service := NewMockAzService(mockCtrl) + store := azurestore.New(service) + store.Container = mockContainer + + blockBlob := NewMockAzBlob(mockCtrl) + assert.NotNil(blockBlob) + + infoBlob := NewMockAzBlob(mockCtrl) + 
assert.NotNil(infoBlob) + + info := mockTusdInfo + info.Size = mockSize * 2 + + data, err := json.Marshal(info) + assert.Nil(err) + + r := bytes.NewReader(data) + + gomock.InOrder( + service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1), + infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1), + service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1), + blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1), + infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1), + ) + + upload, err := store.GetUpload(ctx, mockID) + assert.Nil(err) + + err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, mockSize*2) + assert.Nil(err) + + info, err = upload.GetInfo(ctx) + assert.Nil(err) + assert.NotNil(info) + assert.Equal(info.Size, mockSize*2) + + cancel() +} diff --git a/scripts/build_all.sh b/scripts/build_all.sh index c79e60a75..11a0a4e39 100755 --- a/scripts/build_all.sh +++ b/scripts/build_all.sh @@ -10,8 +10,8 @@ compile linux 386 compile linux amd64 compile linux arm compile linux arm64 -compile darwin 386 compile darwin amd64 +compile darwin arm64 compile windows 386 .exe compile windows amd64 .exe @@ -19,8 +19,8 @@ maketar linux 386 maketar linux amd64 maketar linux arm maketar linux arm64 -makezip darwin 386 makezip darwin amd64 +makezip darwin arm64 makezip windows 386 .exe makezip windows amd64 .exe makedep amd64 From 0ad435b4c80bbd8fbaae784206e427d6b1316688 Mon Sep 17 00:00:00 2001 From: Marius Date: Wed, 13 Oct 2021 21:08:09 +0200 Subject: [PATCH 06/16] core: Fix wrong offset in upload progress notifications Closes https://github.com/tus/tusd/issues/500 --- pkg/handler/patch_test.go | 14 ++++++++------ pkg/handler/unrouted_handler.go | 5 +++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/handler/patch_test.go b/pkg/handler/patch_test.go index 0938e7bde..3329a4524 100644 --- a/pkg/handler/patch_test.go +++ b/pkg/handler/patch_test.go @@ -497,14 +497,16 @@ func TestPatch(t 
*testing.T) { defer ctrl.Finish() upload := NewMockFullUpload(ctrl) + // We simulate that the upload already has an offset of 10 bytes. Therefore, the progress notifications + // must be the sum of the existing offset and the newly read bytes. gomock.InOrder( store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil), upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{ ID: "yes", - Offset: 0, + Offset: 10, Size: 100, }, nil), - upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil), + upload.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("first second third")).Return(int64(18), nil), ) handler, _ := NewHandler(Config{ @@ -525,7 +527,7 @@ func TestPatch(t *testing.T) { info := event.Upload a.Equal("yes", info.ID) a.Equal(int64(100), info.Size) - a.Equal(int64(6), info.Offset) + a.Equal(int64(16), info.Offset) writer.Write([]byte("second ")) writer.Write([]byte("third")) @@ -534,7 +536,7 @@ func TestPatch(t *testing.T) { info = event.Upload a.Equal("yes", info.ID) a.Equal(int64(100), info.Size) - a.Equal(int64(18), info.Offset) + a.Equal(int64(28), info.Offset) writer.Close() @@ -548,12 +550,12 @@ func TestPatch(t *testing.T) { ReqHeader: map[string]string{ "Tus-Resumable": "1.0.0", "Content-Type": "application/offset+octet-stream", - "Upload-Offset": "0", + "Upload-Offset": "10", }, ReqBody: reader, Code: http.StatusNoContent, ResHeader: map[string]string{ - "Upload-Offset": "18", + "Upload-Offset": "28", }, }).Run(handler, t) diff --git a/pkg/handler/unrouted_handler.go b/pkg/handler/unrouted_handler.go index d9fadcdf6..ba2fb4280 100644 --- a/pkg/handler/unrouted_handler.go +++ b/pkg/handler/unrouted_handler.go @@ -986,20 +986,21 @@ func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string { // closed. 
func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader *bodyReader) chan<- struct{} { previousOffset := int64(0) + originalOffset := hook.Upload.Offset stop := make(chan struct{}, 1) go func() { for { select { case <-stop: - hook.Upload.Offset = reader.bytesRead() + hook.Upload.Offset = originalOffset + reader.bytesRead() if hook.Upload.Offset != previousOffset { handler.UploadProgress <- hook previousOffset = hook.Upload.Offset } return case <-time.After(1 * time.Second): - hook.Upload.Offset = reader.bytesRead() + hook.Upload.Offset = originalOffset + reader.bytesRead() if hook.Upload.Offset != previousOffset { handler.UploadProgress <- hook previousOffset = hook.Upload.Offset From 484e506cd313f54da5265c7d51245790298c4cfe Mon Sep 17 00:00:00 2001 From: CharlyBr Date: Wed, 13 Oct 2021 21:33:49 +0200 Subject: [PATCH 07/16] misc: Ignore tusd binary (#505) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 12ba2985f..a263e4db7 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ cover.out data/ node_modules/ .DS_Store +./tusd From d973f9a81ab6886209308bc7ebbfecfd4d4abb8c Mon Sep 17 00:00:00 2001 From: Marius Date: Wed, 13 Oct 2021 21:40:39 +0200 Subject: [PATCH 08/16] misc: Increase Go version in go.mod The current version Go 1.12 is too old, so Heroku does not compile tusd anymore: https://github.com/tus/tusd/runs/3886723478 We use Go 1.16 and not 1.17 because we always support the two latest major releases. 
--- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index cd9de17a4..c3c1e6a0c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/tus/tusd -go 1.12 +go 1.16 require ( cloud.google.com/go v0.40.0 From 5614cecb608f2a187e3018730d4afcb793fa3ddd Mon Sep 17 00:00:00 2001 From: Marius Date: Wed, 13 Oct 2021 21:52:30 +0200 Subject: [PATCH 09/16] misc: Specify Go version for Heroku See https://github.com/tus/tusd/issues/509 --- go.mod | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go.mod b/go.mod index c3c1e6a0c..add641d33 100644 --- a/go.mod +++ b/go.mod @@ -1,5 +1,8 @@ module github.com/tus/tusd +// Specify the Go version needed for the Heroku deployment +// See https://github.com/heroku/heroku-buildpack-go#go-module-specifics +// +heroku goVersion go1.16 go 1.16 require ( From dd44267c082c98e99f08ca88ad85eb0b7fe93ef6 Mon Sep 17 00:00:00 2001 From: Marius Date: Wed, 13 Oct 2021 22:05:19 +0200 Subject: [PATCH 10/16] ci: Upgrade Go version for tests and releases --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f992c84cc..a3d8acaba 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ jobs: strategy: fail-fast: false matrix: - go-version: [1.15.x, 1.16.x] + go-version: [1.16.x, 1.17.x] platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: @@ -27,10 +27,10 @@ jobs: runs-on: ubuntu-latest if: github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') steps: - - name: Install Go 1.13.1 + - name: Install Go 1.17.2 uses: actions/setup-go@v2 with: - go-version: '1.13.1' + go-version: '1.17.2' - name: Checkout code uses: actions/checkout@v2 - name: Build TUSD From 16a3747ec421c041587f995ef4a00dc3e4ca004d Mon Sep 17 00:00:00 2001 From: benito Date: Sat, 16 Oct 2021 03:50:17 +0800 Subject: [PATCH 11/16] s3store: Accept 
alternate error response from DigitalOcean Spaces (#507) * fix digitalocean spaces fetch * remove log * remove log * remove info.Size check * Add comment * Delete .gitignore Co-authored-by: Marius --- pkg/s3store/s3store.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/s3store/s3store.go b/pkg/s3store/s3store.go index c5431c02e..5e864b9e2 100644 --- a/pkg/s3store/s3store.go +++ b/pkg/s3store/s3store.go @@ -498,7 +498,9 @@ func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, er // when the multipart upload has already been completed or aborted. Since // we already found the info object, we know that the upload has been // completed and therefore can ensure the the offset is the size. - if isAwsError(err, "NoSuchUpload") { + // AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean + // Spaces, can also return NoSuchKey. + if isAwsError(err, "NoSuchUpload") || isAwsError(err, "NoSuchKey") { info.Offset = info.Size return info, nil } else { From bae0ffb5e57e02d526965d734d7dc7d766f881b5 Mon Sep 17 00:00:00 2001 From: Tinco Andringa Date: Sun, 17 Oct 2021 23:48:53 +0200 Subject: [PATCH 12/16] gcsstore: Allow object prefix to contain underscores (#495) * Only determine object type based on name after last separator * modify test to keep in mind directory prefixes with underscores in them * update documentation to reflect support of underscores in gcs object prefix --- cmd/tusd/cli/flags.go | 2 +- pkg/gcsstore/gcsservice.go | 6 +++++- pkg/gcsstore/gcsservice_test.go | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/tusd/cli/flags.go b/cmd/tusd/cli/flags.go index 296928fac..35aaeec13 100644 --- a/cmd/tusd/cli/flags.go +++ b/cmd/tusd/cli/flags.go @@ -74,7 +74,7 @@ func ParseFlags() { flag.BoolVar(&Flags.S3DisableContentHashes, "s3-disable-content-hashes", false, "Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage 
(experimental and may be removed in the future)") flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)") flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)") - flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names (can't contain underscore character)") + flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names") flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY environment variable to be set)") flag.StringVar(&Flags.AzContainerAccessType, "azure-container-access-type", "", "Access type when creating a new container if it does not exist (possible values: blob, container, '')") flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')") diff --git a/pkg/gcsstore/gcsservice.go b/pkg/gcsstore/gcsservice.go index 46676221c..3acbcbd87 100644 --- a/pkg/gcsstore/gcsservice.go +++ b/pkg/gcsstore/gcsservice.go @@ -351,7 +351,11 @@ loop: if strings.HasSuffix(objAttrs.Name, "info") { continue } - split := strings.Split(objAttrs.Name, "_") + + fileNameParts := strings.Split(objAttrs.Name, "/") + fileName := fileNameParts[len(fileNameParts)-1] + + split := strings.Split(fileName, "_") // If the object name does not split on "_", we have a composed object. 
// If the object name splits on "_" in to four pieces we diff --git a/pkg/gcsstore/gcsservice_test.go b/pkg/gcsstore/gcsservice_test.go index eeffe15e0..2c9143087 100644 --- a/pkg/gcsstore/gcsservice_test.go +++ b/pkg/gcsstore/gcsservice_test.go @@ -447,7 +447,7 @@ func TestFilterObject(t *testing.T) { defer gock.Off() resp := googleBucketResponse{[]googleObjectResponse{ - googleObjectResponse{Name: "test-prefix_1"}, + googleObjectResponse{Name: "test_directory/test-prefix_1"}, }} gock.New("https://www.googleapis.com"). From 84faa14987da8c5bd9da9ed2cad16fe1b7941bbb Mon Sep 17 00:00:00 2001 From: Marius Date: Mon, 18 Oct 2021 00:29:13 +0200 Subject: [PATCH 13/16] core: Allow non-tus HEAD requests, Add Length to HEAD responses See https://github.com/tus/tusd/pull/480 Squashed commit of the following: commit 7439fd84a6103afdedaf94701a65ce4376789380 Author: Marius Date: Mon Oct 18 00:27:12 2021 +0200 Docs and test commit 16d9dc67e8c8eefc328b1ce12d7e7ca01a49f9f6 Merge: bae0ffb bea5183 Author: Marius Date: Mon Oct 18 00:23:13 2021 +0200 Merge branch 'head_header_check' of https://github.com/s3rius/tusd into s3rius-head_header_check commit bea5183ec3f8759efadb554b5dd1acd18187d055 Author: Pavel Kirilin Date: Thu May 20 19:53:36 2021 +0400 Fixed "Tus-Resumable" header check for HEAD request. 
Signed-off-by: Pavel Kirilin --- pkg/handler/head_test.go | 7 ++++--- pkg/handler/unrouted_handler.go | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/handler/head_test.go b/pkg/handler/head_test.go index 59da08536..b5c83bba2 100644 --- a/pkg/handler/head_test.go +++ b/pkg/handler/head_test.go @@ -48,9 +48,10 @@ func TestHead(t *testing.T) { }, Code: http.StatusOK, ResHeader: map[string]string{ - "Upload-Offset": "11", - "Upload-Length": "44", - "Cache-Control": "no-store", + "Upload-Offset": "11", + "Upload-Length": "44", + "Content-Length": "44", + "Cache-Control": "no-store", }, }).Run(handler, t) diff --git a/pkg/handler/unrouted_handler.go b/pkg/handler/unrouted_handler.go index ba2fb4280..6c5f52b54 100644 --- a/pkg/handler/unrouted_handler.go +++ b/pkg/handler/unrouted_handler.go @@ -260,9 +260,9 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler { } // Test if the version sent by the client is supported - // GET methods are not checked since a browser may visit this URL and does - // not include this header. This request is not part of the specification. - if r.Method != "GET" && r.Header.Get("Tus-Resumable") != "1.0.0" { + // GET and HEAD methods are not checked since a browser may visit this URL and does + // not include this header. GET requests are not part of the specification. 
+ if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" { handler.sendError(w, r, ErrUnsupportedVersion) return } @@ -472,6 +472,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) w.Header().Set("Upload-Defer-Length", UploadLengthDeferred) } else { w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10)) + w.Header().Set("Content-Length", strconv.FormatInt(info.Size, 10)) } w.Header().Set("Cache-Control", "no-store") From d00435048d79e51286ed0504fccc6ba8302e0b9a Mon Sep 17 00:00:00 2001 From: benito Date: Thu, 27 May 2021 15:57:32 +0800 Subject: [PATCH 14/16] initial changes --- cmd/tusd/cli/hooks.go | 8 ++++++++ cmd/tusd/cli/hooks/hooks.go | 3 ++- cmd/tusd/cli/hooks/plugin.go | 3 +++ docs/hooks.md | 8 +++++++- examples/hooks/pre-get | 7 +++++++ pkg/handler/config.go | 5 +++++ pkg/handler/unrouted_handler.go | 7 +++++++ 7 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 examples/hooks/pre-get diff --git a/cmd/tusd/cli/hooks.go b/cmd/tusd/cli/hooks.go index 1bb9210a6..27ea5e63d 100644 --- a/cmd/tusd/cli/hooks.go +++ b/cmd/tusd/cli/hooks.go @@ -43,6 +43,10 @@ func preFinishCallback(info handler.HookEvent) error { return hookCallback(hooks.HookPreFinish, info) } +func preGetCallback(info handler.HookEvent) error { + return hookCallback(hooks.HookPreGet, info) +} + func SetupHookMetrics() { MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostFinish)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostTerminate)).Add(0) @@ -50,6 +54,7 @@ func SetupHookMetrics() { MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0) MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0) + MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreGet)).Add(0) } func SetupPreHooks(config *handler.Config) error { @@ -99,6 +104,7 @@ func SetupPreHooks(config 
*handler.Config) error { config.PreUploadCreateCallback = preCreateCallback config.PreFinishResponseCallback = preFinishCallback + config.PreGetResponseCallback = preGetCallback return nil } @@ -115,6 +121,8 @@ func SetupPostHooks(handler *handler.Handler) { invokeHookAsync(hooks.HookPostReceive, info) case info := <-handler.CreatedUploads: invokeHookAsync(hooks.HookPostCreate, info) + case info := <-handler.GetUploads: + invokeHookAsync(hooks.HookPreGet, info) } } }() diff --git a/cmd/tusd/cli/hooks/hooks.go b/cmd/tusd/cli/hooks/hooks.go index b72e3c3dc..be1a0e495 100644 --- a/cmd/tusd/cli/hooks/hooks.go +++ b/cmd/tusd/cli/hooks/hooks.go @@ -18,9 +18,10 @@ const ( HookPostCreate HookType = "post-create" HookPreCreate HookType = "pre-create" HookPreFinish HookType = "pre-finish" + HookPreGet HookType = "pre-get" ) -var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish} +var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish, HookPreGet} type hookDataStore struct { handler.DataStore diff --git a/cmd/tusd/cli/hooks/plugin.go b/cmd/tusd/cli/hooks/plugin.go index 8821527f0..9794d72c3 100644 --- a/cmd/tusd/cli/hooks/plugin.go +++ b/cmd/tusd/cli/hooks/plugin.go @@ -14,6 +14,7 @@ type PluginHookHandler interface { PostFinish(info handler.HookEvent) error PostTerminate(info handler.HookEvent) error PreFinish(info handler.HookEvent) error + PreGet(info handler.HookEvent) error } type PluginHook struct { @@ -57,6 +58,8 @@ func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutp err = h.handler.PreCreate(info) case HookPreFinish: err = h.handler.PreFinish(info) + case HookPreGet: + err = h.handler.PreGet(info) default: err = fmt.Errorf("hooks: unknown hook named %s", typ) } diff --git a/docs/hooks.md b/docs/hooks.md index 2bdd6c663..91660f40a 100644 --- a/docs/hooks.md +++ 
b/docs/hooks.md @@ -13,7 +13,7 @@ If not otherwise noted, all hooks are invoked in a *non-blocking* way, meaning t ## Blocking Hooks -On the other hand, there are a few *blocking* hooks, such as caused by the `pre-create` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks. +On the other hand, there are a few *blocking* hooks, such as caused by the `pre-create`, `pre-get` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid to make time-consuming operations inside the processes for blocking hooks. ### Blocking File Hooks @@ -39,6 +39,12 @@ This event will be triggered after an upload is fully finished but before a resp This is a blocking hook, as such it can be used to validate or post-process an uploaded file. A non-zero exit code or HTTP response greater than `400` will return a HTTP 500 error to the client. +### pre-get + +This event will be triggered before an upload is read and returned to the client. +This is a blocking hook, as such it can be used to validate access limits. +A non-zero exit code or HTTP response greater than `400` will return a HTTP 500 error to the client. + ### post-finish This event will be triggered after an upload is fully finished, meaning that all chunks have been transfered and saved in the storage. After this point, no further modifications, except possible deletion, can be made to the upload entity and it may be desirable to use the file for further processing or notify other applications of the completions of this upload.
diff --git a/examples/hooks/pre-get b/examples/hooks/pre-get new file mode 100644 index 000000000..1d822ae8a --- /dev/null +++ b/examples/hooks/pre-get @@ -0,0 +1,7 @@ +#!/bin/sh + +filename=$(cat /dev/stdin | jq -r .MetaData.filename) +if [[ $filename != "public" ]]; then + echo "Error: access unauthorized" + exit 1 +fi \ No newline at end of file diff --git a/pkg/handler/config.go b/pkg/handler/config.go index ae9676bc8..83a4df675 100644 --- a/pkg/handler/config.go +++ b/pkg/handler/config.go @@ -40,6 +40,11 @@ type Config struct { // potentially set by proxies when generating an absolute URL in the // response to POST requests. RespectForwardedHeaders bool + // PreGetCallback will be invoked before an upload is read, if the + // property is supplied. If the callback returns nil, the upload will be sent. + // Otherwise the HTTP request will be aborted. This can be used to implement + // access limits. + PreGetCallback func(hook HookEvent) error // PreUploadCreateCallback will be invoked before a new upload is created, if the // property is supplied. If the callback returns nil, the upload will be created. // Otherwise the HTTP request will be aborted.
This can be used to implement diff --git a/pkg/handler/unrouted_handler.go b/pkg/handler/unrouted_handler.go index 6c5f52b54..88d4d112c 100644 --- a/pkg/handler/unrouted_handler.go +++ b/pkg/handler/unrouted_handler.go @@ -741,6 +741,13 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) return } + if handler.config.PreGetCallback != nil { + if err := handler.config.PreGetCallback(newHookEvent(info, r)); err != nil { + handler.sendError(w, r, err) + return + } + } + // Set headers before sending responses w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10)) From cb3c4e83c17328d54f2faf65d768dce9b6442ac9 Mon Sep 17 00:00:00 2001 From: benito Date: Thu, 27 May 2021 16:45:07 +0800 Subject: [PATCH 15/16] remove post hook case --- cmd/tusd/cli/hooks.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/tusd/cli/hooks.go b/cmd/tusd/cli/hooks.go index 27ea5e63d..dec2f1bee 100644 --- a/cmd/tusd/cli/hooks.go +++ b/cmd/tusd/cli/hooks.go @@ -121,8 +121,6 @@ func SetupPostHooks(handler *handler.Handler) { invokeHookAsync(hooks.HookPostReceive, info) case info := <-handler.CreatedUploads: invokeHookAsync(hooks.HookPostCreate, info) - case info := <-handler.GetUploads: - invokeHookAsync(hooks.HookPreGet, info) } } }() From 8c0568b8713a584a751a4154b9c446c09165361a Mon Sep 17 00:00:00 2001 From: benitogf Date: Mon, 18 Oct 2021 15:11:51 +0800 Subject: [PATCH 16/16] add enabled hook flag, call hook before getting file info --- .gitignore | 1 + cmd/tusd/cli/flags.go | 2 +- cmd/tusd/cli/hooks.go | 4 +++- pkg/handler/unrouted_handler.go | 17 ++++++++++------- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index a263e4db7..3a9998096 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ data/ node_modules/ .DS_Store ./tusd +cmd/tusd/tusd diff --git a/cmd/tusd/cli/flags.go b/cmd/tusd/cli/flags.go index 35aaeec13..54e764908 100644 --- a/cmd/tusd/cli/flags.go +++ b/cmd/tusd/cli/flags.go @@ -80,7 
+80,7 @@ func ParseFlags() { flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')") flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names") flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)") - flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events") + flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-get,pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events") flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts") flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to") flag.StringVar(&Flags.HttpHooksForwardHeaders, "hooks-http-forward-headers", "", "List of HTTP request headers to be forwarded from the client request to the hook endpoint") diff --git a/cmd/tusd/cli/hooks.go b/cmd/tusd/cli/hooks.go index dec2f1bee..0a690296a 100644 --- a/cmd/tusd/cli/hooks.go +++ b/cmd/tusd/cli/hooks.go @@ -104,7 +104,7 @@ func SetupPreHooks(config *handler.Config) error { config.PreUploadCreateCallback = preCreateCallback config.PreFinishResponseCallback = preFinishCallback - config.PreGetResponseCallback = preGetCallback + config.PreGetCallback = preGetCallback return nil } @@ -146,6 +146,8 @@ func invokeHookSync(typ hooks.HookType, info handler.HookEvent, captureOutput bo logEv(stdout, "UploadFinished", "id", id, "size", strconv.FormatInt(size, 10)) case hooks.HookPostTerminate: logEv(stdout, 
"UploadTerminated", "id", id) + case hooks.HookPreGet: + logEv(stdout, "PreGet", "id", id) } if hookHandler == nil { diff --git a/pkg/handler/unrouted_handler.go b/pkg/handler/unrouted_handler.go index 88d4d112c..825f165f2 100644 --- a/pkg/handler/unrouted_handler.go +++ b/pkg/handler/unrouted_handler.go @@ -719,6 +719,16 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) return } + // call the get hook before getting the file + if handler.config.PreGetCallback != nil { + if err := handler.config.PreGetCallback(newHookEvent(FileInfo{ + ID: id, + }, r)); err != nil { + handler.sendError(w, r, err) + return + } + } + if handler.composer.UsesLocker { lock, err := handler.lockUpload(id) if err != nil { @@ -741,13 +751,6 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) return } - if handler.config.PreGetCallback != nil { - if err := handler.config.PreGetCallback(newHookEvent(info, r)); err != nil { - handler.sendError(w, r, err) - return - } - } - // Set headers before sending responses w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))