diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index da64afe..ade6ff9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -16,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.25.0'
+          go-version-file: 'go.mod'
 
       - name: Cache Go modules
         uses: actions/cache@v4
@@ -31,8 +31,13 @@ jobs:
       - name: Install dependencies
         run: make deps
 
-      - name: Run checks (lint, vet, fmt-check, test)
-        run: make check
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v8
+        with:
+          version: v2.6.0
+
+      - name: Run checks (vet, fmt-check, test)
+        run: make vet fmt-check test
 
       # Note: Validation tests with real cloud providers run in separate workflows
       # See .github/workflows/validation-*.yml for provider-specific validation tests
@@ -63,7 +68,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.0'
+          go-version-file: 'go.mod'
 
       - name: Cache Go modules
         uses: actions/cache@v4
diff --git a/.github/workflows/validation-aws.yml b/.github/workflows/validation-aws.yml
new file mode 100644
index 0000000..154edfa
--- /dev/null
+++ b/.github/workflows/validation-aws.yml
@@ -0,0 +1,59 @@
+name: AWS Validation Tests
+
+on:
+  schedule:
+    # Run daily at 2 AM UTC
+    - cron: "0 2 * * *"
+  workflow_dispatch:
+    # Allow manual triggering
+  pull_request:
+    paths:
+      - "v1/providers/aws/**"
+      - "internal/validation/**"
+      - "v1/**"
+    branches: [main]
+
+jobs:
+  aws-validation:
+    name: AWS Provider Validation
+    runs-on: ubuntu-latest
+    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request'
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version-file: 'go.mod'
+
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Install dependencies
+        run: make deps
+
+      - name: Run AWS validation tests
+        env:
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          TEST_USER_PRIVATE_KEY_PEM_BASE64: ${{ secrets.TEST_USER_PRIVATE_KEY_PEM_BASE64 }}
+          VALIDATION_TEST: true
+        run: |
+          cd v1/providers/aws
+          go test -v -short=false -coverprofile=coverage.out -timeout=30m ./...
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: aws-validation-results
+          path: |
+            v1/providers/aws/coverage.out
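For context on how the workflow-provided environment is consumed: the validation suites below skip themselves under `-short`, and a provider's test entry point can additionally gate on the credentials this workflow injects. A minimal sketch of that gating, assuming a hypothetical `TestAWSValidation` in `v1/providers/aws` (the env-var names match the workflow above; everything else here is illustrative):

```go
package aws

import (
	"os"
	"testing"
)

// TestAWSValidation sketches how a provider test can gate on the environment
// the AWS workflow above provides. Hypothetical; the real entry point and
// config wiring live in v1/providers/aws.
func TestAWSValidation(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping validation tests in short mode")
	}
	// The workflow sets VALIDATION_TEST=true and injects credentials;
	// skip locally when they are absent instead of failing.
	if os.Getenv("VALIDATION_TEST") == "" {
		t.Skip("VALIDATION_TEST not set; skipping cloud validation")
	}
	if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
		t.Skip("AWS credentials not configured")
	}
	// ... build the provider credential and run the shared suite, e.g.
	// validation.RunInstanceLifecycleValidation(t, config).
}
```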
diff --git a/.github/workflows/validation-lambdalabs.yml b/.github/workflows/validation-lambdalabs.yml
index de63dc3..965be4d 100644
--- a/.github/workflows/validation-lambdalabs.yml
+++ b/.github/workflows/validation-lambdalabs.yml
@@ -25,7 +25,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.0'
+          go-version-file: 'go.mod'
 
       - name: Cache Go modules
         uses: actions/cache@v4
diff --git a/.github/workflows/validation-nebius.yml b/.github/workflows/validation-nebius.yml
new file mode 100644
index 0000000..b9a40e3
--- /dev/null
+++ b/.github/workflows/validation-nebius.yml
@@ -0,0 +1,61 @@
+name: Nebius Validation Tests
+
+on:
+  schedule:
+    # Run daily at 2 AM UTC
+    - cron: "0 2 * * *"
+  workflow_dispatch:
+    # Allow manual triggering
+  pull_request:
+    paths:
+      - "v1/providers/nebius/**"
+      - "internal/validation/**"
+      - "v1/**"
+    branches: [main]
+
+jobs:
+  nebius-validation:
+    name: Nebius Provider Validation
+    runs-on: ubuntu-latest
+    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request'
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version-file: 'go.mod'
+
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Install dependencies
+        run: make deps
+
+      - name: Run Nebius validation tests
+        env:
+          NEBIUS_PUBLIC_KEY_ID: ${{ secrets.NEBIUS_PUBLIC_KEY_ID }}
+          NEBIUS_PRIVATE_KEY_PEM_BASE64: ${{ secrets.NEBIUS_PRIVATE_KEY_PEM_BASE64 }}
+          NEBIUS_SERVICE_ACCOUNT_ID: ${{ secrets.NEBIUS_SERVICE_ACCOUNT_ID }}
+          NEBIUS_PROJECT_ID: ${{ secrets.NEBIUS_PROJECT_ID }}
+          TEST_USER_PRIVATE_KEY_PEM_BASE64: ${{ secrets.TEST_USER_PRIVATE_KEY_PEM_BASE64 }}
+          VALIDATION_TEST: true
+        run: |
+          cd v1/providers/nebius
+          go test -v -short=false -coverprofile=coverage.out -timeout=30m ./...
+
+      - name: Upload test results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: nebius-validation-results
+          path: |
+            v1/providers/nebius/coverage.out
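The Nebius workflow (like the AWS one) passes private keys as base64-encoded PEM. A small sketch of turning such a secret back into key material, using a hypothetical helper name; the decoded bytes can then be handed to a parser such as the `BytesToRSAKey` helper added in `internal/rsa` later in this diff:

```go
package validationenv

import (
	"encoding/base64"
	"fmt"
	"os"
)

// pemFromEnv decodes a *_PEM_BASE64 secret (e.g. NEBIUS_PRIVATE_KEY_PEM_BASE64)
// back into raw PEM bytes. Hypothetical helper for illustration.
func pemFromEnv(name string) ([]byte, error) {
	encoded := os.Getenv(name)
	if encoded == "" {
		return nil, fmt.Errorf("%s is not set", name)
	}
	pemBytes, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, fmt.Errorf("failed to decode %s: %w", name, err)
	}
	return pemBytes, nil
}
```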
diff --git a/.github/workflows/validation-shadeform.yml b/.github/workflows/validation-shadeform.yml
index 1ef17f8..e9ead9a 100644
--- a/.github/workflows/validation-shadeform.yml
+++ b/.github/workflows/validation-shadeform.yml
@@ -25,7 +25,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.0'
+          go-version-file: 'go.mod'
 
       - name: Cache Go modules
         uses: actions/cache@v4
diff --git a/.gitignore b/.gitignore
index d4eeaac..e14b1d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 .env
 __debug_bin*
 .idea/*
+coverage/*
\ No newline at end of file
diff --git a/.golangci.bck.yml b/.golangci.bck.yml
new file mode 100644
index 0000000..ebc0dce
--- /dev/null
+++ b/.golangci.bck.yml
@@ -0,0 +1,109 @@
+run:
+  build-tags:
+    - tasks
+linters-settings:
+  goimports:
+    local-prefixes: github.com/brevdev/dev-plane
+  revive:
+    # min-confidence: 0.8
+    rules:
+      - name: blank-imports
+      - name: context-as-argument
+      - name: context-as-argument
+      - name: context-keys-type
+      - name: dot-imports
+      - name: error-return
+      - name: error-strings
+      - name: error-naming
+      - name: if-return
+      - name: increment-decrement
+      - name: var-naming
+      - name: var-declaration
+      # - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: time-naming
+      - name: unexported-return
+      - name: errorf
+      - name: empty-block
+      - name: superfluous-else
+      - name: unused-parameter
+      - name: unreachable-code
+      - name: redefines-builtin-id
+  gocyclo:
+    min-complexity: 15
+  misspell:
+    locale: US
+  nolintlint:
+    # allow-leading-space: false # require machine-readable nolint directives (with no leading space)
+    allow-unused: false # report any unused nolint directives
+    require-explanation: true # require an explanation for nolint directives
+    require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+  funlen:
+    lines: 100
+  errcheck:
+    exclude-functions:
+      - (*encoding/json.Encoder).Encode
+  wrapcheck:
+    ignoreSigs:
+      - .WrapAndTrace
+      - .Errorf
+      - .Wrap
+      - .New
+      - .ValidateStruct
+      - .Permanent
+      - .Decode
+  stylecheck:
+    checks: ["all", "-ST1020", "-ST1000", "-ST1021"]
+
+linters:
+  # please, do not use `enable-all`: it's deprecated and will be removed soon.
+  # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
+  disable-all: true
+  enable:
+    - errcheck
+    # - gosimple # don't like
+    - govet
+    - ineffassign
+    # - staticcheck # broken in collections pkg?
+ - typecheck + - bodyclose + - unused + # - depguard + - dupl + - copyloopvar + - forcetypeassert + - funlen + # - gci # don't like + - gocognit + - goconst + - gocritic + - gocyclo + # - godot # don't like + - gofumpt + - revive + # - gomnd # don't like + - goprintffuncname + - gosec + # - ifshort # don't like + - misspell + - noctx + - nolintlint + - rowserrcheck # broken with generics + - sqlclosecheck # broken with generics + - stylecheck + # - thelper + - tparallel + - unconvert + - unparam + - whitespace + # - errorlint + # - goerr113 + # - wrapcheck +issues: + # enable issues excluded by default + exclude-use-default: false + exclude: + - composites + exclude-dirs: + - internal/lambdalabs/gen \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index 95696df..91568d7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,109 +1,110 @@ +version: "2" run: build-tags: - tasks -linters-settings: - goimports: - local-prefixes: github.com/brevdev/dev-plane - revive: - min-confidence: 0.8 - rules: - - name: blank-imports - - name: context-as-argument - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: if-return - - name: increment-decrement - - name: var-naming - - name: var-declaration - # - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - - name: unreachable-code - - name: redefines-builtin-id - gocyclo: - min-complexity: 15 - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-explanation: true # require an explanation for nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - funlen: - lines: 100 - errcheck: - exclude-functions: - - (*encoding/json.Encoder).Encode - wrapcheck: - ignoreSigs: - - .WrapAndTrace - - .Errorf - - .Wrap - - .New - - .ValidateStruct - - .Permanent - - .Decode - stylecheck: - checks: ["all", "-ST1020", "-ST1000", "-ST1021"] - linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true + default: none enable: - - errcheck - # - gosimple # don't like - - govet - - ineffassign - # - staticcheck # broken in collections pkg? 
- - typecheck - bodyclose - - unused - # - depguard - - dupl - copyloopvar + - dupl + - errcheck - forcetypeassert - funlen - # - gci # don't like - gocognit - goconst - gocritic - gocyclo - # - godot # don't like - - gofumpt - - revive - # - gomnd # don't like - goprintffuncname - gosec - # - ifshort # don't like + - govet + - ineffassign - misspell - noctx - nolintlint - - rowserrcheck # broken with generics - - sqlclosecheck # broken with generics - - stylecheck - # - thelper + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck - tparallel - unconvert - unparam + - unused - whitespace - # - errorlint - # - goerr113 - # - wrapcheck -issues: - # enable issues excluded by default - exclude-use-default: false - exclude: - - composites - exclude-dirs: - - internal/lambdalabs/gen \ No newline at end of file + settings: + errcheck: + exclude-functions: + - (*encoding/json.Encoder).Encode + funlen: + lines: 100 + gocyclo: + min-complexity: 15 + misspell: + locale: US + nolintlint: + require-explanation: true + require-specific: false + allow-unused: false + revive: + rules: + - name: blank-imports + - name: context-as-argument + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + staticcheck: + checks: + - all + - -ST1000 + - -ST1020 + - -ST1021 + wrapcheck: + ignore-sigs: + - .WrapAndTrace + - .Errorf + - .Wrap + - .New + - .ValidateStruct + - .Permanent + - .Decode + exclusions: + generated: lax + rules: + - path: (.+)\.go$ + text: composites + paths: + - internal/lambdalabs/gen + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofumpt + settings: + goimports: + local-prefixes: + - github.com/brevdev/dev-plane + exclusions: + generated: lax + paths: + - internal/lambdalabs/gen + - third_party$ + - builtin$ + - examples$ diff --git a/go.mod b/go.mod index 9df8aa0..d4043c5 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,16 @@ module github.com/brevdev/cloud -go 1.24.0 - -toolchain go1.24.6 +go 1.25.1 require ( github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b + github.com/aws/aws-sdk-go-v2 v1.39.2 + github.com/aws/aws-sdk-go-v2/config v1.31.11 + github.com/aws/aws-sdk-go-v2/credentials v1.18.15 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.254.1 + github.com/aws/aws-sdk-go-v2/service/eks v1.73.3 + github.com/aws/aws-sdk-go-v2/service/iam v1.47.5 + github.com/aws/smithy-go v1.23.0 github.com/bojanz/currency v1.3.1 github.com/cenkalti/backoff v2.2.1+incompatible github.com/cenkalti/backoff/v4 v4.3.0 @@ -16,28 +21,88 @@ require ( github.com/jarcoal/httpmock v1.4.0 github.com/nebius/gosdk v0.0.0-20250826102719-940ad1dfb5de github.com/pkg/errors v0.9.1 - github.com/stretchr/testify v1.11.0 - golang.org/x/crypto v0.41.0 - golang.org/x/text v0.28.0 + github.com/stretchr/testify v1.11.1 + golang.org/x/crypto v0.42.0 + golang.org/x/text v0.29.0 + google.golang.org/grpc v1.75.0 gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/client-go v0.34.1 + sigs.k8s.io/aws-iam-authenticator v0.7.8 ) require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go 
v1.36.8-20250717185734-6c6e0d3c608e.1 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/apd/v3 v3.2.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.2 // indirect + github.com/go-openapi/swag v0.25.1 // indirect + github.com/go-openapi/swag/cmdutils v0.25.1 // indirect + github.com/go-openapi/swag/conv v0.25.1 // indirect + github.com/go-openapi/swag/fileutils v0.25.1 // indirect + github.com/go-openapi/swag/jsonname v0.25.1 // indirect + github.com/go-openapi/swag/jsonutils v0.25.1 // indirect + github.com/go-openapi/swag/loading v0.25.1 // indirect + github.com/go-openapi/swag/mangling v0.25.1 // indirect + github.com/go-openapi/swag/netutils v0.25.1 // indirect + github.com/go-openapi/swag/stringutils v0.25.1 // indirect + github.com/go-openapi/swag/typeutils v0.25.1 // indirect + github.com/go-openapi/swag/yamlutils v0.25.1 // indirect github.com/gofrs/flock v0.12.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/oauth2 v0.31.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/time v0.13.0 // indirect 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect - google.golang.org/grpc v1.75.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 64dd129..1f70d3a 100644 --- a/go.sum +++ b/go.sum @@ -6,40 +6,123 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= +github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.11 h1:6QOO1mP0MgytbfKsL/r/gE1P6/c/4pPzrrU3hKxa5fs= +github.com/aws/aws-sdk-go-v2/config v1.31.11/go.mod h1:KzpDsPX/dLxaUzoqM3sN2NOhbQIW4HW/0W8rQA1YFEs= +github.com/aws/aws-sdk-go-v2/credentials v1.18.15 h1:Gqy7/05KEfUSulSvwxnB7t8DuZMR3ShzNcwmTD6HOLU= +github.com/aws/aws-sdk-go-v2/credentials v1.18.15/go.mod h1:VWDWSRpYHjcjURRaQ7NUzgeKFN8Iv31+EOMT/W+bFyc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.254.1 h1:7p9bJCZ/b3EJXXARW7JMEs2IhsnI4YFHpfXQfgMh0eg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.254.1/go.mod h1:M8WWWIfXmxA4RgTXcI/5cSByxRqjgne32Sh0VIbrn0A= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.3 h1:V6MAr82kSLdj3/tN4UcPtlXDbvkNcAxsIvq59CNe704= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.3/go.mod h1:FeDTTHze8jWVCZBiMkUYxJ/TQdOpTf9zbJjf0RI0ajo= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.5 h1:o2gRl9x3A/Sp6q4oHinnrS+2AC9Ud8DaG4JL9ygMACk= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.5/go.mod h1:0y7wFmnEg9xTZxjmr2gHQ4xOHpCfrt70lFWTOAkrij4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod 
h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.5 h1:WwL5YLHabIBuAlEKRoLgqLz1LxTvCEpwsQr7MiW/vnM= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.5/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bojanz/currency v1.3.1 h1:3BUAvy/5hU/Pzqg5nrQslVihV50QG+A2xKPoQw1RKH4= github.com/bojanz/currency v1.3.1/go.mod h1:jNoZiJyRTqoU5DFoa+n+9lputxPUDa8Fz8BdDrW06Go= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU= +github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ= +github.com/go-openapi/swag v0.25.1 h1:6uwVsx+/OuvFVPqfQmOOPsqTcm5/GkBhNwLqIR916n8= +github.com/go-openapi/swag v0.25.1/go.mod h1:bzONdGlT0fkStgGPd3bhZf1MnuPkf2YAys6h+jZipOo= +github.com/go-openapi/swag/cmdutils v0.25.1 h1:nDke3nAFDArAa631aitksFGj2omusks88GF1VwdYqPY= +github.com/go-openapi/swag/cmdutils v0.25.1/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0= +github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs= +github.com/go-openapi/swag/fileutils v0.25.1 h1:rSRXapjQequt7kqalKXdcpIegIShhTPXx7yw0kek2uU= +github.com/go-openapi/swag/fileutils v0.25.1/go.mod h1:+NXtt5xNZZqmpIpjqcujqojGFek9/w55b3ecmOdtg8M= +github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= +github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= +github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8= +github.com/go-openapi/swag/jsonutils v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1/go.mod h1:kjmweouyPwRUEYMSrbAidoLMGeJ5p6zdHi9BgZiqmsg= +github.com/go-openapi/swag/loading v0.25.1 h1:6OruqzjWoJyanZOim58iG2vj934TysYVptyaoXS24kw= +github.com/go-openapi/swag/loading v0.25.1/go.mod h1:xoIe2EG32NOYYbqxvXgPzne989bWvSNoWoyQVWEZicc= +github.com/go-openapi/swag/mangling v0.25.1 h1:XzILnLzhZPZNtmxKaz/2xIGPQsBsvmCjrJOWGNz/ync= +github.com/go-openapi/swag/mangling v0.25.1/go.mod h1:CdiMQ6pnfAgyQGSOIYnZkXvqhnnwOn997uXZMAd/7mQ= +github.com/go-openapi/swag/netutils v0.25.1 h1:2wFLYahe40tDUHfKT1GRC4rfa5T1B4GWZ+msEFA4Fl4= +github.com/go-openapi/swag/netutils v0.25.1/go.mod h1:CAkkvqnUJX8NV96tNhEQvKz8SQo2KF0f7LleiJwIeRE= +github.com/go-openapi/swag/stringutils v0.25.1 h1:Xasqgjvk30eUe8VKdmyzKtjkVjeiXx1Iz0zDfMNpPbw= +github.com/go-openapi/swag/stringutils v0.25.1/go.mod h1:JLdSAq5169HaiDUbTvArA2yQxmgn4D6h4A+4HqVvAYg= +github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3I3ysiFZqukA= +github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8= +github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk= +github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg= github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= github.com/jarcoal/httpmock v1.4.0 h1:BvhqnH0JAYbNudL2GMJKgOHe2CtKlzJ/5rWKyp+hc2k= github.com/jarcoal/httpmock v1.4.0/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -48,66 +131,165 @@ github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/maxatome/go-testdeep v1.14.0 h1:rRlLv1+kI8eOI3OaBXZwb3O7xY3exRzdW5QyX48g9wI= github.com/maxatome/go-testdeep v1.14.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nebius/gosdk v0.0.0-20250826102719-940ad1dfb5de h1:7GbDUDyH22dvN7ata8HuNVuDlcyaDzUs/s+03Y3pDqU= github.com/nebius/gosdk v0.0.0-20250826102719-940ad1dfb5de/go.mod h1:eVbm4Qc4GPzBn3EL4rLvy1WS9zqJDw+giksOA2NZERY= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= -golang.org/x/net v0.43.0 
h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod 
h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/aws-iam-authenticator v0.7.8 h1:1WOjMbjr51BJ/hfvnBJpAIwBkpdY2uhsltV9NGVm9y4=
+sigs.k8s.io/aws-iam-authenticator v0.7.8/go.mod h1:HpxBHu4dSc+TB4hgx/OiOsZrh7jqkRmhn6MQJXK+bEk=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/internal/collections/collections.go b/internal/collections/collections.go
index e244816..4960193 100644
--- a/internal/collections/collections.go
+++ b/internal/collections/collections.go
@@ -46,10 +46,7 @@ func GetMapValues[K comparable, V any](m map[K]V) []V {
 // loops over list and returns when has returns true
 func ListHas[K any](list []K, has func(l K) bool) bool {
 	k := Find(list, has)
-	if k != nil {
-		return true
-	}
-	return false
+	return k != nil
 }
 
 func MapHasKey[K comparable, V any](m map[K]V, key K) bool {
diff --git a/internal/errors/errors.go b/internal/errors/errors.go
index dcbc663..7769de1 100644
--- a/internal/errors/errors.go
+++ b/internal/errors/errors.go
@@ -120,11 +120,7 @@ func CombineByString(err error) error {
 			mapEList = append(mapEList, e)
 		}
 	}
-	errsOut := make([]error, 0, len(mapE))
-	for _, e := range mapEList {
-		errsOut = append(errsOut, e)
-	}
-	return Join(errsOut...)
+	return Join(mapEList...)
 }
 
 var Is = stderrors.Is
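Both cleanups above are behavior-preserving: `ListHas` still reports whether any element satisfies the predicate, and `CombineByString` still joins the deduplicated errors. A quick illustration of the `ListHas` contract (the usage itself is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/brevdev/cloud/internal/collections"
)

func main() {
	ports := []int{22, 80, 443}
	// ListHas is true exactly when Find locates a matching element.
	hasSSH := collections.ListHas(ports, func(p int) bool { return p == 22 })
	fmt.Println(hasSSH) // true
}
```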
diff --git a/internal/kubernetes/kubernetes.go b/internal/kubernetes/kubernetes.go
new file mode 100644
index 0000000..86e3480
--- /dev/null
+++ b/internal/kubernetes/kubernetes.go
@@ -0,0 +1,124 @@
+package kubernetes
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+
+	certificatesv1 "k8s.io/api/certificates/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// ClientCertificateData creates a client certificate for the given cluster and private key. This is used to authenticate to the cluster.
+func ClientCertificateData(ctx context.Context, k8sClient *kubernetes.Clientset, username string, userPrivateKey any) ([]byte, error) {
+	// Check to see if the CSR already exists for this username
+	csr, err := k8sClient.CertificatesV1().CertificateSigningRequests().Get(ctx, username, metav1.GetOptions{})
+	if err != nil {
+		// If the error is not a not found error, return the error. If it is a not found error, continue.
+		if !k8serrors.IsNotFound(err) {
+			return nil, fmt.Errorf("failed to get CSR: %w", err)
+		}
+	} else {
+		// If there is no error and the CSR exists, return the certificate.
+		if csr != nil && csr.Status.Certificate != nil {
+			return csr.Status.Certificate, nil
+		}
+	}
+
+	// Create the certificate request
+	certRequestBytes, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
+		Subject: pkix.Name{
+			CommonName:   username,
+			Organization: []string{"brev"},
+		},
+	}, userPrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create CSR: %w", err)
+	}
+
+	// Encode the CSR in PEM
+	crtRequestPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: certRequestBytes})
+
+	// Create the CSR
+	csr, err = k8sClient.CertificatesV1().CertificateSigningRequests().Create(ctx,
+		&certificatesv1.CertificateSigningRequest{
+			ObjectMeta: metav1.ObjectMeta{Name: username},
+			Spec: certificatesv1.CertificateSigningRequestSpec{
+				Usages:     []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
+				Request:    crtRequestPEM,
+				SignerName: certificatesv1.KubeAPIServerClientSignerName,
+			},
+		},
+		metav1.CreateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create certificate signing request: %w", err)
+	}
+
+	// Approve the CSR
+	csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{
+		Type:           certificatesv1.CertificateApproved,
+		Status:         "True",
+		Reason:         "BrevCloudSDK",
+		Message:        "BrevCloudSDK approved certificate signing request",
+		LastUpdateTime: metav1.Now(),
+	})
+	csr, err = k8sClient.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to approve certificate signing request: %w", err)
+	}
+
+	// Get the signed certificate
+	signedCertificate, err := k8sClient.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get signed certificate: %w", err)
+	}
+
+	return signedCertificate.Status.Certificate, nil
+}
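A usage sketch (illustrative wiring, not part of this diff): it assumes an authenticated `*kubernetes.Clientset` and decoded PEM key bytes, and uses the `BytesToRSAKey` helper from `internal/rsa` that appears later in this diff. The function name `provisionClusterUser` is hypothetical.

```go
package example

import (
	"context"
	"fmt"

	k8s "k8s.io/client-go/kubernetes"

	brevk8s "github.com/brevdev/cloud/internal/kubernetes"
	brevrsa "github.com/brevdev/cloud/internal/rsa"
)

// provisionClusterUser shows how the two helpers compose: parse the user's
// private key, mint a client certificate via a CSR, then bind the username
// to a ClusterRole.
func provisionClusterUser(ctx context.Context, client *k8s.Clientset, username string, keyBytes []byte) ([]byte, error) {
	userKey, err := brevrsa.BytesToRSAKey(keyBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse user key: %w", err)
	}
	certPEM, err := brevk8s.ClientCertificateData(ctx, client, username, userKey)
	if err != nil {
		return nil, err
	}
	// The returned certificate plus the user's key can populate a kubeconfig
	// user entry; the role binding makes the identity useful.
	if err := brevk8s.SetUserRole(ctx, client, username, "view"); err != nil {
		return nil, err
	}
	return certPEM, nil
}
```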
+// SetUserRole sets the role for the given user.
+func SetUserRole(ctx context.Context, k8sClient *kubernetes.Clientset, username string, roleName string) error {
+	clusterRoleBindingName := fmt.Sprintf("%s-%s", username, roleName)
+
+	// Check to see if the cluster role binding already exists for this username
+	clusterRoleBinding, err := k8sClient.RbacV1().ClusterRoleBindings().Get(ctx, clusterRoleBindingName, metav1.GetOptions{})
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return fmt.Errorf("failed to get cluster role binding: %w", err)
+		}
+	} else {
+		if clusterRoleBinding != nil {
+			return nil
+		}
+	}
+
+	// Create the cluster role binding
+	_, err = k8sClient.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterRoleBindingName,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     roleName,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "User",
+				Name:      username,
+				Namespace: "default",
+			},
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to set role %s for user %s: %w", roleName, username, err)
+	}
+
+	return nil
+}
diff --git a/internal/rsa/rsa.go b/internal/rsa/rsa.go
new file mode 100644
index 0000000..c5188c1
--- /dev/null
+++ b/internal/rsa/rsa.go
@@ -0,0 +1,50 @@
+package rsa
+
+import (
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// BytesToRSAKey parses a byte slice into an RSA or ed25519 private key.
+// It supports OpenSSH, PKCS8, and PKCS1 formats.
+func BytesToRSAKey(keyBytes []byte) (any, error) {
+	// The key may be in OpenSSH format
+	key, err := ssh.ParseRawPrivateKey(keyBytes)
+	if err == nil {
+		// This is an OpenSSH key, now check to see if it is a supported private key
+		switch k := key.(type) {
+		case *rsa.PrivateKey, *ed25519.PrivateKey:
+			return k, nil
+		default:
+			// This is an OpenSSH key, but it is not a supported private key
+			return nil, fmt.Errorf("key is not an RSA private key")
+		}
+	}
+
+	// The key may be in PKCS8 format
+	key, err = x509.ParsePKCS8PrivateKey(keyBytes)
+	if err == nil {
+		// This is a PKCS8 key; note that x509.ParsePKCS8PrivateKey returns ed25519 keys by value
+		switch k := key.(type) {
+		case *rsa.PrivateKey, ed25519.PrivateKey, *ed25519.PrivateKey:
+			return k, nil
+		default:
+			// This is a PKCS8 key, but it is not a supported private key
+			return nil, fmt.Errorf("key is not an RSA private key")
+		}
+	}
+
+	// The key may be in PKCS1 format
+	key, err = x509.ParsePKCS1PrivateKey(keyBytes)
+	if err == nil {
+		// This is a PKCS1 private key, return it
+		return key, nil
+	}
+
+	// The key is not in any of the supported formats
+	return nil, fmt.Errorf("key is not an RSA private key")
+}
diff --git a/internal/ssh/ssh.go b/internal/ssh/ssh.go
index 700e31b..ebc7680 100644
--- a/internal/ssh/ssh.go
+++ b/internal/ssh/ssh.go
@@ -357,7 +357,7 @@ func doWithTimeout(ctx context.Context, fn func(context.Context) error) error {
 
 func waitForSSH(ctx context.Context, errChan chan error, c ConnectionConfig, options WaitForSSHOptions) {
 	for ctx.Err() == nil {
-		_ = <-errChan
+		<-errChan
 		tryCtx, cancel := context.WithTimeout(ctx, options.ConnectionTimeout)
 		sshErr := TrySSHConnect(tryCtx, c, options)
 		cancel()
diff --git a/internal/validation/log.go b/internal/validation/log.go
new file mode 100644
index 0000000..d2fe372
--- /dev/null
+++ b/internal/validation/log.go
@@ -0,0 +1,43 @@
+package validation
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+// ValidationLogger is an implementation of the v1.Logger interface.
+type ValidationLogger struct{}
+
+var _
v1.Logger = &ValidationLogger{} + +func (l *ValidationLogger) Debug(_ context.Context, msg string, fields ...v1.Field) { + log("DEBUG", msg, fields...) +} + +func (l *ValidationLogger) Info(_ context.Context, msg string, fields ...v1.Field) { + log("INFO", msg, fields...) +} + +func (l *ValidationLogger) Warn(_ context.Context, msg string, fields ...v1.Field) { + log("WARN", msg, fields...) +} + +func (l *ValidationLogger) Error(_ context.Context, err error, fields ...v1.Field) { + log("ERROR", err.Error(), fields...) +} + +func log(level string, msg string, fields ...v1.Field) { + fmt.Printf("%s: %s\n", level, msg) + + if len(fields) > 0 { + fieldStrings := []string{} + for _, field := range fields { + fieldStrings = append(fieldStrings, fmt.Sprintf("%s: %s", field.Key, field.Value)) + } + + fmt.Printf("{%s}\n", strings.Join(fieldStrings, ", ")) + } +} diff --git a/internal/validation/suite.go b/internal/validation/suite.go index 877919a..3122695 100644 --- a/internal/validation/suite.go +++ b/internal/validation/suite.go @@ -6,7 +6,7 @@ import ( "time" "github.com/brevdev/cloud/internal/ssh" - "github.com/brevdev/cloud/v1" + v1 "github.com/brevdev/cloud/v1" "github.com/stretchr/testify/require" ) @@ -126,3 +126,426 @@ func RunInstanceLifecycleValidation(t *testing.T, config ProviderConfig) { }) }) } + +type NetworkValidationOpts struct { + Name string + RefID string + CidrBlock string + PublicSubnetCidrBlock string + Tags map[string]string +} + +func RunNetworkValidation(t *testing.T, config ProviderConfig, opts NetworkValidationOpts) { + if testing.Short() { + t.Skip("Skipping validation tests in short mode") + } + + // Set a default timeout of 15 minutes for the validation suite + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) + defer cancel() + + client, err := config.Credential.MakeClient(ctx, config.Location) + if err != nil { + t.Fatalf("Failed to create client for %s: %v", config.Credential.GetCloudProviderID(), err) + } + + // Test #1: ValidateCreateVPC + var vpcID v1.CloudProviderResourceID + t.Run("ValidateCreateVPC", func(t *testing.T) { + vpc, err := v1.ValidateCreateVPC(ctx, client, v1.CreateVPCArgs{ + Name: opts.Name, + RefID: opts.RefID, + CidrBlock: opts.CidrBlock, + Subnets: []v1.CreateSubnetArgs{ + {CidrBlock: opts.PublicSubnetCidrBlock, Type: v1.SubnetTypePublic}, + }, + Tags: opts.Tags, + }) + require.NoError(t, err, "ValidateCreateVPC should pass") + vpcID = vpc.GetID() + }) + + // The VPC was created successfully -- create a defer function to delete the VPC if the tests fail + deletionSucceeded := false + defer func() { + if !deletionSucceeded && vpcID != "" { + t.Logf("Cleaning up VPC after failed tests: %s", vpcID) + err = v1.ValidateDeleteVPC(ctx, client, v1.DeleteVPCArgs{ + ID: vpcID, + }) + if err != nil { + t.Fatalf("Failed to cleanup after validation of VPC: %v", err) + } + } + }() + + // Test #2: ValidateGetVPC + t.Run("ValidateGetVPC", func(t *testing.T) { + vpc, err := v1.ValidateGetVPC(ctx, client, v1.GetVPCArgs{ + ID: vpcID, + }) + require.NoError(t, err, "ValidateGetVPC should pass") + require.NotNil(t, vpc) + }) + + // Test #3: WaitForVPCToBeAvailable + t.Run("WaitForVPCToBeAvailable", func(t *testing.T) { + err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.VPC]{ + GetResource: func() (*v1.VPC, error) { + return client.GetVPC(ctx, v1.GetVPCArgs{ID: vpcID}) + }, + Predicate: func(vpc *v1.VPC) bool { + return vpc.GetStatus() == v1.VPCStatusAvailable + }, + Timeout: 5 * time.Minute, + Interval: 5 * 
time.Second, + }) + require.NoError(t, err, "WaitForVPCToBeAvailable should pass") + }) + + // Test #4: ValidateDeleteVPC + t.Run("ValidateDeleteVPC", func(t *testing.T) { + err := v1.ValidateDeleteVPC(ctx, client, v1.DeleteVPCArgs{ + ID: vpcID, + }) + require.NoError(t, err, "ValidateDeleteVPC should pass") + deletionSucceeded = true + }) + + // Test #5: WaitForVPCToBeDeleted + t.Run("WaitForVPCToBeDeleted", func(t *testing.T) { + err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.VPC]{ + GetResource: func() (*v1.VPC, error) { + return client.GetVPC(ctx, v1.GetVPCArgs{ID: vpcID}) + }, + Predicate: func(_ *v1.VPC) bool { + return false // continue until failure + }, + Timeout: 5 * time.Minute, + Interval: 5 * time.Second, + }) + require.ErrorIs(t, err, v1.ErrResourceNotFound) + deletionSucceeded = true + }) +} + +type KubernetesValidationOpts struct { + Name string + RefID string + KubernetesVersion string + Subnets []KubernetesValidationSubnetOpts + NodeGroupOpts *KubernetesValidationNodeGroupOpts + NetworkOpts *KubernetesValidationNetworkOpts + UserOpts *KubernetesValidationUserOpts + Tags map[string]string +} + +type KubernetesValidationNodeGroupOpts struct { + Name string + RefID string + MinNodeCount int + MaxNodeCount int + InstanceType string + DiskSizeGiB int +} + +type KubernetesValidationNetworkOpts struct { + Name string + RefID string + CidrBlock string + Subnets []KubernetesValidationSubnetOpts +} + +type KubernetesValidationSubnetOpts struct { + Name string + RefID string + CidrBlock string + SubnetType v1.SubnetType +} + +type KubernetesValidationUserOpts struct { + Username string + Role string + RSAPEMBase64 string +} + +func RunKubernetesValidation(t *testing.T, config ProviderConfig, opts KubernetesValidationOpts) { //nolint:funlen,gocyclo // This function is long but it is a validation suite + if testing.Short() { + t.Skip("Skipping validation tests in short mode") + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + client, err := config.Credential.MakeClient(ctx, config.Location) + if err != nil { + t.Fatalf("Failed to create client for %s: %v", config.Credential.GetCloudProviderID(), err) + } + + if opts.NetworkOpts == nil { + t.Fatalf("KubernetesValidationOpts.NetworkOpts is required") + } + + subnets := []v1.CreateSubnetArgs{} + for _, subnet := range opts.NetworkOpts.Subnets { + subnets = append(subnets, v1.CreateSubnetArgs{ + RefID: subnet.RefID, + CidrBlock: subnet.CidrBlock, + Type: subnet.SubnetType, + }) + } + + // Create the initial VPC + vpc, err := v1.ValidateCreateVPC(ctx, client, v1.CreateVPCArgs{ + Name: opts.NetworkOpts.Name, + RefID: opts.NetworkOpts.RefID, + CidrBlock: opts.NetworkOpts.CidrBlock, + Subnets: subnets, + Tags: opts.Tags, + }) + require.NoError(t, err, "ValidateCreateVPC should pass") + + // Wait for the VPC to be available + err = WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.VPC]{ + GetResource: func() (*v1.VPC, error) { + return client.GetVPC(ctx, v1.GetVPCArgs{ID: vpc.GetID()}) + }, + Predicate: func(vpc *v1.VPC) bool { + return vpc.GetStatus() == v1.VPCStatusAvailable + }, + Timeout: 5 * time.Minute, + Interval: 5 * time.Second, + }) + require.NoError(t, err, "WaitForVPCToBeAvailable should pass") + t.Logf("VPC created: %s", vpc.GetID()) + + // The VPC was created successfully -- create a defer function to delete the VPC if the tests fail + defer func() { + if vpc != nil { + err = v1.ValidateDeleteVPC(ctx, client, v1.DeleteVPCArgs{ + ID: 
vpc.GetID(),
+			})
+			if err != nil {
+				t.Fatalf("Failed to cleanup after validation of VPC: %v", err)
+			}
+			t.Logf("VPC deleted: %s", vpc.GetID())
+		}
+	}()
+
+	// Map the input subnet ref IDs to their real subnets
+	subnetRefIDs := make(map[string]*v1.Subnet)
+	for _, subnet := range vpc.GetSubnets() {
+		subnetRefIDs[subnet.GetRefID()] = subnet
+	}
+
+	// Convert the input subnet ref IDs to their real subnet IDs
+	subnetIDs := []v1.CloudProviderResourceID{}
+	for _, subnet := range opts.Subnets {
+		subnetIDs = append(subnetIDs, subnetRefIDs[subnet.RefID].GetID())
+	}
+
+	// Test: Create Kubernetes Cluster
+	var clusterID v1.CloudProviderResourceID
+	t.Run("ValidateCreateKubernetesCluster", func(t *testing.T) {
+		cluster, err := v1.ValidateCreateKubernetesCluster(ctx, client, v1.CreateClusterArgs{
+			Name:              opts.Name,
+			RefID:             opts.RefID,
+			VPCID:             vpc.GetID(),
+			SubnetIDs:         subnetIDs,
+			KubernetesVersion: opts.KubernetesVersion,
+			Tags:              opts.Tags,
+		})
+		require.NoError(t, err, "ValidateCreateKubernetesCluster should pass")
+		require.NotNil(t, cluster)
+		clusterID = cluster.GetID()
+	})
+
+	// The Kubernetes cluster was created successfully -- create a defer function to delete the Kubernetes cluster if the tests fail
+	clusterDeletionSucceeded := false
+	defer func() {
+		if !clusterDeletionSucceeded && clusterID != "" {
+			t.Logf("Cleaning up Kubernetes cluster after failed tests: %s", clusterID)
+			err = v1.ValidateDeleteKubernetesCluster(ctx, client, v1.DeleteClusterArgs{
+				ID: clusterID,
+			})
+			if err != nil {
+				t.Fatalf("Failed to cleanup after validation of Kubernetes cluster: %v", err)
+			}
+		}
+	}()
+
+	// Test: Get Kubernetes Cluster
+	t.Run("ValidateGetKubernetesCluster", func(t *testing.T) {
+		cluster, err := v1.ValidateGetKubernetesCluster(ctx, client, v1.GetClusterArgs{
+			ID: clusterID,
+		})
+		require.NoError(t, err, "ValidateGetKubernetesCluster should pass")
+		require.NotNil(t, cluster)
+	})
+
+	// Test: WaitFor Kubernetes Cluster to Be Available
+	t.Run("WaitForKubernetesClusterToBeAvailable", func(t *testing.T) {
+		err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.Cluster]{
+			GetResource: func() (*v1.Cluster, error) {
+				return client.GetCluster(ctx, v1.GetClusterArgs{ID: clusterID})
+			},
+			Predicate: func(cluster *v1.Cluster) bool {
+				return cluster.GetStatus() == v1.ClusterStatusAvailable
+			},
+			Timeout:  20 * time.Minute,
+			Interval: 15 * time.Second,
+		})
+		require.NoError(t, err, "WaitForKubernetesClusterToBeAvailable should pass")
+	})
+
+	// Test: Get Kubernetes Cluster Credentials
+	t.Run("ValidateGetKubernetesClusterCredentials", func(t *testing.T) {
+		t.Skip("Under development")
+		_, err := v1.ValidateSetKubernetesClusterUser(ctx, client, v1.SetClusterUserArgs{
+			ClusterID:    clusterID,
+			Username:     opts.UserOpts.Username,
+			Role:         opts.UserOpts.Role,
+			RSAPEMBase64: opts.UserOpts.RSAPEMBase64,
+		})
+		require.NoError(t, err, "ValidateGetKubernetesClusterCredentials should pass")
+	})
+
+	// Test: Create Kubernetes Node Group
+	var nodeGroup v1.NodeGroup
+	t.Run("ValidateCreateKubernetesNodeGroup", func(t *testing.T) {
+		ng, err := v1.ValidateCreateKubernetesNodeGroup(ctx, client, v1.CreateNodeGroupArgs{
+			ClusterID:    clusterID,
+			Name:         opts.NodeGroupOpts.Name,
+			RefID:        opts.NodeGroupOpts.RefID,
+			MinNodeCount: opts.NodeGroupOpts.MinNodeCount,
+			MaxNodeCount: opts.NodeGroupOpts.MaxNodeCount,
+			InstanceType: opts.NodeGroupOpts.InstanceType,
+			DiskSizeGiB:  opts.NodeGroupOpts.DiskSizeGiB,
+			Tags:         opts.Tags,
+		})
+		require.NoError(t, err, "ValidateCreateKubernetesNodeGroup should pass")
+		require.NotNil(t, ng)
+		nodeGroup = *ng
+	})
+
+	// The node group was created successfully -- create a defer function to delete the node group if the tests fail
+	nodeGroupDeletionSucceeded := false
+	defer func() {
+		if !nodeGroupDeletionSucceeded && nodeGroup.GetID() != "" {
+			t.Logf("Cleaning up Kubernetes node group after failed tests: %s", nodeGroup.GetID())
+			err = v1.ValidateDeleteKubernetesNodeGroup(ctx, client, v1.DeleteNodeGroupArgs{
+				// ClusterID is required here as well; without it the provider cannot locate the node group
+				ClusterID: clusterID,
+				ID:        nodeGroup.GetID(),
+			})
+			if err != nil {
+				t.Fatalf("Failed to cleanup after validation of Kubernetes node group: %v", err)
+			}
+		}
+	}()
+
+	// Test: WaitFor Kubernetes Node Group to Be Available
+	t.Run("WaitForKubernetesNodeGroupToBeAvailable", func(t *testing.T) {
+		err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.NodeGroup]{
+			GetResource: func() (*v1.NodeGroup, error) {
+				return client.GetNodeGroup(ctx, v1.GetNodeGroupArgs{
+					ClusterID: clusterID,
+					ID:        nodeGroup.GetID(),
+				})
+			},
+			Predicate: func(nodeGroup *v1.NodeGroup) bool {
+				return nodeGroup.GetStatus() == v1.NodeGroupStatusAvailable
+			},
+			Timeout:  20 * time.Minute,
+			Interval: 15 * time.Second,
+		})
+		require.NoError(t, err, "WaitForKubernetesNodeGroupToBeAvailable should pass")
+	})
+
+	// Test: Validate Cluster Node Groups matches the created node group
+	t.Run("ValidateClusterNodeGroups", func(t *testing.T) {
+		err := v1.ValidateClusterNodeGroups(ctx, client, v1.GetClusterArgs{ID: clusterID}, nodeGroup)
+		require.NoError(t, err, "ValidateClusterNodeGroups should pass")
+	})
+
+	// Test: Modify Kubernetes Node Group
+	t.Run("ValidateModifyKubernetesNodeGroup", func(t *testing.T) {
+		err := v1.ValidateModifyKubernetesNodeGroup(ctx, client, v1.ModifyNodeGroupArgs{
+			ClusterID:    clusterID,
+			ID:           nodeGroup.GetID(),
+			MinNodeCount: opts.NodeGroupOpts.MinNodeCount + 1,
+			MaxNodeCount: opts.NodeGroupOpts.MaxNodeCount + 1,
+		})
+		require.NoError(t, err, "ValidateModifyKubernetesNodeGroup should pass")
+	})
+
+	// Test: WaitFor Kubernetes Node Group to Be Modified (named distinctly so it does not collide with the earlier subtest)
+	t.Run("WaitForKubernetesNodeGroupToBeModified", func(t *testing.T) {
+		err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.NodeGroup]{
+			GetResource: func() (*v1.NodeGroup, error) {
+				return client.GetNodeGroup(ctx, v1.GetNodeGroupArgs{
+					ClusterID: clusterID,
+					ID:        nodeGroup.GetID(),
+				})
+			},
+			Predicate: func(nodeGroup *v1.NodeGroup) bool {
+				return nodeGroup.GetStatus() == v1.NodeGroupStatusAvailable &&
+					nodeGroup.GetMinNodeCount() == opts.NodeGroupOpts.MinNodeCount+1 &&
+					nodeGroup.GetMaxNodeCount() == opts.NodeGroupOpts.MaxNodeCount+1
+			},
+			Timeout:  20 * time.Minute,
+			Interval: 15 * time.Second,
+		})
+		require.NoError(t, err, "WaitForKubernetesNodeGroupToBeModified should pass")
+	})
+
+	// Test: Delete Kubernetes Node Group
+	t.Run("ValidateDeleteKubernetesNodeGroup", func(t *testing.T) {
+		err := v1.ValidateDeleteKubernetesNodeGroup(ctx, client, v1.DeleteNodeGroupArgs{
+			ClusterID: clusterID,
+			ID:        nodeGroup.GetID(),
+		})
+		require.NoError(t, err, "ValidateDeleteKubernetesNodeGroup should pass")
+		nodeGroupDeletionSucceeded = true
+	})
+
+	// Test: WaitFor Kubernetes Node Group to Be Deleted
+	t.Run("WaitForKubernetesNodeGroupToBeDeleted", func(t *testing.T) {
+		err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.NodeGroup]{
+			GetResource: func() (*v1.NodeGroup, error) {
+				return client.GetNodeGroup(ctx, v1.GetNodeGroupArgs{
+					ClusterID: clusterID,
+					ID:        nodeGroup.GetID(),
+				})
+			},
+			Predicate: func(_ 
*v1.NodeGroup) bool {
+				return false // continue until failure
+			},
+			Timeout:  20 * time.Minute,
+			Interval: 15 * time.Second,
+		})
+		require.ErrorIs(t, err, v1.ErrResourceNotFound)
+	})
+
+	// Test: Delete Kubernetes Cluster
+	t.Run("ValidateDeleteKubernetesCluster", func(t *testing.T) {
+		err := v1.ValidateDeleteKubernetesCluster(ctx, client, v1.DeleteClusterArgs{
+			ID: clusterID,
+		})
+		require.NoError(t, err, "ValidateDeleteKubernetesCluster should pass")
+	})
+
+	// Test: WaitFor Kubernetes Cluster to Be Deleted
+	t.Run("WaitForKubernetesClusterToBeDeleted", func(t *testing.T) {
+		err := WaitForResourcePredicate(ctx, WaitForResourcePredicateOpts[*v1.Cluster]{
+			GetResource: func() (*v1.Cluster, error) {
+				return client.GetCluster(ctx, v1.GetClusterArgs{ID: clusterID})
+			},
+			Predicate: func(_ *v1.Cluster) bool {
+				return false // continue until failure
+			},
+			Timeout:  20 * time.Minute,
+			Interval: 15 * time.Second,
+		})
+		require.ErrorIs(t, err, v1.ErrResourceNotFound)
+		clusterDeletionSucceeded = true
+	})
+}
diff --git a/internal/validation/utils.go b/internal/validation/utils.go
new file mode 100644
index 0000000..6cdbaa0
--- /dev/null
+++ b/internal/validation/utils.go
@@ -0,0 +1,44 @@
+package validation
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// WaitForResourcePredicateOpts configures WaitForResourcePredicate, which polls GetResource until Predicate returns true.
+type WaitForResourcePredicateOpts[T any] struct {
+	GetResource func() (T, error)
+	Predicate   func(resource T) bool
+	Timeout     time.Duration
+	Interval    time.Duration
+}
+
+func WaitForResourcePredicate[T any](ctx context.Context, opts WaitForResourcePredicateOpts[T]) error {
+	ctx, cancel := context.WithTimeout(ctx, opts.Timeout)
+	defer cancel()
+
+	ticker := time.NewTicker(opts.Interval)
+	defer ticker.Stop()
+
+	fmt.Printf("Entering WaitForResourcePredicate, timeout: %s, interval: %s\n", opts.Timeout.String(), opts.Interval.String())
+	for {
+		resource, err := opts.GetResource()
+		if err != nil {
+			return err
+		}
+
+		if opts.Predicate(resource) {
+			fmt.Println("Resource satisfies predicate")
+			break
+		}
+		fmt.Printf("Waiting %s for resource to satisfy predicate\n", opts.Interval.String())
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for resource to satisfy predicate: %w", ctx.Err())
+		case <-ticker.C:
+			continue
+		}
+	}
+	return nil
+}
diff --git a/v1/capabilities.go b/v1/capabilities.go
index 7342e0d..3d08a6a 100644
--- a/v1/capabilities.go
+++ b/v1/capabilities.go
@@ -1,37 +1,27 @@
 package v1
 
+import "slices"
+
 type Capability string
 
 type Capabilities []Capability
 
 func (c Capabilities) IsCapable(cc Capability) bool {
-	for _, capability := range c {
-		if capability == cc {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(c, cc)
 }
 
 const (
 	CapabilityCreateInstance           Capability = "create-instance"
 	CapabilityCreateIdempotentInstance Capability = "create-instance-idempotent"
 	CapabilityTerminateInstance        Capability = "terminate-instance"
+	CapabilityCreateTerminateInstance  Capability = "create-terminate-instance"
+	CapabilityInstanceUserData         Capability = "instance-userdata" // specify user data when creating an instance in CreateInstanceAttrs // should be in instance type
+	CapabilityTags                     Capability = "tags"
+	CapabilityRebootInstance           Capability = "reboot-instance"
+	CapabilityResizeInstanceVolume     Capability = "resize-instance-volume"
+	CapabilityStopStartInstance        Capability = "stop-start-instance"
+	CapabilityMachineImage             Capability = "machine-image"
+	
CapabilityModifyFirewall Capability = "modify-firewall" + CapabilityVPC Capability = "vpc" + CapabilityManagedKubernetes Capability = "managed-kubernetes" ) - -const ( - CapabilityCreateTerminateInstance Capability = "create-terminate-instance" - CapabilityInstanceUserData Capability = "instance-userdata" // specify user data when creating an instance in CreateInstanceAttrs // should be in instance type -) - -const CapabilityTags Capability = "tags" - -const CapabilityRebootInstance Capability = "reboot-instance" - -const CapabilityResizeInstanceVolume Capability = "resize-instance-volume" - -const CapabilityStopStartInstance Capability = "stop-start-instance" - -const CapabilityMachineImage Capability = "machine-image" - -const CapabilityModifyFirewall Capability = "modify-firewall" diff --git a/v1/client.go b/v1/client.go index 54be552..6d282a0 100644 --- a/v1/client.go +++ b/v1/client.go @@ -42,4 +42,6 @@ type CloudClient interface { CloudModifyFirewall CloudInstanceTags UpdateHandler + CloudMaintainVPC + CloudMaintainKubernetes } diff --git a/v1/errors.go b/v1/errors.go index ceb0b2e..290ed20 100644 --- a/v1/errors.go +++ b/v1/errors.go @@ -8,5 +8,6 @@ var ( ErrImageNotFound = errors.New("image not found") ErrDuplicateFirewallRule = errors.New("duplicate firewall rule") ErrInstanceNotFound = errors.New("instance not found") + ErrResourceNotFound = errors.New("resource not found") ErrServiceUnavailable = errors.New("api is temporarily unavailable") ) diff --git a/v1/ids.go b/v1/ids.go new file mode 100644 index 0000000..a71167c --- /dev/null +++ b/v1/ids.go @@ -0,0 +1,3 @@ +package v1 + +type CloudProviderResourceID string diff --git a/v1/instance.go b/v1/instance.go index 772f331..82f4afb 100644 --- a/v1/instance.go +++ b/v1/instance.go @@ -87,6 +87,7 @@ func ValidateListCreatedInstance(ctx context.Context, client CloudCreateTerminat }) if foundInstance == nil { validationErr = errors.Join(validationErr, fmt.Errorf("instance not found: %s", i.CloudID)) + return validationErr } if foundInstance.Location != i.Location { //nolint:gocritic // fine validationErr = errors.Join(validationErr, fmt.Errorf("location mismatch: %s != %s", foundInstance.Location, i.Location)) diff --git a/v1/kubernetes.go b/v1/kubernetes.go new file mode 100644 index 0000000..d436778 --- /dev/null +++ b/v1/kubernetes.go @@ -0,0 +1,550 @@ +package v1 + +import ( + "context" + "fmt" + + "github.com/brevdev/cloud/internal/errors" +) + +// Cluster represents the complete specification of a Brev Kubernetes cluster. +type Cluster struct { + // The ID assigned by the cloud provider to the cluster. + id CloudProviderResourceID + + // The name of the cluster, displayed on clients. + name string + + // The unique ID used to associate with this cluster. + refID string + + // The cloud provider that manages the cluster. + provider string + + // The cloud that hosts the cluster. + cloud string + + // The location of the cluster. + location string + + // The ID of the VPC that the cluster is associated with. + vpcID CloudProviderResourceID + + // The subnet IDs that the cluster's nodes are deployed into. + subnetIDs []CloudProviderResourceID + + // The version of Kubernetes that the cluster is running. + kubernetesVersion string + + // The status of the cluster. + status ClusterStatus + + // The API endpoint of the cluster. + apiEndpoint string + + // The CA certificate of the cluster, in base64. + clusterCACertificateBase64 string + + // The node groups associated with the cluster. 
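+	// (CreateCluster implementations may leave this empty; GetCluster populates it from the provider.)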
+ nodeGroups []*NodeGroup + + // The tags associated with the cluster. + tags Tags +} + +type ClusterStatus string + +const ( + ClusterStatusUnknown ClusterStatus = "unknown" + ClusterStatusPending ClusterStatus = "pending" + ClusterStatusAvailable ClusterStatus = "available" + ClusterStatusDeleting ClusterStatus = "deleting" + ClusterStatusFailed ClusterStatus = "failed" +) + +func (c *Cluster) GetID() CloudProviderResourceID { + return c.id +} + +func (c *Cluster) GetName() string { + return c.name +} + +func (c *Cluster) GetRefID() string { + return c.refID +} + +func (c *Cluster) GetProvider() string { + return c.provider +} + +func (c *Cluster) GetCloud() string { + return c.cloud +} + +func (c *Cluster) GetLocation() string { + return c.location +} + +func (c *Cluster) GetVPCID() CloudProviderResourceID { + return c.vpcID +} + +func (c *Cluster) GetSubnetIDs() []CloudProviderResourceID { + return c.subnetIDs +} + +func (c *Cluster) GetKubernetesVersion() string { + return c.kubernetesVersion +} + +func (c *Cluster) GetStatus() ClusterStatus { + return c.status +} + +func (c *Cluster) GetAPIEndpoint() string { + return c.apiEndpoint +} + +func (c *Cluster) GetClusterCACertificateBase64() string { + return c.clusterCACertificateBase64 +} + +func (c *Cluster) GetNodeGroups() []*NodeGroup { + return c.nodeGroups +} + +func (c *Cluster) GetTags() Tags { + return c.tags +} + +// ClusterSettings represents the settings for a Kubernetes cluster. This is the input to the NewCluster function. +type ClusterSettings struct { + // The ID assigned by the cloud provider to the cluster. + ID CloudProviderResourceID + + // The name of the cluster, displayed on clients. + Name string + + // The unique ID used to associate with this cluster. + RefID string + + // The cloud provider that manages the cluster. + Provider string + + // The cloud that hosts the cluster. + Cloud string + + // The location of the cluster. + Location string + + // The ID of the VPC that the cluster is associated with. + VPCID CloudProviderResourceID + + // The subnet IDs that the cluster's nodes are deployed into. + SubnetIDs []CloudProviderResourceID + + // The version of Kubernetes that the cluster is running. + KubernetesVersion string + + // The status of the cluster. + Status ClusterStatus + + // The API endpoint of the cluster. + APIEndpoint string + + // The CA certificate of the cluster, in base64. + ClusterCACertificateBase64 string + + // The node groups associated with the cluster. + NodeGroups []*NodeGroup + + // The tags associated with the cluster. + Tags Tags +} + +func (s *ClusterSettings) setDefaults() { +} + +func (s *ClusterSettings) validate() error { + var errs []error + if s.RefID == "" { + errs = append(errs, fmt.Errorf("refID is required")) + } + if s.Name == "" { + errs = append(errs, fmt.Errorf("name is required")) + } + if s.Status == "" { + errs = append(errs, fmt.Errorf("status is required")) + } + return errors.WrapAndTrace(errors.Join(errs...)) +} + +// NewCluster creates a new Cluster from the provided settings. 
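+// A minimal, hypothetical construction (per validate, only RefID, Name, and
+// Status are required; all values below are illustrative):
+//
+//	cluster, err := NewCluster(ClusterSettings{
+//		ID:     "cluster-123",
+//		Name:   "my-cluster",
+//		RefID:  "ref-abc",
+//		Status: ClusterStatusPending,
+//	})
+//	if err != nil {
+//		// handle invalid settings
+//	}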
+func NewCluster(settings ClusterSettings) (*Cluster, error) { + settings.setDefaults() + err := settings.validate() + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return &Cluster{ + id: settings.ID, + name: settings.Name, + refID: settings.RefID, + provider: settings.Provider, + cloud: settings.Cloud, + location: settings.Location, + vpcID: settings.VPCID, + subnetIDs: settings.SubnetIDs, + kubernetesVersion: settings.KubernetesVersion, + status: settings.Status, + apiEndpoint: settings.APIEndpoint, + clusterCACertificateBase64: settings.ClusterCACertificateBase64, + nodeGroups: settings.NodeGroups, + tags: settings.Tags, + }, nil +} + +// NodeGroup represents the complete specification of a Brev Kubernetes node group. +type NodeGroup struct { + // The name of the node group, displayed on clients. + name string + + // The unique ID used to associate with this node group. + refID string + + // The ID assigned by the cloud provider to the node group. + id CloudProviderResourceID + + // The minimum number of nodes in the node group. + minNodeCount int + + // The maximum number of nodes in the node group. + maxNodeCount int + + // The instance type of the nodes in the node group. + instanceType string + + // The disk size of the nodes in the node group. + diskSizeGiB int + + // The status of the node group. + status NodeGroupStatus + + // The tags associated with the node group. + tags Tags +} + +type NodeGroupStatus string + +const ( + NodeGroupStatusUnknown NodeGroupStatus = "unknown" + NodeGroupStatusPending NodeGroupStatus = "pending" + NodeGroupStatusAvailable NodeGroupStatus = "available" + NodeGroupStatusDeleting NodeGroupStatus = "deleting" + NodeGroupStatusFailed NodeGroupStatus = "failed" +) + +func (n *NodeGroup) GetName() string { + return n.name +} + +func (n *NodeGroup) GetRefID() string { + return n.refID +} + +func (n *NodeGroup) GetID() CloudProviderResourceID { + return n.id +} + +func (n *NodeGroup) GetMinNodeCount() int { + return n.minNodeCount +} + +func (n *NodeGroup) GetMaxNodeCount() int { + return n.maxNodeCount +} + +func (n *NodeGroup) GetInstanceType() string { + return n.instanceType +} + +func (n *NodeGroup) GetDiskSizeGiB() int { + return n.diskSizeGiB +} + +func (n *NodeGroup) GetStatus() NodeGroupStatus { + return n.status +} + +func (n *NodeGroup) GetTags() Tags { + return n.tags +} + +// NodeGroupSettings represents the settings for a Kubernetes node group. This is the input to the NewNodeGroup function. +type NodeGroupSettings struct { + // The name of the node group, displayed on clients. + Name string + + // The unique ID used to associate with this node group. + RefID string + + // The ID assigned by the cloud provider to the node group. + ID CloudProviderResourceID + + // The minimum number of nodes in the node group. + MinNodeCount int + + // The maximum number of nodes in the node group. + MaxNodeCount int + + // The instance type of the nodes in the node group. + InstanceType string + + // The disk size of the nodes in the node group. + DiskSizeGiB int + + // The status of the node group. + Status NodeGroupStatus + + // The tags associated with the node group. 
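+	// (Providers may add their own bookkeeping tags, such as a Name or created-by tag, in addition to these.)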
+ Tags Tags +} + +func (s *NodeGroupSettings) setDefaults() { +} + +func (s *NodeGroupSettings) validate() error { + var errs []error + if s.RefID == "" { + errs = append(errs, fmt.Errorf("refID is required")) + } + if s.Name == "" { + errs = append(errs, fmt.Errorf("name is required")) + } + if s.Status == "" { + errs = append(errs, fmt.Errorf("status is required")) + } + return errors.WrapAndTrace(errors.Join(errs...)) +} + +// NewNodeGroup creates a new NodeGroup from the provided settings. +func NewNodeGroup(settings NodeGroupSettings) (*NodeGroup, error) { + settings.setDefaults() + err := settings.validate() + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return &NodeGroup{ + name: settings.Name, + refID: settings.RefID, + id: settings.ID, + minNodeCount: settings.MinNodeCount, + maxNodeCount: settings.MaxNodeCount, + instanceType: settings.InstanceType, + diskSizeGiB: settings.DiskSizeGiB, + status: settings.Status, + tags: settings.Tags, + }, nil +} + +// ClusterUser represents the complete specification of a Brev Kubernetes cluster user. +type ClusterUser struct { + // The name of the cluster that the user is associated with. + clusterName string + + // The CA certificate of the cluster, in base64. + clusterCertificateAuthorityDataBase64 string + + // The API endpoint of the cluster. + clusterServerURL string + + // The username of the user. + username string + + // The client certificate of the user, in base64. + userClientCertificateDataBase64 string + + // The client key of the user, in base64. + userClientKeyDataBase64 string + + // The kubeconfig of the user, in base64. + kubeconfigBase64 string +} + +func (c *ClusterUser) GetClusterName() string { + return c.clusterName +} + +func (c *ClusterUser) GetClusterCertificateAuthorityDataBase64() string { + return c.clusterCertificateAuthorityDataBase64 +} + +func (c *ClusterUser) GetClusterServerURL() string { + return c.clusterServerURL +} + +func (c *ClusterUser) GetUsername() string { + return c.username +} + +func (c *ClusterUser) GetUserClientCertificateDataBase64() string { + return c.userClientCertificateDataBase64 +} + +func (c *ClusterUser) GetUserClientKeyDataBase64() string { + return c.userClientKeyDataBase64 +} + +func (c *ClusterUser) GetKubeconfigBase64() string { + return c.kubeconfigBase64 +} + +// ClusterUserSettings represents the settings for a Kubernetes cluster user. This is the input to the NewClusterUser function. +type ClusterUserSettings struct { + // The name of the cluster that the user is associated with. + ClusterName string + + // The CA certificate of the cluster, in base64. + ClusterCertificateAuthorityDataBase64 string + + // The API endpoint of the cluster. + ClusterServerURL string + + // The username of the user. + Username string + + // The client certificate of the user, in base64. + UserClientCertificateDataBase64 string + + // The client key of the user, in base64. + UserClientKeyDataBase64 string + + // The kubeconfig of the user, in base64. 
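+	// (Every ClusterUserSettings field is required; NewClusterUser returns an error if any is empty.)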
+ KubeconfigBase64 string +} + +func (s *ClusterUserSettings) setDefaults() { +} + +func (s *ClusterUserSettings) validate() error { + var errs []error + if s.ClusterName == "" { + errs = append(errs, fmt.Errorf("clusterName is required")) + } + if s.ClusterCertificateAuthorityDataBase64 == "" { + errs = append(errs, fmt.Errorf("clusterCertificateAuthorityDataBase64 is required")) + } + if s.ClusterServerURL == "" { + errs = append(errs, fmt.Errorf("clusterServerURL is required")) + } + if s.Username == "" { + errs = append(errs, fmt.Errorf("username is required")) + } + if s.UserClientCertificateDataBase64 == "" { + errs = append(errs, fmt.Errorf("userClientCertificateDataBase64 is required")) + } + if s.UserClientKeyDataBase64 == "" { + errs = append(errs, fmt.Errorf("userClientKeyDataBase64 is required")) + } + if s.KubeconfigBase64 == "" { + errs = append(errs, fmt.Errorf("kubeconfigBase64 is required")) + } + return errors.WrapAndTrace(errors.Join(errs...)) +} + +// NewClusterUser creates a new ClusterUser from the provided settings. +func NewClusterUser(settings ClusterUserSettings) (*ClusterUser, error) { + settings.setDefaults() + err := settings.validate() + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return &ClusterUser{ + clusterName: settings.ClusterName, + clusterCertificateAuthorityDataBase64: settings.ClusterCertificateAuthorityDataBase64, + clusterServerURL: settings.ClusterServerURL, + username: settings.Username, + userClientCertificateDataBase64: settings.UserClientCertificateDataBase64, + userClientKeyDataBase64: settings.UserClientKeyDataBase64, + kubeconfigBase64: settings.KubeconfigBase64, + }, nil +} + +type CloudMaintainKubernetes interface { + // Create a new Kubernetes cluster. + CreateCluster(ctx context.Context, args CreateClusterArgs) (*Cluster, error) + + // Get a Kubernetes cluster identified by the provided args. + GetCluster(ctx context.Context, args GetClusterArgs) (*Cluster, error) + + // Idempotently set a user into a Kubernetes cluster. + SetClusterUser(ctx context.Context, args SetClusterUserArgs) (*ClusterUser, error) + + // Create a new Kubernetes node group. + CreateNodeGroup(ctx context.Context, args CreateNodeGroupArgs) (*NodeGroup, error) + + // Get a Kubernetes node group identified by the provided args. + GetNodeGroup(ctx context.Context, args GetNodeGroupArgs) (*NodeGroup, error) + + // Modify a Kubernetes node group. + ModifyNodeGroup(ctx context.Context, args ModifyNodeGroupArgs) error + + // Delete a Kubernetes node group identified by the provided args. + DeleteNodeGroup(ctx context.Context, args DeleteNodeGroupArgs) error + + // Delete a Kubernetes cluster identified by the provided args. 
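+	// Once deletion completes, GetCluster is expected to return ErrResourceNotFound for the same ID.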
+ DeleteCluster(ctx context.Context, args DeleteClusterArgs) error +} + +type CreateClusterArgs struct { + Name string + RefID string + VPCID CloudProviderResourceID + SubnetIDs []CloudProviderResourceID + KubernetesVersion string + Tags Tags +} + +type SetClusterUserArgs struct { + ClusterID CloudProviderResourceID + Username string + RSAPEMBase64 string + Role string +} + +type GetClusterArgs struct { + ID CloudProviderResourceID +} + +type CreateNodeGroupArgs struct { + ClusterID CloudProviderResourceID + Name string + RefID string + MinNodeCount int + MaxNodeCount int + InstanceType string + DiskSizeGiB int + Tags Tags +} + +type GetNodeGroupArgs struct { + ClusterID CloudProviderResourceID + ID CloudProviderResourceID +} + +type ModifyNodeGroupArgs struct { + ClusterID CloudProviderResourceID + ID CloudProviderResourceID + MinNodeCount int + MaxNodeCount int +} + +type DeleteNodeGroupArgs struct { + ClusterID CloudProviderResourceID + ID CloudProviderResourceID +} + +type DeleteClusterArgs struct { + ID CloudProviderResourceID +} diff --git a/v1/kubernetes_validation.go b/v1/kubernetes_validation.go new file mode 100644 index 0000000..f28a1a3 --- /dev/null +++ b/v1/kubernetes_validation.go @@ -0,0 +1,165 @@ +package v1 + +import ( + "context" + "fmt" +) + +// ValidateCreateKubernetesCluster validates that the CreateCluster functionality works correctly. +func ValidateCreateKubernetesCluster(ctx context.Context, client CloudMaintainKubernetes, attrs CreateClusterArgs) (*Cluster, error) { + cluster, err := client.CreateCluster(ctx, attrs) + if err != nil { + return nil, err + } + + if cluster.GetName() != attrs.Name { + return nil, fmt.Errorf("cluster name does not match create args: '%s' != '%s'", cluster.GetName(), attrs.Name) + } + if cluster.GetRefID() != attrs.RefID { + return nil, fmt.Errorf("cluster refID does not match create args: '%s' != '%s'", cluster.GetRefID(), attrs.RefID) + } + if cluster.GetKubernetesVersion() != attrs.KubernetesVersion { + return nil, fmt.Errorf("cluster KubernetesVersion does not match create args: '%s' != '%s'", cluster.GetKubernetesVersion(), attrs.KubernetesVersion) + } + if cluster.GetVPCID() != attrs.VPCID { + return nil, fmt.Errorf("cluster VPCID does not match create args: '%s' != '%s'", cluster.GetVPCID(), attrs.VPCID) + } + if len(cluster.GetSubnetIDs()) != len(attrs.SubnetIDs) { + return nil, fmt.Errorf("cluster subnetIDs does not match create args: '%d' != '%d'", len(cluster.GetSubnetIDs()), len(attrs.SubnetIDs)) + } + for key, value := range attrs.Tags { + tagValue, ok := cluster.GetTags()[key] + if !ok { + return nil, fmt.Errorf("cluster tag does not match create args: '%s' not found", key) + } + if tagValue != value { + return nil, fmt.Errorf("cluster tag does not match create args: '%s' != '%s'", key, value) + } + } + return cluster, nil +} + +// ValidateGetKubernetesCluster validates that the GetCluster functionality works correctly. +func ValidateGetKubernetesCluster(ctx context.Context, client CloudMaintainKubernetes, attrs GetClusterArgs) (*Cluster, error) { + cluster, err := client.GetCluster(ctx, attrs) + if err != nil { + return nil, err + } + + if cluster.GetID() != attrs.ID { + return nil, fmt.Errorf("cluster ID does not match get args: '%s' != '%s'", cluster.GetID(), attrs.ID) + } + + return cluster, nil +} + +// ValidateSetKubernetesClusterUser validates that the SetClusterUser functionality works correctly. 
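+// (At present it only checks that the call succeeds; it performs no field-level checks on the returned user.)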
+func ValidateSetKubernetesClusterUser(ctx context.Context, client CloudMaintainKubernetes, attrs SetClusterUserArgs) (*ClusterUser, error) {
+	clusterUser, err := client.SetClusterUser(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+	return clusterUser, nil
+}
+
+// ValidateCreateKubernetesNodeGroup validates that the CreateNodeGroup functionality works correctly.
+func ValidateCreateKubernetesNodeGroup(ctx context.Context, client CloudMaintainKubernetes, attrs CreateNodeGroupArgs) (*NodeGroup, error) {
+	nodeGroup, err := client.CreateNodeGroup(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+
+	if nodeGroup.GetName() != attrs.Name {
+		return nil, fmt.Errorf("node group name does not match create args: '%s' != '%s'", nodeGroup.GetName(), attrs.Name)
+	}
+	if nodeGroup.GetRefID() != attrs.RefID {
+		return nil, fmt.Errorf("node group refID does not match create args: '%s' != '%s'", nodeGroup.GetRefID(), attrs.RefID)
+	}
+	if nodeGroup.GetMinNodeCount() != attrs.MinNodeCount {
+		return nil, fmt.Errorf("node group minNodeCount does not match create args: '%d' != '%d'", nodeGroup.GetMinNodeCount(), attrs.MinNodeCount)
+	}
+	if nodeGroup.GetMaxNodeCount() != attrs.MaxNodeCount {
+		return nil, fmt.Errorf("node group maxNodeCount does not match create args: '%d' != '%d'", nodeGroup.GetMaxNodeCount(), attrs.MaxNodeCount)
+	}
+	if nodeGroup.GetInstanceType() != attrs.InstanceType {
+		return nil, fmt.Errorf("node group instanceType does not match create args: '%s' != '%s'", nodeGroup.GetInstanceType(), attrs.InstanceType)
+	}
+	if nodeGroup.GetDiskSizeGiB() != attrs.DiskSizeGiB {
+		return nil, fmt.Errorf("node group diskSizeGiB does not match create args: '%d' != '%d'", nodeGroup.GetDiskSizeGiB(), attrs.DiskSizeGiB)
+	}
+
+	return nodeGroup, nil
+}
+
+// ValidateClusterNodeGroups validates that the cluster's node groups match the expected node group.
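+// It assumes the cluster has exactly one node group, as created by the validation suite.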
+func ValidateClusterNodeGroups(ctx context.Context, client CloudMaintainKubernetes, attrs GetClusterArgs, nodeGroup NodeGroup) error {
+	cluster, err := client.GetCluster(ctx, attrs)
+	if err != nil {
+		return err
+	}
+
+	if len(cluster.GetNodeGroups()) != 1 {
+		return fmt.Errorf("cluster node group count does not match expected: '%d' != '%d'", len(cluster.GetNodeGroups()), 1)
+	}
+
+	clusterNodeGroup := cluster.GetNodeGroups()[0]
+	if clusterNodeGroup.GetID() != nodeGroup.GetID() {
+		return fmt.Errorf("cluster node group ID does not match expected node group: '%s' != '%s'", clusterNodeGroup.GetID(), nodeGroup.GetID())
+	}
+	if clusterNodeGroup.GetName() != nodeGroup.GetName() {
+		return fmt.Errorf("cluster node group name does not match expected node group: '%s' != '%s'", clusterNodeGroup.GetName(), nodeGroup.GetName())
+	}
+	if clusterNodeGroup.GetRefID() != nodeGroup.GetRefID() {
+		return fmt.Errorf("cluster node group refID does not match expected node group: '%s' != '%s'", clusterNodeGroup.GetRefID(), nodeGroup.GetRefID())
+	}
+	if clusterNodeGroup.GetMinNodeCount() != nodeGroup.GetMinNodeCount() {
+		return fmt.Errorf("cluster node group minNodeCount does not match expected node group: '%d' != '%d'", clusterNodeGroup.GetMinNodeCount(), nodeGroup.GetMinNodeCount())
+	}
+	if clusterNodeGroup.GetMaxNodeCount() != nodeGroup.GetMaxNodeCount() {
+		return fmt.Errorf("cluster node group maxNodeCount does not match expected node group: '%d' != '%d'", clusterNodeGroup.GetMaxNodeCount(), nodeGroup.GetMaxNodeCount())
+	}
+	if clusterNodeGroup.GetInstanceType() != nodeGroup.GetInstanceType() {
+		return fmt.Errorf("cluster node group instanceType does not match expected node group: '%s' != '%s'", clusterNodeGroup.GetInstanceType(), nodeGroup.GetInstanceType())
+	}
+	if clusterNodeGroup.GetDiskSizeGiB() != nodeGroup.GetDiskSizeGiB() {
+		return fmt.Errorf("cluster node group diskSizeGiB does not match expected node group: '%d' != '%d'", clusterNodeGroup.GetDiskSizeGiB(), nodeGroup.GetDiskSizeGiB())
+	}
+	for key, value := range nodeGroup.GetTags() {
+		tagValue, ok := clusterNodeGroup.GetTags()[key]
+		if !ok {
+			return fmt.Errorf("cluster node group tag not found: '%s'", key)
+		}
+		if tagValue != value {
+			return fmt.Errorf("cluster node group tag '%s' does not match expected value: '%s' != '%s'", key, tagValue, value)
+		}
+	}
+
+	return nil
+}
+
+// ValidateModifyKubernetesNodeGroup validates that the ModifyNodeGroup functionality works correctly.
+func ValidateModifyKubernetesNodeGroup(ctx context.Context, client CloudMaintainKubernetes, attrs ModifyNodeGroupArgs) error {
+	err := client.ModifyNodeGroup(ctx, attrs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ValidateDeleteKubernetesNodeGroup validates that the DeleteNodeGroup functionality works correctly.
+func ValidateDeleteKubernetesNodeGroup(ctx context.Context, client CloudMaintainKubernetes, attrs DeleteNodeGroupArgs) error {
+	err := client.DeleteNodeGroup(ctx, attrs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ValidateDeleteKubernetesCluster validates that the DeleteCluster functionality works correctly.
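+// (Like the modify and node-group delete validators above, this is a thin wrapper that passes when the provider call returns no error.)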
+func ValidateDeleteKubernetesCluster(ctx context.Context, client CloudMaintainKubernetes, attrs DeleteClusterArgs) error { + err := client.DeleteCluster(ctx, attrs) + if err != nil { + return err + } + return nil +} diff --git a/v1/networking.go b/v1/networking.go index 5b03e1e..b8ab27f 100644 --- a/v1/networking.go +++ b/v1/networking.go @@ -1,6 +1,8 @@ package v1 -import "context" +import ( + "context" +) type CloudModifyFirewall interface { AddFirewallRulesToInstance(ctx context.Context, args AddFirewallRulesToInstanceArgs) error diff --git a/v1/notimplemented.go b/v1/notimplemented.go index afe02d7..84fe74f 100644 --- a/v1/notimplemented.go +++ b/v1/notimplemented.go @@ -126,3 +126,47 @@ func (c notImplCloudClient) MergeInstanceTypeForUpdate(_, i InstanceType) Instan func (c notImplCloudClient) GetMaxCreateRequestsPerMinute() int { return 10 } + +func (c notImplCloudClient) CreateVPC(_ context.Context, _ CreateVPCArgs) (*VPC, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) GetVPC(_ context.Context, _ GetVPCArgs) (*VPC, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) DeleteVPC(_ context.Context, _ DeleteVPCArgs) error { + return ErrNotImplemented +} + +func (c notImplCloudClient) CreateCluster(_ context.Context, _ CreateClusterArgs) (*Cluster, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) GetCluster(_ context.Context, _ GetClusterArgs) (*Cluster, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) SetClusterUser(_ context.Context, _ SetClusterUserArgs) (*ClusterUser, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) CreateNodeGroup(_ context.Context, _ CreateNodeGroupArgs) (*NodeGroup, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) GetNodeGroup(_ context.Context, _ GetNodeGroupArgs) (*NodeGroup, error) { + return nil, ErrNotImplemented +} + +func (c notImplCloudClient) ModifyNodeGroup(_ context.Context, _ ModifyNodeGroupArgs) error { + return ErrNotImplemented +} + +func (c notImplCloudClient) DeleteNodeGroup(_ context.Context, _ DeleteNodeGroupArgs) error { + return ErrNotImplemented +} + +func (c notImplCloudClient) DeleteCluster(_ context.Context, _ DeleteClusterArgs) error { + return ErrNotImplemented +} diff --git a/v1/providers/aws/capabilities.go b/v1/providers/aws/capabilities.go new file mode 100644 index 0000000..bd1a789 --- /dev/null +++ b/v1/providers/aws/capabilities.go @@ -0,0 +1,22 @@ +package v1 + +import ( + "context" + + v1 "github.com/brevdev/cloud/v1" +) + +func getAWSCapabilities() v1.Capabilities { + return v1.Capabilities{ + v1.CapabilityVPC, + v1.CapabilityManagedKubernetes, + } +} + +func (c *AWSClient) GetCapabilities(_ context.Context) (v1.Capabilities, error) { + return getAWSCapabilities(), nil +} + +func (c *AWSCredential) GetCapabilities(_ context.Context) (v1.Capabilities, error) { + return getAWSCapabilities(), nil +} diff --git a/v1/providers/aws/client.go b/v1/providers/aws/client.go new file mode 100644 index 0000000..7f655ef --- /dev/null +++ b/v1/providers/aws/client.go @@ -0,0 +1,127 @@ +package v1 + +import ( + "context" + "crypto/sha256" + "fmt" + + v1 "github.com/brevdev/cloud/v1" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + awslogging "github.com/aws/smithy-go/logging" +) + +const CloudProviderID string = "aws" + +type AWSCredential struct { + RefID string + AccessKeyID string + SecretAccessKey string +} + 
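+// Compile-time assertion that AWSCredential implements v1.CloudCredential.
+//
+// A typical (hypothetical) flow constructs a credential and then derives a
+// region-scoped client from it; the values below are placeholders:
+//
+//	cred := NewAWSCredential("my-ref", "<access-key-id>", "<secret-access-key>")
+//	client, err := cred.MakeClient(context.Background(), "us-west-2")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = client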
+var _ v1.CloudCredential = &AWSCredential{}
+
+func NewAWSCredential(refID string, accessKeyID string, secretAccessKey string) *AWSCredential {
+	return &AWSCredential{
+		RefID:           refID,
+		AccessKeyID:     accessKeyID,
+		SecretAccessKey: secretAccessKey,
+	}
+}
+
+func (c *AWSCredential) GetReferenceID() string {
+	return c.RefID
+}
+
+func (c *AWSCredential) GetAPIType() v1.APIType {
+	return v1.APITypeGlobal
+}
+
+func (c *AWSCredential) GetCloudProviderID() v1.CloudProviderID {
+	return v1.CloudProviderID(CloudProviderID)
+}
+
+func (c *AWSCredential) GetTenantID() (string, error) {
+	return fmt.Sprintf("%s-%x", CloudProviderID, sha256.Sum256([]byte(c.AccessKeyID))), nil
+}
+
+func (c *AWSCredential) MakeClient(_ context.Context, region string) (v1.CloudClient, error) {
+	return NewAWSClient(c.RefID, c.AccessKeyID, c.SecretAccessKey, region)
+}
+
+type AWSClient struct {
+	v1.NotImplCloudClient
+	refID     string
+	awsConfig aws.Config
+	region    string
+	logger    v1.Logger
+}
+
+var _ v1.CloudClient = &AWSClient{}
+
+type AWSClientOption func(c *AWSClient)
+
+func WithLogger(logger v1.Logger) AWSClientOption {
+	return func(c *AWSClient) {
+		c.logger = logger
+	}
+}
+
+func NewAWSClient(refID string, accessKeyID string, secretAccessKey string, region string, opts ...AWSClientOption) (*AWSClient, error) {
+	ctx := context.Background()
+
+	awsCredentials := credentials.NewStaticCredentialsProvider(accessKeyID, secretAccessKey, "")
+
+	awsClient := &AWSClient{
+		refID:  refID,
+		region: region,
+		logger: &v1.NoopLogger{},
+	}
+
+	for _, opt := range opts {
+		opt(awsClient)
+	}
+
+	awsConfig, err := config.LoadDefaultConfig(ctx,
+		config.WithCredentialsProvider(awsCredentials),
+		config.WithRegion(region),
+		config.WithLogger(&AWSLoggerAdapter{
+			logger: awsClient.logger,
+		}),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load AWS config: %w", err)
+	}
+
+	awsClient.awsConfig = awsConfig
+
+	return awsClient, nil
+}
+
+func (c *AWSClient) GetAPIType() v1.APIType {
+	return v1.APITypeGlobal
+}
+
+func (c *AWSClient) GetCloudProviderID() v1.CloudProviderID {
+	return v1.CloudProviderID(CloudProviderID)
+}
+
+func (c *AWSClient) GetReferenceID() string {
+	return c.refID
+}
+
+type AWSLoggerAdapter struct {
+	logger v1.Logger
+}
+
+func (l *AWSLoggerAdapter) Logf(classification awslogging.Classification, format string, v ...interface{}) {
+	ctx := context.Background()
+	switch classification {
+	case awslogging.Debug:
+		l.logger.Debug(ctx, fmt.Sprintf(format, v...))
+	case awslogging.Warn:
+		l.logger.Warn(ctx, fmt.Sprintf(format, v...))
+	}
+}
diff --git a/v1/providers/aws/kubernetes.go b/v1/providers/aws/kubernetes.go
new file mode 100644
index 0000000..113b4f3
--- /dev/null
+++ b/v1/providers/aws/kubernetes.go
@@ -0,0 +1,1027 @@
+package v1
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/eks"
+	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
+	"github.com/aws/aws-sdk-go-v2/service/iam"
+	iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	k8scmd "k8s.io/client-go/tools/clientcmd/api"
+	"sigs.k8s.io/aws-iam-authenticator/pkg/token"
+
+	"github.com/brevdev/cloud/internal/errors"
+	cloudk8s "github.com/brevdev/cloud/internal/kubernetes"
+	"github.com/brevdev/cloud/internal/rsa"
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+var (
+	errUsernameIsRequired = fmt.Errorf("username is 
required") + errRoleIsRequired = fmt.Errorf("role is required") + errClusterIDIsRequired = fmt.Errorf("cluster ID is required") + errRSAPEMBase64IsRequired = fmt.Errorf("RSA PEM base64 is required") + + errNodeGroupMinNodeCountMustBeGreaterThan0 = fmt.Errorf("node group minNodeCount must be greater than 0") + errNodeGroupMaxNodeCountMustBeGreaterThan0 = fmt.Errorf("node group maxNodeCount must be greater than 0") + errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount = fmt.Errorf("node group maxNodeCount must be greater than or equal to minNodeCount") + errNodeGroupInstanceTypeIsRequired = fmt.Errorf("node group instanceType is required") + errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo20 = fmt.Errorf("node group diskSizeGiB must be greater than or equal to 20") + errNodeGroupDiskSizeGiBMustBeLessThanOrEqualToMaxInt32 = fmt.Errorf("node group diskSizeGiB must be less than or equal to %d", math.MaxInt32) + errNodeGroupMaxNodeCountMustBeLessThanOrEqualToMaxInt32 = fmt.Errorf("node group maxNodeCount must be less than or equal to %d", math.MaxInt32) + errNodeGroupMinNodeCountMustBeLessThanOrEqualToMaxInt32 = fmt.Errorf("node group minNodeCount must be less than or equal to %d", math.MaxInt32) +) + +var _ v1.CloudMaintainKubernetes = &AWSClient{} + +const iamRolePathPrefix = "/brevcloudsdk/eks/clusters" + +func (c *AWSClient) CreateCluster(ctx context.Context, args v1.CreateClusterArgs) (*v1.Cluster, error) { + eksClient := eks.NewFromConfig(c.awsConfig) + iamClient := iam.NewFromConfig(c.awsConfig) + + // Fetch the target VPC + vpc, err := c.GetVPC(ctx, v1.GetVPCArgs{ + ID: args.VPCID, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Create a map of subnetID->subnet for this VPC so that we can find the target subnet + subnetMap := make(map[string]*v1.Subnet) + for _, subnet := range vpc.GetSubnets() { + subnetMap[string(subnet.GetID())] = subnet + } + + // Get the target subnets from the map + subnets := make([]*v1.Subnet, len(args.SubnetIDs)) + for i, subnetID := range args.SubnetIDs { + if _, ok := subnetMap[string(subnetID)]; !ok { + return nil, errors.WrapAndTrace(fmt.Errorf("subnet ID %s does not match VPC %s", subnetID, vpc.GetID())) + } else { + subnets[i] = subnetMap[string(subnetID)] + } + } + + // Create the cluster + awsCluster, err := c.createEKSCluster(ctx, eksClient, iamClient, subnets, args) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + brevCluster, err := v1.NewCluster(v1.ClusterSettings{ + ID: v1.CloudProviderResourceID(*awsCluster.Name), + // ID: v1.CloudProviderResourceID(*awsCluster.Arn), // todo: no API exists to fetch by ARN, so we may need to always use name as the ID + Name: *awsCluster.Name, + RefID: args.RefID, + Provider: CloudProviderID, + Cloud: CloudProviderID, + Location: c.region, + VPCID: vpc.GetID(), + SubnetIDs: args.SubnetIDs, + KubernetesVersion: args.KubernetesVersion, + Status: v1.ClusterStatusPending, + Tags: args.Tags, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevCluster, nil +} + +func (c *AWSClient) createEKSCluster(ctx context.Context, eksClient *eks.Client, iamClient *iam.Client, subnets []*v1.Subnet, args v1.CreateClusterArgs) (*ekstypes.Cluster, error) { + serviceRole, err := c.createServiceRole(ctx, iamClient, args) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + eksCluster, err := c.createCluster(ctx, eksClient, args, serviceRole, subnets) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + err = c.installEKSAddons(ctx, 
eksClient, eksCluster)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return eksCluster, nil
+}
+
+func (c *AWSClient) createServiceRole(ctx context.Context, iamClient *iam.Client, args v1.CreateClusterArgs) (*iamtypes.Role, error) {
+	serviceRoleName := fmt.Sprintf("%s-service-role", args.RefID)
+
+	c.logger.Debug(ctx, "creating service role", v1.Field{Key: "name", Value: serviceRoleName})
+
+	// Convert the tags to AWS tags
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	// Add the required tags
+	tags[tagName] = args.Name
+	tags[tagBrevRefID] = args.RefID
+	tags[tagCreatedBy] = tagBrevCloudSDK
+	tags[tagBrevClusterID] = args.RefID
+
+	iamTags := makeIAMTags(tags)
+
+	iamPath := fmt.Sprintf("%s/%s/", iamRolePathPrefix, args.RefID)
+	iamPath = regexp.MustCompile(`[^a-zA-Z0-9/]`).ReplaceAllString(iamPath, "") // strings.ReplaceAll would treat the pattern as a literal substring, so a regexp is required here
+
+	// Create the role
+	input := &iam.CreateRoleInput{
+		RoleName:    aws.String(serviceRoleName),
+		Description: aws.String("Role for EKS cluster"),
+		Path:        aws.String(iamPath),
+		AssumeRolePolicyDocument: aws.String(`{
+			"Version": "2012-10-17",
+			"Statement": [
+				{
+					"Effect": "Allow",
+					"Principal": {
+						"Service": "eks.amazonaws.com"
+					},
+					"Action": "sts:AssumeRole"
+				}
+			]
+		}`),
+		Tags: iamTags,
+	}
+	output, err := iamClient.CreateRole(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Attach the AmazonEKSClusterPolicy to the role
+	_, err = iamClient.AttachRolePolicy(ctx, &iam.AttachRolePolicyInput{
+		RoleName:  aws.String(serviceRoleName),
+		PolicyArn: aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return output.Role, nil
+}
+
+func (c *AWSClient) createCluster(ctx context.Context, eksClient *eks.Client, args v1.CreateClusterArgs, serviceRole *iamtypes.Role, subnets []*v1.Subnet) (*ekstypes.Cluster, error) {
+	c.logger.Debug(ctx, "creating cluster", v1.Field{Key: "name", Value: args.Name})
+
+	// Convert the tags to AWS tags
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	// Add the required tags
+	tags[tagName] = args.Name
+	tags[tagBrevRefID] = args.RefID
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	// Convert the subnets to subnet IDs
+	subnetIDs := make([]string, len(subnets))
+	for i, subnet := range subnets {
+		subnetIDs[i] = string(subnet.GetID())
+	}
+
+	c.logger.Debug(ctx, "creating cluster",
+		v1.Field{Key: "clusterName", Value: args.Name},
+		v1.Field{Key: "kubernetesVersion", Value: args.KubernetesVersion},
+		v1.Field{Key: "serviceRoleARN", Value: *serviceRole.Arn},
+		v1.Field{Key: "subnetIDs", Value: subnetIDs},
+		v1.Field{Key: "tags", Value: tags},
+	)
+	input := &eks.CreateClusterInput{
+		Name:    aws.String(args.Name),
+		Version: aws.String(args.KubernetesVersion),
+		RoleArn: aws.String(*serviceRole.Arn),
+		ResourcesVpcConfig: &ekstypes.VpcConfigRequest{
+			SubnetIds: subnetIDs,
+		},
+		AccessConfig: &ekstypes.CreateAccessConfigRequest{
+			AuthenticationMode: ekstypes.AuthenticationModeApiAndConfigMap,
+		},
+		Tags: tags,
+	}
+
+	output, err := eksClient.CreateCluster(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return output.Cluster, nil
+}
+
+func (c *AWSClient) installEKSAddons(ctx context.Context, eksClient *eks.Client, eksCluster *ekstypes.Cluster) error {
+	err := c.installEKSAddon(ctx, eksClient, eksCluster, "vpc-cni")
+	if err != nil {
+		return err
+	}
+
+	err = c.installEKSAddon(ctx, eksClient, eksCluster, 
"eks-pod-identity-agent") + if err != nil { + return err + } + + return nil +} + +func (c *AWSClient) installEKSAddon(ctx context.Context, eksClient *eks.Client, eksCluster *ekstypes.Cluster, addonName string) error { + c.logger.Debug(ctx, "installing EKS addon", + v1.Field{Key: "clusterName", Value: *eksCluster.Name}, + v1.Field{Key: "name", Value: addonName}, + ) + + _, err := eksClient.CreateAddon(ctx, &eks.CreateAddonInput{ + ClusterName: eksCluster.Name, + AddonName: aws.String(addonName), + }) + if err != nil { + return err + } + + return nil +} + +func (c *AWSClient) GetCluster(ctx context.Context, args v1.GetClusterArgs) (*v1.Cluster, error) { + eksClient := eks.NewFromConfig(c.awsConfig) + + eksCluster, err := eksClient.DescribeCluster(ctx, &eks.DescribeClusterInput{ + Name: aws.String(string(args.ID)), + }) + if err != nil { + var noSuchEntityError *ekstypes.ResourceNotFoundException + if errors.As(err, &noSuchEntityError) { + return nil, v1.ErrResourceNotFound + } + return nil, errors.WrapAndTrace(err) + } + + subnetIDs := make([]v1.CloudProviderResourceID, 0, len(eksCluster.Cluster.ResourcesVpcConfig.SubnetIds)) + for _, subnetID := range eksCluster.Cluster.ResourcesVpcConfig.SubnetIds { + subnetIDs = append(subnetIDs, v1.CloudProviderResourceID(subnetID)) + } + + nodeGroups, err := c.getClusterNodeGroups(ctx, eksClient, eksCluster.Cluster) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // List all addons and use their status to determine if the cluster is ready + addonNames, err := eksClient.ListAddons(ctx, &eks.ListAddonsInput{ + ClusterName: eksCluster.Cluster.Name, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + inactiveAddons := 0 + for _, name := range addonNames.Addons { + addon, err := eksClient.DescribeAddon(ctx, &eks.DescribeAddonInput{ + ClusterName: eksCluster.Cluster.Name, + AddonName: aws.String(name), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + if addon.Addon.Status != ekstypes.AddonStatusActive { + inactiveAddons++ + } + } + + var clusterStatus v1.ClusterStatus + if inactiveAddons > 0 { + clusterStatus = v1.ClusterStatusPending + } else { + clusterStatus = parseEKSClusterStatus(eksCluster.Cluster.Status) + } + + brevCluster, err := v1.NewCluster(v1.ClusterSettings{ + RefID: eksCluster.Cluster.Tags[tagBrevRefID], + ID: v1.CloudProviderResourceID(*eksCluster.Cluster.Name), + // ID: v1.CloudProviderResourceID(*eksCluster.Cluster.Arn), // todo: no API exists to fetch by ARN, so we may need to always use name as the ID + Name: *eksCluster.Cluster.Name, + KubernetesVersion: *eksCluster.Cluster.Version, + Status: clusterStatus, + VPCID: v1.CloudProviderResourceID(*eksCluster.Cluster.ResourcesVpcConfig.VpcId), + SubnetIDs: subnetIDs, + NodeGroups: nodeGroups, + ClusterCACertificateBase64: getClusterCACertificateBase64(eksCluster.Cluster), + APIEndpoint: getClusterAPIEndpoint(eksCluster.Cluster), + Provider: CloudProviderID, + Tags: v1.Tags(eksCluster.Cluster.Tags), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevCluster, nil +} + +func getClusterCACertificateBase64(cluster *ekstypes.Cluster) string { + if cluster == nil || + cluster.CertificateAuthority == nil || + cluster.CertificateAuthority.Data == nil { + return "" + } + return *cluster.CertificateAuthority.Data +} + +func getClusterAPIEndpoint(cluster *ekstypes.Cluster) string { + if cluster == nil || + cluster.Endpoint == nil { + return "" + } + return *cluster.Endpoint +} + +func parseEKSClusterStatus(status 
ekstypes.ClusterStatus) v1.ClusterStatus { + switch status { + case ekstypes.ClusterStatusCreating: + return v1.ClusterStatusPending + case ekstypes.ClusterStatusActive: + return v1.ClusterStatusAvailable + case ekstypes.ClusterStatusDeleting: + return v1.ClusterStatusDeleting + case ekstypes.ClusterStatusFailed: + return v1.ClusterStatusFailed + case ekstypes.ClusterStatusUpdating: + return v1.ClusterStatusPending + case ekstypes.ClusterStatusPending: + return v1.ClusterStatusPending + } + return v1.ClusterStatusUnknown +} + +func (c *AWSClient) getClusterNodeGroups(ctx context.Context, eksClient *eks.Client, eksCluster *ekstypes.Cluster) ([]*v1.NodeGroup, error) { + // First fetch the names of all of the cluster's node groups + eksNodeGroupNames, err := eksClient.ListNodegroups(ctx, &eks.ListNodegroupsInput{ + ClusterName: eksCluster.Name, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Then fetch the details of each node group + nodeGroups := make([]*v1.NodeGroup, 0, len(eksNodeGroupNames.Nodegroups)) + for _, eksNodeGroupName := range eksNodeGroupNames.Nodegroups { + eksNodeGroup, err := eksClient.DescribeNodegroup(ctx, &eks.DescribeNodegroupInput{ + ClusterName: eksCluster.Name, + NodegroupName: aws.String(eksNodeGroupName), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + brevNodeGroup, err := parseEKSNodeGroup(eksNodeGroup.Nodegroup) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + nodeGroups = append(nodeGroups, brevNodeGroup) + } + + return nodeGroups, nil +} + +func parseEKSNodeGroup(eksNodeGroup *ekstypes.Nodegroup) (*v1.NodeGroup, error) { + brevNodeGroup, err := v1.NewNodeGroup(v1.NodeGroupSettings{ + ID: v1.CloudProviderResourceID(*eksNodeGroup.NodegroupName), + // ID: v1.CloudProviderResourceID(*eksNodeGroup.NodegroupArn), // todo: no API exists to fetch by ARN, so we may need to always use name as the ID + RefID: eksNodeGroup.Tags[tagBrevRefID], + Name: *eksNodeGroup.NodegroupName, + MinNodeCount: int(*eksNodeGroup.ScalingConfig.MinSize), + MaxNodeCount: int(*eksNodeGroup.ScalingConfig.MaxSize), + InstanceType: eksNodeGroup.InstanceTypes[0], // todo: handle multiple instance types + DiskSizeGiB: int(*eksNodeGroup.DiskSize), + Status: parseEKSNodeGroupStatus(eksNodeGroup.Status), + Tags: v1.Tags(eksNodeGroup.Tags), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevNodeGroup, nil +} + +func parseEKSNodeGroupStatus(status ekstypes.NodegroupStatus) v1.NodeGroupStatus { + switch status { + case ekstypes.NodegroupStatusCreating: + return v1.NodeGroupStatusPending + case ekstypes.NodegroupStatusActive: + return v1.NodeGroupStatusAvailable + case ekstypes.NodegroupStatusDeleting: + return v1.NodeGroupStatusDeleting + case ekstypes.NodegroupStatusCreateFailed: + return v1.NodeGroupStatusFailed + case ekstypes.NodegroupStatusDeleteFailed: + return v1.NodeGroupStatusFailed + } + return v1.NodeGroupStatusUnknown +} + +func (c *AWSClient) CreateNodeGroup(ctx context.Context, args v1.CreateNodeGroupArgs) (*v1.NodeGroup, error) { + err := validateCreateNodeGroupArgs(args) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + eksClient := eks.NewFromConfig(c.awsConfig) + iamClient := iam.NewFromConfig(c.awsConfig) + + // Fetch the target cluster + cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{ + ID: args.ClusterID, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Convert the target cluster's subnet IDs to AWS subnet IDs + subnetIDs := make([]string, 
+
+func (c *AWSClient) CreateNodeGroup(ctx context.Context, args v1.CreateNodeGroupArgs) (*v1.NodeGroup, error) {
+	err := validateCreateNodeGroupArgs(args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	eksClient := eks.NewFromConfig(c.awsConfig)
+	iamClient := iam.NewFromConfig(c.awsConfig)
+
+	// Fetch the target cluster
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{
+		ID: args.ClusterID,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Convert the target cluster's subnet IDs to AWS subnet IDs
+	subnetIDs := make([]string, len(cluster.GetSubnetIDs()))
+	for i, subnetID := range cluster.GetSubnetIDs() {
+		subnetIDs[i] = string(subnetID)
+	}
+
+	// Create the node role that will be attached to all nodes in the node group
+	nodeRoleARN, err := c.createNodeRole(ctx, iamClient, cluster, args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Convert the tags to AWS tags
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+	tags[tagName] = args.Name
+	tags[tagBrevRefID] = args.RefID
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	// Create the node group
+	c.logger.Debug(ctx, "creating node group",
+		v1.Field{Key: "clusterName", Value: cluster.GetName()},
+		v1.Field{Key: "nodeGroupName", Value: args.Name},
+	)
+	output, err := eksClient.CreateNodegroup(ctx, &eks.CreateNodegroupInput{
+		ClusterName:   aws.String(cluster.GetName()),
+		NodegroupName: aws.String(args.Name),
+		NodeRole:      aws.String(nodeRoleARN),
+		ScalingConfig: &ekstypes.NodegroupScalingConfig{
+			MinSize: aws.Int32(int32(args.MinNodeCount)), //nolint:gosec // checked in input validation
+			MaxSize: aws.Int32(int32(args.MaxNodeCount)), //nolint:gosec // checked in input validation
+		},
+		DiskSize: aws.Int32(int32(args.DiskSizeGiB)), //nolint:gosec // checked in input validation
+		Subnets:  subnetIDs,
+		InstanceTypes: []string{
+			args.InstanceType,
+		},
+		Tags: tags,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	brevNodeGroup, err := parseEKSNodeGroup(output.Nodegroup)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevNodeGroup, nil
+}
+
+func validateCreateNodeGroupArgs(args v1.CreateNodeGroupArgs) error {
+	errs := []error{}
+	if args.MinNodeCount < 1 {
+		errs = append(errs, errNodeGroupMinNodeCountMustBeGreaterThan0)
+	}
+	if args.MaxNodeCount < 1 {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeGreaterThan0)
+	}
+	if args.MaxNodeCount < args.MinNodeCount {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount)
+	}
+	if args.InstanceType == "" {
+		errs = append(errs, errNodeGroupInstanceTypeIsRequired)
+	}
+	if args.DiskSizeGiB < 20 {
+		errs = append(errs, errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo20)
+	}
+	if args.DiskSizeGiB > math.MaxInt32 {
+		errs = append(errs, errNodeGroupDiskSizeGiBMustBeLessThanOrEqualToMaxInt32)
+	}
+	if args.MaxNodeCount > math.MaxInt32 {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeLessThanOrEqualToMaxInt32)
+	}
+	if args.MinNodeCount > math.MaxInt32 {
+		errs = append(errs, errNodeGroupMinNodeCountMustBeLessThanOrEqualToMaxInt32)
+	}
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
+
+func (c *AWSClient) createNodeRole(ctx context.Context, iamClient *iam.Client, cluster *v1.Cluster, args v1.CreateNodeGroupArgs) (string, error) {
+	roleName := fmt.Sprintf("%s-node-role", args.RefID)
+
+	c.logger.Debug(ctx, "creating node role",
+		v1.Field{Key: "clusterName", Value: cluster.GetName()},
+		v1.Field{Key: "roleName", Value: roleName},
+	)
+
+	// Convert the tags to AWS tags
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	// Add the required tags
+	tags[tagName] = args.Name
+	tags[tagBrevRefID] = args.RefID
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	iamTags := makeIAMTags(tags)
+	iamPath := getNodeGroupIAMRolePath(cluster.GetRefID(), args.RefID)
+
+	// Create the role
+	input := &iam.CreateRoleInput{
+		RoleName:    aws.String(roleName),
+		Description: aws.String("Role for EKS node group"),
+		Path:        aws.String(iamPath),
+		AssumeRolePolicyDocument: aws.String(`{
+			"Version": "2012-10-17",
+			"Statement": [
+				{
+					"Effect": "Allow",
+					"Principal": {
+						"Service": "ec2.amazonaws.com"
+					},
+					"Action": "sts:AssumeRole"
+				}
+			]
+		}`),
+		Tags: iamTags,
+	}
+	output, err := iamClient.CreateRole(ctx, input)
+	if err != nil {
+		return "", errors.WrapAndTrace(err)
+	}
+
+	// Attach the required managed policies to the role
+	managedPolicies := []string{
+		"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
+		"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
+		"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
+	}
+	for _, policyArn := range managedPolicies {
+		_, err = iamClient.AttachRolePolicy(ctx, &iam.AttachRolePolicyInput{
+			RoleName:  aws.String(roleName),
+			PolicyArn: aws.String(policyArn),
+		})
+		if err != nil {
+			return "", errors.WrapAndTrace(err)
+		}
+	}
+
+	return *output.Role.Arn, nil
+}
+
+func (c *AWSClient) GetNodeGroup(ctx context.Context, args v1.GetNodeGroupArgs) (*v1.NodeGroup, error) {
+	eksClient := eks.NewFromConfig(c.awsConfig)
+
+	eksNodeGroup, err := eksClient.DescribeNodegroup(ctx, &eks.DescribeNodegroupInput{
+		ClusterName:   aws.String(string(args.ClusterID)),
+		NodegroupName: aws.String(string(args.ID)),
+	})
+	if err != nil {
+		var notFoundErr *ekstypes.ResourceNotFoundException
+		if errors.As(err, &notFoundErr) {
+			return nil, v1.ErrResourceNotFound
+		}
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	brevNodeGroup, err := parseEKSNodeGroup(eksNodeGroup.Nodegroup)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevNodeGroup, nil
+}
+
+func (c *AWSClient) ModifyNodeGroup(ctx context.Context, args v1.ModifyNodeGroupArgs) error {
+	eksClient := eks.NewFromConfig(c.awsConfig)
+
+	err := validateModifyNodeGroupArgs(args)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{
+		ID: args.ClusterID,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	nodeGroup, err := c.GetNodeGroup(ctx, v1.GetNodeGroupArgs{
+		ClusterID: cluster.GetID(),
+		ID:        args.ID,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	_, err = eksClient.UpdateNodegroupConfig(ctx, &eks.UpdateNodegroupConfigInput{
+		ClusterName:   aws.String(cluster.GetName()),
+		NodegroupName: aws.String(nodeGroup.GetName()),
+		ScalingConfig: &ekstypes.NodegroupScalingConfig{
+			DesiredSize: aws.Int32(int32(args.MinNodeCount)), //nolint:gosec // checked in input validation
+			MinSize:     aws.Int32(int32(args.MinNodeCount)), //nolint:gosec // checked in input validation
+			MaxSize:     aws.Int32(int32(args.MaxNodeCount)), //nolint:gosec // checked in input validation
+		},
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+	return nil
+}
+
+func validateModifyNodeGroupArgs(args v1.ModifyNodeGroupArgs) error {
+	errs := []error{}
+	if args.MinNodeCount < 1 {
+		errs = append(errs, errNodeGroupMinNodeCountMustBeGreaterThan0)
+	}
+	if args.MaxNodeCount < 1 {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeGreaterThan0)
+	}
+	if args.MaxNodeCount < args.MinNodeCount {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount)
+	}
+	if args.MinNodeCount > math.MaxInt32 {
+		errs = append(errs, errNodeGroupMinNodeCountMustBeLessThanOrEqualToMaxInt32)
+	}
+	if args.MaxNodeCount > math.MaxInt32 {
+		errs = append(errs, errNodeGroupMaxNodeCountMustBeLessThanOrEqualToMaxInt32)
+	}
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
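+
+// Note: ModifyNodeGroup pins DesiredSize to MinNodeCount on every update, so
+// raising only the maximum will not add nodes until something else (e.g. a
+// cluster autoscaler) lifts the desired count. A hypothetical caller that
+// wants more capacity immediately would raise the minimum as well:
+//
+//	err := client.ModifyNodeGroup(ctx, v1.ModifyNodeGroupArgs{
+//		ClusterID:    clusterID,   // assumed to be in scope
+//		ID:           nodeGroupID, // assumed to be in scope
+//		MinNodeCount: 3,
+//		MaxNodeCount: 10,
+//	})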
+
+func (c *AWSClient) DeleteNodeGroup(ctx context.Context, args v1.DeleteNodeGroupArgs) error {
+	eksClient := eks.NewFromConfig(c.awsConfig)
+	iamClient := iam.NewFromConfig(c.awsConfig)
+
+	// Fetch the target cluster
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{
+		ID: args.ClusterID,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Fetch the target node group
+	nodeGroup, err := c.GetNodeGroup(ctx, v1.GetNodeGroupArgs{
+		ClusterID: cluster.GetID(),
+		ID:        args.ID,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Get the roles associated with the node group
+	iamPath := getNodeGroupIAMRolePath(cluster.GetRefID(), nodeGroup.GetRefID())
+	roles, err := iamClient.ListRoles(ctx, &iam.ListRolesInput{
+		PathPrefix: aws.String(iamPath),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d roles associated with node group", len(roles.Roles)),
+		v1.Field{Key: "clusterName", Value: cluster.GetName()},
+		v1.Field{Key: "nodeGroupName", Value: nodeGroup.GetName()},
+	)
+
+	// Delete the roles associated with the node group
+	for _, role := range roles.Roles {
+		c.logger.Debug(ctx, "removing role from EKS access entries", v1.Field{Key: "roleName", Value: *role.RoleName})
+
+		// Remove roles from EKS access entries
+		_, err = eksClient.DeleteAccessEntry(ctx, &eks.DeleteAccessEntryInput{
+			ClusterName:  aws.String(cluster.GetName()),
+			PrincipalArn: role.Arn,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Delete the role
+		err = c.deleteRole(ctx, iamClient, role)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	// Delete the node group
+	c.logger.Debug(ctx, "deleting node group",
+		v1.Field{Key: "clusterName", Value: cluster.GetName()},
+		v1.Field{Key: "nodeGroupName", Value: nodeGroup.GetName()},
+	)
+	_, err = eksClient.DeleteNodegroup(ctx, &eks.DeleteNodegroupInput{
+		ClusterName:   aws.String(cluster.GetName()),
+		NodegroupName: aws.String(nodeGroup.GetName()),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	return nil
+}
+
+// TODO: AWS EKS only supports IAM or OIDC authentication.
+func (c *AWSClient) SetClusterUser(ctx context.Context, args v1.SetClusterUserArgs) (*v1.ClusterUser, error) {
+	err := validatePutUserArgs(args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Fetch the cluster the user key will be added to
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{
+		ID: args.ClusterID,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to get cluster: %w", err))
+	}
+
+	// Create a clientset to interact with the cluster using the bearer token and CA certificate
+	clientset, err := c.newK8sClient(ctx, cluster)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to create clientset: %w", err))
+	}
+
+	// Prepare the private key for the CSR
+	privateKeyBytes, err := base64.StdEncoding.DecodeString(args.RSAPEMBase64)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode base64 string: %w", err))
+	}
+
+	// Parse the private key
+	privateKey, err := rsa.BytesToRSAKey(privateKeyBytes)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to parse private key: %w", err))
+	}
+
+	// Create the client certificate to allow for external access to the cluster for the holders of this private key
+	signedCertificate, err := cloudk8s.ClientCertificateData(ctx, clientset, args.Username, privateKey)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to get signed certificate: %w", err))
+	}
+
+	// Bind the user to the requested role on the cluster
+	err = cloudk8s.SetUserRole(ctx, clientset, args.Username, args.Role)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to set user role: %w", err))
+	}
+
+	// Get the certificate authority data
+	certificateAuthorityData, err := base64.StdEncoding.DecodeString(cluster.GetClusterCACertificateBase64())
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode certificate authority data: %w", err))
+	}
+
+	// Generate the complete kubeconfig
+	kubeconfigBytes, err := clientcmd.Write(k8scmd.Config{
+		Kind:       "Config",
+		APIVersion: "v1",
+		Clusters: map[string]*k8scmd.Cluster{
+			cluster.GetRefID(): {
+				Server:                   cluster.GetAPIEndpoint(),
+				CertificateAuthorityData: certificateAuthorityData,
+			},
+		},
+		AuthInfos: map[string]*k8scmd.AuthInfo{
+			cluster.GetRefID(): {
+				ClientCertificateData: signedCertificate,
+				ClientKeyData:         privateKeyBytes,
+			},
+		},
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to write kubeconfig: %w", err))
+	}
+
+	brevClusterUser, err := v1.NewClusterUser(v1.ClusterUserSettings{
+		ClusterName:                           cluster.GetRefID(),
+		ClusterCertificateAuthorityDataBase64: cluster.GetClusterCACertificateBase64(),
+		ClusterServerURL:                      cluster.GetAPIEndpoint(),
+		Username:                              args.Username,
+		UserClientCertificateDataBase64:       base64.StdEncoding.EncodeToString(signedCertificate),
+		UserClientKeyDataBase64:               base64.StdEncoding.EncodeToString(privateKeyBytes),
+		KubeconfigBase64:                      base64.StdEncoding.EncodeToString(kubeconfigBytes),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to create cluster user: %w", err))
+	}
+	return brevClusterUser, nil
+}
+
+func validatePutUserArgs(args v1.SetClusterUserArgs) error {
+	errs := []error{}
+	if args.Username == "" {
+		errs = append(errs, errUsernameIsRequired)
+	}
+	if args.Role == "" {
+		errs = append(errs, errRoleIsRequired)
+	}
+	if args.ClusterID == "" {
+		errs = append(errs, errClusterIDIsRequired)
+	}
+	if args.RSAPEMBase64 == "" {
+		errs = append(errs, errRSAPEMBase64IsRequired)
+	}
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
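+
+// Note: the kubeconfig generated above contains a cluster entry and an
+// auth-info entry but no context, so a consumer has to add one (or build a
+// rest.Config directly). A sketch of the latter, using hypothetical accessors
+// that mirror the ClusterUserSettings fields:
+//
+//	ca, _ := base64.StdEncoding.DecodeString(user.GetClusterCertificateAuthorityDataBase64())
+//	cert, _ := base64.StdEncoding.DecodeString(user.GetUserClientCertificateDataBase64())
+//	key, _ := base64.StdEncoding.DecodeString(user.GetUserClientKeyDataBase64())
+//	cfg := &rest.Config{
+//		Host:            user.GetClusterServerURL(),
+//		TLSClientConfig: rest.TLSClientConfig{CAData: ca, CertData: cert, KeyData: key},
+//	}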
+
+func (c *AWSClient) DeleteCluster(ctx context.Context, args v1.DeleteClusterArgs) error {
+	eksClient := eks.NewFromConfig(c.awsConfig)
+	iamClient := iam.NewFromConfig(c.awsConfig)
+
+	// Fetch the target cluster
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{ //nolint:staticcheck // prefer explicit struct literal
+		ID: args.ID,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Delete the cluster
+	c.logger.Debug(ctx, "deleting cluster", v1.Field{Key: "clusterName", Value: cluster.GetName()})
+	_, err = eksClient.DeleteCluster(ctx, &eks.DeleteClusterInput{
+		Name: aws.String(cluster.GetName()),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Get the roles associated with the cluster. Scope the prefix to this
+	// cluster's RefID so that roles belonging to other clusters under the
+	// shared path prefix are left untouched.
+	roles, err := iamClient.ListRoles(ctx, &iam.ListRolesInput{
+		PathPrefix: aws.String(fmt.Sprintf("%s/%s/", iamRolePathPrefix, cluster.GetRefID())),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d roles associated with cluster", len(roles.Roles)),
+		v1.Field{Key: "clusterName", Value: cluster.GetName()},
+	)
+
+	// Delete the roles associated with the cluster
+	for _, role := range roles.Roles {
+		err = c.deleteRole(ctx, iamClient, role)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	return nil
+}
+
+func (c *AWSClient) deleteRole(ctx context.Context, iamClient *iam.Client, role iamtypes.Role) error {
+	// Get the instance profiles associated with the role -- these are created as a side effect of attachment to a node (EC2 instance)
+	instanceProfiles, err := iamClient.ListInstanceProfilesForRole(ctx, &iam.ListInstanceProfilesForRoleInput{
+		RoleName: role.RoleName,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d instance profiles associated with role", len(instanceProfiles.InstanceProfiles)),
+		v1.Field{Key: "roleName", Value: *role.RoleName},
+	)
+	// Remove the role from the instance profiles
+	for _, instanceProfile := range instanceProfiles.InstanceProfiles {
+		c.logger.Debug(ctx, "removing role from instance profile",
+			v1.Field{Key: "instanceProfileName", Value: *instanceProfile.InstanceProfileName},
+			v1.Field{Key: "roleName", Value: *role.RoleName},
+		)
+		_, err = iamClient.RemoveRoleFromInstanceProfile(ctx, &iam.RemoveRoleFromInstanceProfileInput{
+			InstanceProfileName: instanceProfile.InstanceProfileName,
+			RoleName:            role.RoleName,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Delete the instance profile
+		c.logger.Debug(ctx, "deleting instance profile",
+			v1.Field{Key: "instanceProfileName", Value: *instanceProfile.InstanceProfileName},
+			v1.Field{Key: "roleName", Value: *role.RoleName},
+		)
+		_, err = iamClient.DeleteInstanceProfile(ctx, &iam.DeleteInstanceProfileInput{
+			InstanceProfileName: instanceProfile.InstanceProfileName,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	// Detach the policies from the role
+	attachedPolicies, err := iamClient.ListAttachedRolePolicies(ctx, &iam.ListAttachedRolePoliciesInput{
+		RoleName: role.RoleName,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d policies associated with role", len(attachedPolicies.AttachedPolicies)),
+		v1.Field{Key: "roleName", Value: *role.RoleName},
+	)
+	for _, policy := range attachedPolicies.AttachedPolicies {
+		c.logger.Debug(ctx, "detaching policy from role",
+			v1.Field{Key: "policyArn", Value: *policy.PolicyArn},
+			v1.Field{Key: "roleName", Value: *role.RoleName},
+		)
+		_, err = iamClient.DetachRolePolicy(ctx, &iam.DetachRolePolicyInput{
+			RoleName:  role.RoleName,
+			PolicyArn: policy.PolicyArn,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	// Delete the role
+	c.logger.Debug(ctx, "deleting role", v1.Field{Key: "roleName", Value: *role.RoleName})
+	_, err = iamClient.DeleteRole(ctx, &iam.DeleteRoleInput{
+		RoleName: role.RoleName,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	return nil
+}
+
+func makeIAMTags(tags map[string]string) []iamtypes.Tag {
+	iamTags := make([]iamtypes.Tag, 0, len(tags))
+	for key, value := range tags {
+		iamTags = append(iamTags, iamtypes.Tag{Key: aws.String(key), Value: aws.String(value)})
+	}
+	return iamTags
+}
+
+func getNodeGroupIAMRolePath(clusterRefID string, nodeGroupRefID string) string {
+	iamPath := fmt.Sprintf("%s/%s/nodegroups/%s/", iamRolePathPrefix, clusterRefID, nodeGroupRefID)
+	// strings.ReplaceAll performs literal replacement, not pattern matching, so
+	// passing it the string "[^a-zA-Z0-9/]" was a no-op. Strip disallowed
+	// characters rune-by-rune instead, keeping the hyphens and slashes that IAM
+	// paths permit.
+	iamPath = strings.Map(func(r rune) rune {
+		switch {
+		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9', r == '/', r == '-':
+			return r
+		default:
+			return -1
+		}
+	}, iamPath)
+	return iamPath
+}
+
+func (c *AWSClient) newK8sClient(ctx context.Context, cluster *v1.Cluster) (*kubernetes.Clientset, error) {
+	newK8sConfig, err := c.newK8sConfig(ctx, cluster)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to create k8s config: %w", err))
+	}
+
+	// Create a clientset to interact with the cluster using the bearer token and CA certificate
+	clientset, err := kubernetes.NewForConfig(newK8sConfig)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to create clientset: %w", err))
+	}
+
+	return clientset, nil
+}
+
+func (c *AWSClient) newK8sConfig(ctx context.Context, cluster *v1.Cluster) (*rest.Config, error) {
+	// Decode the cluster CA certificate
+	clusterCACertificate, err := base64.StdEncoding.DecodeString(cluster.GetClusterCACertificateBase64())
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode cluster CA certificate: %w", err))
+	}
+
+	// Get a bearer token to authenticate to the cluster
+	forwardSessionName := true
+	cache := false
+	tokenGenerator, err := token.NewGenerator(forwardSessionName, cache)
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to create token generator: %w", err))
+	}
+
+	// Named authToken rather than token to avoid shadowing the token package
+	authToken, err := tokenGenerator.GetWithOptions(ctx, &token.GetTokenOptions{
+		ClusterID: cluster.GetName(),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to generate token: %w", err))
+	}
+
+	return &rest.Config{
+		Host:        cluster.GetAPIEndpoint(),
+		BearerToken: authToken.Token,
+		TLSClientConfig: rest.TLSClientConfig{
+			CAData: clusterCACertificate,
+		},
+	}, nil
+}
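
An aside on the IAM path layout used above: because every node-group role is created under a cluster-scoped path, prefix-based lookup and cleanup stay confined to a single cluster. Using the values exercised by the unit tests below:

    getNodeGroupIAMRolePath("my-cluster", "my-nodegroup")
    // => "/brevcloudsdk/eks/clusters/my-cluster/nodegroups/my-nodegroup/"
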
"test-node-group", + RefID: "test-ref", + MinNodeCount: 0, + MaxNodeCount: 3, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupMinNodeCountMustBeGreaterThan0, + }, + { + name: "max node count less than 1", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: 0, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupMaxNodeCountMustBeGreaterThan0, + }, + { + name: "max node count less than min node count", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 5, + MaxNodeCount: 3, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount, + }, + { + name: "missing instance type", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: 3, + InstanceType: "", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupInstanceTypeIsRequired, + }, + { + name: "disk size too small", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: 3, + InstanceType: "t3.medium", + DiskSizeGiB: 10, + }, + expectError: errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo20, + }, + { + name: "disk size exceeds max int32", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: 3, + InstanceType: "t3.medium", + DiskSizeGiB: math.MaxInt32 + 1, + }, + expectError: errNodeGroupDiskSizeGiBMustBeLessThanOrEqualToMaxInt32, + }, + { + name: "max node count exceeds max int32", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: math.MaxInt32 + 1, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupMaxNodeCountMustBeLessThanOrEqualToMaxInt32, + }, + { + name: "min node count exceeds max int32", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: math.MaxInt32 + 1, + MaxNodeCount: math.MaxInt32 + 2, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + }, + expectError: errNodeGroupMinNodeCountMustBeLessThanOrEqualToMaxInt32, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateCreateNodeGroupArgs(tt.args) + if err != nil && tt.expectError == nil { + t.Fatalf("expected no error but got: %v", err) + } + if err != nil && tt.expectError != nil { + if !errors.Is(err, tt.expectError) { + t.Errorf("expected error %v, got %v", tt.expectError, err) + } + } + }) + } +} + +func TestValidateModifyNodeGroupArgs(t *testing.T) { + tests := []struct { + name string + args v1.ModifyNodeGroupArgs + expectError error + }{ + { + name: "valid args", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: "cluster-123", + MinNodeCount: 1, + MaxNodeCount: 3, + }, + expectError: nil, + }, + { + name: "min node count less than 1", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: "cluster-123", + MinNodeCount: 0, + MaxNodeCount: 3, + }, + expectError: errNodeGroupMinNodeCountMustBeGreaterThan0, + }, + { + name: "max node count less than 1", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: "cluster-123", + MinNodeCount: 1, + MaxNodeCount: 0, + }, + expectError: errNodeGroupMaxNodeCountMustBeGreaterThan0, + }, + { + name: "max node count less than min node count", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: 
"cluster-123", + MinNodeCount: 5, + MaxNodeCount: 3, + }, + expectError: errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount, + }, + { + name: "min node count exceeds max int32", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: "cluster-123", + MinNodeCount: math.MaxInt32 + 1, + MaxNodeCount: math.MaxInt32 + 2, + }, + expectError: errNodeGroupMinNodeCountMustBeLessThanOrEqualToMaxInt32, + }, + { + name: "max node count exceeds max int32", + args: v1.ModifyNodeGroupArgs{ + ID: "node-group-123", + ClusterID: "cluster-123", + MinNodeCount: 1, + MaxNodeCount: math.MaxInt32 + 1, + }, + expectError: errNodeGroupMaxNodeCountMustBeLessThanOrEqualToMaxInt32, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateModifyNodeGroupArgs(tt.args) + if err != nil && tt.expectError == nil { + t.Errorf("expected error but got nil") + } + if err != nil && tt.expectError != nil { + if !errors.Is(err, tt.expectError) { + t.Errorf("expected error %v, got %v", tt.expectError, err) + } + } + }) + } +} + +func TestValidatePutUserArgs(t *testing.T) { + tests := []struct { + name string + args v1.SetClusterUserArgs + expectError error + }{ + { + name: "valid args", + args: v1.SetClusterUserArgs{ + Username: "test-user", + Role: "cluster-admin", + ClusterID: "cluster-123", + RSAPEMBase64: "base64encodedkey", + }, + expectError: nil, + }, + { + name: "missing username", + args: v1.SetClusterUserArgs{ + Username: "", + Role: "cluster-admin", + ClusterID: "cluster-123", + RSAPEMBase64: "base64encodedkey", + }, + expectError: errUsernameIsRequired, + }, + { + name: "missing role", + args: v1.SetClusterUserArgs{ + Username: "test-user", + Role: "", + ClusterID: "cluster-123", + RSAPEMBase64: "base64encodedkey", + }, + expectError: errRoleIsRequired, + }, + { + name: "missing cluster ID", + args: v1.SetClusterUserArgs{ + Username: "test-user", + Role: "cluster-admin", + ClusterID: "", + RSAPEMBase64: "base64encodedkey", + }, + expectError: errClusterIDIsRequired, + }, + { + name: "missing RSA PEM base64", + args: v1.SetClusterUserArgs{ + Username: "test-user", + Role: "cluster-admin", + ClusterID: "cluster-123", + RSAPEMBase64: "", + }, + expectError: errRSAPEMBase64IsRequired, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validatePutUserArgs(tt.args) + if err != nil && tt.expectError == nil { + t.Errorf("expected error but got nil") + } + if err != nil && tt.expectError != nil { + if !errors.Is(err, tt.expectError) { + t.Errorf("expected error %v, got %v", tt.expectError, err) + } + } + }) + } +} + +func TestMakeIAMTags(t *testing.T) { + tests := []struct { + name string + tags map[string]string + expected int + }{ + { + name: "empty tags", + tags: map[string]string{}, + expected: 0, + }, + { + name: "single tag", + tags: map[string]string{ + "Name": "test-cluster", + }, + expected: 1, + }, + { + name: "multiple tags", + tags: map[string]string{ + "Name": "test-cluster", + "Environment": "production", + "Team": "platform", + }, + expected: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iamTags := makeIAMTags(tt.tags) + + if len(iamTags) != tt.expected { + t.Errorf("expected %d tags, got %d", tt.expected, len(iamTags)) + } + + // Verify tags are properly converted + tagMap := make(map[string]string) + for _, tag := range iamTags { + tagMap[*tag.Key] = *tag.Value + } + + for key, value := range tt.tags { + if tagMap[key] != value { + t.Errorf("expected tag %s=%s, got %s=%s", 
+
+func TestMakeIAMTags(t *testing.T) {
+	tests := []struct {
+		name     string
+		tags     map[string]string
+		expected int
+	}{
+		{
+			name:     "empty tags",
+			tags:     map[string]string{},
+			expected: 0,
+		},
+		{
+			name: "single tag",
+			tags: map[string]string{
+				"Name": "test-cluster",
+			},
+			expected: 1,
+		},
+		{
+			name: "multiple tags",
+			tags: map[string]string{
+				"Name":        "test-cluster",
+				"Environment": "production",
+				"Team":        "platform",
+			},
+			expected: 3,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			iamTags := makeIAMTags(tt.tags)
+
+			if len(iamTags) != tt.expected {
+				t.Errorf("expected %d tags, got %d", tt.expected, len(iamTags))
+			}
+
+			// Verify tags are properly converted
+			tagMap := make(map[string]string)
+			for _, tag := range iamTags {
+				tagMap[*tag.Key] = *tag.Value
+			}
+
+			for key, value := range tt.tags {
+				if tagMap[key] != value {
+					t.Errorf("expected tag %s=%s, got %s=%s", key, value, key, tagMap[key])
+				}
+			}
+		})
+	}
+}
+
+func TestGetNodeGroupIAMRolePath(t *testing.T) {
+	tests := []struct {
+		name           string
+		clusterRefID   string
+		nodeGroupRefID string
+		expectedPath   string
+	}{
+		{
+			name:           "basic path",
+			clusterRefID:   "my-cluster",
+			nodeGroupRefID: "my-nodegroup",
+			expectedPath:   "/brevcloudsdk/eks/clusters/my-cluster/nodegroups/my-nodegroup/",
+		},
+		{
+			name:           "with special characters",
+			clusterRefID:   "my-cluster-123",
+			nodeGroupRefID: "my-nodegroup-456",
+			expectedPath:   "/brevcloudsdk/eks/clusters/my-cluster-123/nodegroups/my-nodegroup-456/",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			path := getNodeGroupIAMRolePath(tt.clusterRefID, tt.nodeGroupRefID)
+
+			// The assertion is an exact match, so the field is named
+			// expectedPath rather than the misleading expectedContain.
+			if path != tt.expectedPath {
+				t.Errorf("expected path to be %s, got %s", tt.expectedPath, path)
+			}
+		})
+	}
+}
+
+func TestParseEKSClusterStatus(t *testing.T) {
+	tests := []struct {
+		name           string
+		status         ekstypes.ClusterStatus
+		expectedStatus v1.ClusterStatus
+	}{
+		{
+			name:           "creating",
+			status:         ekstypes.ClusterStatusCreating,
+			expectedStatus: v1.ClusterStatusPending,
+		},
+		{
+			name:           "active",
+			status:         ekstypes.ClusterStatusActive,
+			expectedStatus: v1.ClusterStatusAvailable,
+		},
+		{
+			name:           "deleting",
+			status:         ekstypes.ClusterStatusDeleting,
+			expectedStatus: v1.ClusterStatusDeleting,
+		},
+		{
+			name:           "failed",
+			status:         ekstypes.ClusterStatusFailed,
+			expectedStatus: v1.ClusterStatusFailed,
+		},
+		{
+			name:           "updating",
+			status:         ekstypes.ClusterStatusUpdating,
+			expectedStatus: v1.ClusterStatusPending,
+		},
+		{
+			name:           "pending",
+			status:         ekstypes.ClusterStatusPending,
+			expectedStatus: v1.ClusterStatusPending,
+		},
+		{
+			name:           "unknown",
+			status:         ekstypes.ClusterStatus("foobar"),
+			expectedStatus: v1.ClusterStatusUnknown,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := parseEKSClusterStatus(tt.status)
+			if result != tt.expectedStatus {
+				t.Errorf("expected status %v, got %v", tt.expectedStatus, result)
+			}
+		})
+	}
+}
+
+func TestParseEKSNodeGroupStatus(t *testing.T) {
+	tests := []struct {
+		name           string
+		status         ekstypes.NodegroupStatus
+		expectedStatus v1.NodeGroupStatus
+	}{
+		{
+			name:           "creating",
+			status:         ekstypes.NodegroupStatusCreating,
+			expectedStatus: v1.NodeGroupStatusPending,
+		},
+		{
+			name:           "active",
+			status:         ekstypes.NodegroupStatusActive,
+			expectedStatus: v1.NodeGroupStatusAvailable,
+		},
+		{
+			name:           "deleting",
+			status:         ekstypes.NodegroupStatusDeleting,
+			expectedStatus: v1.NodeGroupStatusDeleting,
+		},
+		{
+			name:           "create failed",
+			status:         ekstypes.NodegroupStatusCreateFailed,
+			expectedStatus: v1.NodeGroupStatusFailed,
+		},
+		{
+			name:           "delete failed",
+			status:         ekstypes.NodegroupStatusDeleteFailed,
+			expectedStatus: v1.NodeGroupStatusFailed,
+		},
+		{
+			name:           "unknown",
+			status:         ekstypes.NodegroupStatus("foobar"),
+			expectedStatus: v1.NodeGroupStatusUnknown,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := parseEKSNodeGroupStatus(tt.status)
+			if result != tt.expectedStatus {
+				t.Errorf("expected status %v, got %v", tt.expectedStatus, result)
+			}
+		})
+	}
+}
+
+func TestGetClusterCACertificateBase64(t *testing.T) {
+	tests := []struct {
+		name     string
+		cluster  *ekstypes.Cluster
+		expected string
+	}{
+		{
+			name:     "nil cluster",
+			cluster:  nil,
+			expected: "",
+		},
+		{
+			name: "nil certificate authority",
+			cluster: &ekstypes.Cluster{
+				CertificateAuthority: nil,
+			},
+			expected: "",
+		},
+		{
+			name: "nil certificate data",
+			cluster: &ekstypes.Cluster{
+				CertificateAuthority: &ekstypes.Certificate{
+					Data: nil,
+				},
+			},
+			expected: "",
+		},
+		{
+			name: "valid certificate",
+			cluster: &ekstypes.Cluster{
+				CertificateAuthority: &ekstypes.Certificate{
+					Data: aws.String("base64encodedcert"),
+				},
+			},
+			expected: "base64encodedcert",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getClusterCACertificateBase64(tt.cluster)
+			if result != tt.expected {
+				t.Errorf("expected %s, got %s", tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestGetClusterAPIEndpoint(t *testing.T) {
+	tests := []struct {
+		name     string
+		cluster  *ekstypes.Cluster
+		expected string
+	}{
+		{
+			name:     "nil cluster",
+			cluster:  nil,
+			expected: "",
+		},
+		{
+			name: "nil endpoint",
+			cluster: &ekstypes.Cluster{
+				Endpoint: nil,
+			},
+			expected: "",
+		},
+		{
+			name: "valid endpoint",
+			cluster: &ekstypes.Cluster{
+				Endpoint: aws.String("https://example.com"),
+			},
+			expected: "https://example.com",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getClusterAPIEndpoint(tt.cluster)
+			if result != tt.expected {
+				t.Errorf("expected %s, got %s", tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestParseEKSNodeGroup(t *testing.T) { //nolint:gocognit // test ok
+	tests := []struct {
+		name      string
+		nodeGroup *ekstypes.Nodegroup
+	}{
+		{
+			name: "valid node group",
+			nodeGroup: &ekstypes.Nodegroup{
+				NodegroupName: aws.String("test-nodegroup"),
+				ScalingConfig: &ekstypes.NodegroupScalingConfig{
+					MinSize: aws.Int32(1),
+					MaxSize: aws.Int32(3),
+				},
+				InstanceTypes: []string{"t3.medium"},
+				DiskSize:      aws.Int32(20),
+				Status:        ekstypes.NodegroupStatusActive,
+				Tags: map[string]string{
+					tagBrevRefID: "test-ref",
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := parseEKSNodeGroup(tt.nodeGroup)
+			if err != nil {
+				t.Fatalf("expected no error but got: %v", err)
+			}
+			if result == nil {
+				t.Fatalf("expected valid node group but got nil")
+			}
+			if result.GetName() != *tt.nodeGroup.NodegroupName {
+				t.Errorf("expected name %s, got %s", *tt.nodeGroup.NodegroupName, result.GetName())
+			}
+			if result.GetMinNodeCount() != int(*tt.nodeGroup.ScalingConfig.MinSize) {
+				t.Errorf("expected min node count %d, got %d", *tt.nodeGroup.ScalingConfig.MinSize, result.GetMinNodeCount())
+			}
+			if result.GetMaxNodeCount() != int(*tt.nodeGroup.ScalingConfig.MaxSize) {
+				t.Errorf("expected max node count %d, got %d", *tt.nodeGroup.ScalingConfig.MaxSize, result.GetMaxNodeCount())
+			}
+			if result.GetInstanceType() != tt.nodeGroup.InstanceTypes[0] {
+				t.Errorf("expected instance type %s, got %s", tt.nodeGroup.InstanceTypes[0], result.GetInstanceType())
+			}
+			if result.GetDiskSizeGiB() != int(*tt.nodeGroup.DiskSize) {
+				t.Errorf("expected disk size %d, got %d", *tt.nodeGroup.DiskSize, result.GetDiskSizeGiB())
+			}
+			if result.GetStatus() != parseEKSNodeGroupStatus(tt.nodeGroup.Status) {
+				t.Errorf("expected status %v, got %v", parseEKSNodeGroupStatus(tt.nodeGroup.Status), result.GetStatus())
+			}
+			if result.GetRefID() != tt.nodeGroup.Tags[tagBrevRefID] {
+				t.Errorf("expected ref ID %s, got %s", tt.nodeGroup.Tags[tagBrevRefID], result.GetRefID())
+			}
+			if !reflect.DeepEqual(result.GetTags(), v1.Tags(tt.nodeGroup.Tags)) {
+				t.Errorf("expected tags %v, got %v", v1.Tags(tt.nodeGroup.Tags), result.GetTags())
+			}
+		})
+	}
+}
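
An aside before the networking code: a minimal sketch of how the VPC creation path below might be driven. The field names come from the args used in network.go; the client construction and CIDR values are illustrative assumptions only:

    vpc, err := client.CreateVPC(ctx, v1.CreateVPCArgs{
        RefID:     "vpc-ref-1",
        Name:      "demo-vpc",
        CidrBlock: "10.0.0.0/16",
        Subnets: []v1.CreateSubnetArgs{
            {Type: v1.SubnetTypePublic, CidrBlock: "10.0.0.0/24"},
            {Type: v1.SubnetTypePrivate, CidrBlock: "10.0.1.0/24"},
        },
    })
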
diff --git a/v1/providers/aws/network.go b/v1/providers/aws/network.go
new file mode 100644
index 0000000..e68f7c8
--- /dev/null
+++ b/v1/providers/aws/network.go
@@ -0,0 +1,1042 @@
+package v1
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/aws/smithy-go"
+
+	"github.com/brevdev/cloud/internal/errors"
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+func (c *AWSClient) CreateVPC(ctx context.Context, args v1.CreateVPCArgs) (*v1.VPC, error) {
+	// Validate the inputs
+	publicSubnetArgs := filterSubnetArgs(args.Subnets, v1.SubnetTypePublic)
+	privateSubnetArgs := filterSubnetArgs(args.Subnets, v1.SubnetTypePrivate)
+
+	// If there are no public subnets but there are private subnets, return an error, as we need at least one
+	// public subnet to create NAT gateways for private subnets.
+	if len(publicSubnetArgs) == 0 && len(privateSubnetArgs) > 0 {
+		return nil, errors.WrapAndTrace(fmt.Errorf("VPC creation with private subnets requires at least one public subnet, but no public subnets were provided for VPC %s", args.RefID))
+	}
+
+	// Create the AWS client in the specified region
+	awsClient := ec2.NewFromConfig(c.awsConfig)
+
+	// Create the VPC and subnets
+	awsVPC, subnets, err := c.createCompleteVPC(ctx, awsClient, args, publicSubnetArgs, privateSubnetArgs)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	tags := make(map[string]string)
+	for _, tag := range awsVPC.Tags {
+		tags[*tag.Key] = *tag.Value
+	}
+
+	brevVPC, err := v1.NewVPC(v1.VPCSettings{
+		RefID:     args.RefID,
+		Name:      args.Name,
+		Location:  c.region,
+		Provider:  CloudProviderID,
+		Cloud:     CloudProviderID,
+		ID:        v1.CloudProviderResourceID(*awsVPC.VpcId),
+		CidrBlock: *awsVPC.CidrBlock,
+		Status:    v1.VPCStatusPending,
+		Subnets:   subnets,
+		Tags:      v1.Tags(tags),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevVPC, nil
+}
+
+// Helper function to filter subnet arguments by type
+func filterSubnetArgs(subnets []v1.CreateSubnetArgs, subnetType v1.SubnetType) []v1.CreateSubnetArgs {
+	filteredSubnets := make([]v1.CreateSubnetArgs, 0)
+	for _, subnet := range subnets {
+		if subnet.Type == subnetType {
+			filteredSubnets = append(filteredSubnets, subnet)
+		}
+	}
+	return filteredSubnets
+}
+
+func (c *AWSClient) createCompleteVPC(ctx context.Context, awsClient *ec2.Client, args v1.CreateVPCArgs, publicSubnetArgs []v1.CreateSubnetArgs, privateSubnetArgs []v1.CreateSubnetArgs) (*types.Vpc, []*v1.Subnet, error) {
+	// Create the VPC
+	vpc, err := c.createVPC(ctx, awsClient, args)
+	if err != nil {
+		return nil, nil, errors.WrapAndTrace(err)
+	}
+
+	// Enable DNS support for the VPC
+	err = c.enableVPCDNSSupport(ctx, awsClient, vpc)
+	if err != nil {
+		return nil, nil, errors.WrapAndTrace(err)
+	}
+
+	// Enable DNS hostnames for the VPC
+	err = c.enableVPCDNSHostnames(ctx, awsClient, vpc)
+	if err != nil {
+		return nil, nil, errors.WrapAndTrace(err)
+	}
+
+	// Create an Internet Gateway for the VPC
+	_, err = c.createInternetGateway(ctx, awsClient, vpc, args)
+	if err != nil {
+		return nil, nil, errors.WrapAndTrace(err)
+	}
+
+	// Get the availability zones in the context region
+	availabilityZones, err := getAvailabilityZones(ctx, awsClient)
+	if err != nil {
+		return nil, nil, errors.WrapAndTrace(err)
+	}
+
+	var subnets []*v1.Subnet
+
+	// Create public subnets (TODO: parallelize)
+	var publicSubnets []*types.Subnet
+	for i, subnetArgs := range publicSubnetArgs {
+		// Round-robin through the availability zones
+		availabilityZone := availabilityZones[i%len(availabilityZones)]
+
+		// Create the public subnet
+		publicSubnet, err := c.createPublicSubnet(ctx, awsClient, vpc, subnetArgs, args, availabilityZone)
+		if err != nil {
+			return nil, nil, errors.WrapAndTrace(err)
+		}
+		publicSubnets = append(publicSubnets, publicSubnet)
+
+		brevSubnet, err := awsSubnetToCloudSubnet(publicSubnet, v1.SubnetTypePublic, vpc)
+		if err != nil {
+			return nil, nil, errors.WrapAndTrace(err)
+		}
+		subnets = append(subnets, brevSubnet)
+	}
+
+	// Create private subnets (TODO: parallelize)
+	for i := range privateSubnetArgs {
+		// Choose a public subnet for the NAT gateway
+		natGatewaySubnet := publicSubnets[i%len(publicSubnets)]
+		subnetArgs := privateSubnetArgs[i]
+
+		// Create the private subnet
+		privateSubnet, err := c.createPrivateSubnet(ctx, awsClient, vpc, natGatewaySubnet, subnetArgs)
+		if err != nil {
+			return nil, nil, errors.WrapAndTrace(err)
+		}
+
+		brevSubnet, err := awsSubnetToCloudSubnet(privateSubnet, v1.SubnetTypePrivate, vpc)
+		if err != nil {
+			return nil, nil, errors.WrapAndTrace(err)
+		}
+		subnets = append(subnets, brevSubnet)
+	}
+
+	return vpc, subnets, nil
+}
+
+func getAvailabilityZones(ctx context.Context, awsClient *ec2.Client) ([]string, error) {
+	describeAvailabilityZonesOutput, err := awsClient.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	availabilityZones := []string{}
+	for _, availabilityZone := range describeAvailabilityZonesOutput.AvailabilityZones {
+		availabilityZones = append(availabilityZones, *availabilityZone.ZoneName)
+	}
+
+	// Sort the availability zones alphabetically for consistent round-robin behavior
+	sort.Strings(availabilityZones)
+
+	return availabilityZones, nil
+}
+
+func (c *AWSClient) createVPC(ctx context.Context, awsClient *ec2.Client, args v1.CreateVPCArgs) (*types.Vpc, error) {
+	// Convert the tags to AWS tags
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	// Add the required tags
+	tags[tagName] = args.Name
+	tags[tagBrevRefID] = args.RefID
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating VPC",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "refID", Value: tags[tagBrevRefID]},
+		v1.Field{Key: "cidrBlock", Value: args.CidrBlock},
+	)
+	input := &ec2.CreateVpcInput{
+		CidrBlock: &args.CidrBlock,
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeVpc,
+				Tags:         awsTags,
+			},
+		},
+	}
+	output, err := awsClient.CreateVpc(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return output.Vpc, nil
+}
+
+func (c *AWSClient) enableVPCDNSSupport(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc) error {
+	input := &ec2.ModifyVpcAttributeInput{
+		VpcId: vpc.VpcId,
+		EnableDnsSupport: &types.AttributeBooleanValue{
+			Value: aws.Bool(true),
+		},
+	}
+
+	c.logger.Debug(ctx, "enabling DNS support for VPC", v1.Field{Key: "vpcID", Value: *vpc.VpcId})
+	_, err := awsClient.ModifyVpcAttribute(ctx, input)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+	return nil
+}
+
+func (c *AWSClient) enableVPCDNSHostnames(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc) error {
+	input := &ec2.ModifyVpcAttributeInput{
+		VpcId: vpc.VpcId,
+		EnableDnsHostnames: &types.AttributeBooleanValue{
+			Value: aws.Bool(true),
+		},
+	}
+
+	c.logger.Debug(ctx, "enabling DNS hostnames for VPC", v1.Field{Key: "vpcID", Value: *vpc.VpcId})
+	_, err := awsClient.ModifyVpcAttribute(ctx, input)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+	return nil
+}
+
+func (c *AWSClient) createInternetGateway(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, args v1.CreateVPCArgs) (*types.InternetGateway, error) {
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = fmt.Sprintf("%s-public", *vpc.VpcId)
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	// Create an Internet Gateway for the VPC
+	c.logger.Debug(ctx, "creating internet gateway",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+	)
+	createInput := &ec2.CreateInternetGatewayInput{
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeInternetGateway,
+				Tags:         awsTags,
+			},
+		},
+	}
+	createOutput, err := awsClient.CreateInternetGateway(ctx, createInput)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	internetGateway := createOutput.InternetGateway
+
+	// Attach the Internet Gateway to the VPC
+	c.logger.Debug(ctx, "attaching internet gateway to VPC",
+		v1.Field{Key: "internetGatewayID", Value: *internetGateway.InternetGatewayId},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+	)
+	_, err = awsClient.AttachInternetGateway(ctx, &ec2.AttachInternetGatewayInput{
+		InternetGatewayId: internetGateway.InternetGatewayId,
+		VpcId:             vpc.VpcId,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return internetGateway, nil
+}
+
+func awsSubnetToCloudSubnet(awsSubnet *types.Subnet, subnetType v1.SubnetType, vpc *types.Vpc) (*v1.Subnet, error) {
+	tags := make(map[string]string)
+	for _, tag := range awsSubnet.Tags {
+		tags[*tag.Key] = *tag.Value
+	}
+
+	brevSubnet, err := v1.NewSubnet(v1.SubnetSettings{
+		ID:        v1.CloudProviderResourceID(*awsSubnet.SubnetId),
+		RefID:     tags[tagBrevRefID],
+		Name:      tags[tagName],
+		VPCID:     v1.CloudProviderResourceID(*vpc.VpcId),
+		Location:  *awsSubnet.AvailabilityZone,
+		CidrBlock: *awsSubnet.CidrBlock,
+		Type:      subnetType,
+		Tags:      v1.Tags(tags),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevSubnet, nil
+}
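+
+// Note on the subnet wiring implemented below: every public subnet is
+// associated with a shared "-public" route table whose default route points at
+// the VPC's internet gateway, while each private subnet gets its own route
+// table whose default route points at a NAT gateway placed in one of the
+// public subnets. Private subnets therefore get outbound internet access
+// without being directly addressable from the internet.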
+
+func (c *AWSClient) createPublicSubnet(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, createSubnetArgs v1.CreateSubnetArgs, createVPCArgs v1.CreateVPCArgs, availabilityZone string) (*types.Subnet, error) {
+	tags := make(map[string]string)
+	for key, value := range createSubnetArgs.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = fmt.Sprintf("%s-public", *vpc.VpcId)
+	if createSubnetArgs.RefID != "" {
+		tags[tagBrevRefID] = createSubnetArgs.RefID
+	} else {
+		tags[tagBrevRefID] = tags[tagName]
+	}
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagBrevSubnetType] = string(createSubnetArgs.Type)
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating public subnet",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "refID", Value: tags[tagBrevRefID]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+		v1.Field{Key: "type", Value: tags[tagBrevSubnetType]},
+		v1.Field{Key: "cidrBlock", Value: createSubnetArgs.CidrBlock},
+		v1.Field{Key: "availabilityZone", Value: availabilityZone},
+	)
+	input := &ec2.CreateSubnetInput{
+		VpcId:            vpc.VpcId,
+		CidrBlock:        aws.String(createSubnetArgs.CidrBlock),
+		AvailabilityZone: aws.String(availabilityZone),
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeSubnet,
+				Tags:         awsTags,
+			},
+		},
+	}
+	output, err := awsClient.CreateSubnet(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	subnet := output.Subnet
+
+	// Get or create the Public Route Table for the VPC
+	publicRouteTable, err := c.getOrCreatePublicRouteTable(ctx, awsClient, vpc, createVPCArgs)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Associate the Public Subnet with the Public Route Table
+	_, err = awsClient.AssociateRouteTable(ctx, &ec2.AssociateRouteTableInput{
+		RouteTableId: publicRouteTable.RouteTableId,
+		SubnetId:     subnet.SubnetId,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return subnet, nil
+}
+
+func (c *AWSClient) getOrCreatePublicRouteTable(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, args v1.CreateVPCArgs) (*types.RouteTable, error) {
+	// Find the Public Route Table
+	rtNameTag := fmt.Sprintf("%s-public", *vpc.VpcId)
+
+	describeRouteTablesOutput, err := awsClient.DescribeRouteTables(ctx, &ec2.DescribeRouteTablesInput{
+		Filters: []types.Filter{
+			{
+				Name:   aws.String("vpc-id"),
+				Values: []string{*vpc.VpcId},
+			},
+			{
+				Name:   aws.String("tag:Name"),
+				Values: []string{rtNameTag},
+			},
+		},
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// If there are multiple public route tables, return an error
+	if len(describeRouteTablesOutput.RouteTables) > 1 {
+		return nil, fmt.Errorf("multiple public route tables found for VPC %s", *vpc.VpcId)
+	}
+
+	// If there is one public route table, return it
+	if len(describeRouteTablesOutput.RouteTables) == 1 {
+		c.logger.Debug(ctx, "found public route table", v1.Field{Key: "routeTableID", Value: *describeRouteTablesOutput.RouteTables[0].RouteTableId})
+		return &describeRouteTablesOutput.RouteTables[0], nil
+	}
+
+	// If there is no public route table, create one
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = rtNameTag
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating public route table",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+	)
+	input := &ec2.CreateRouteTableInput{
+		VpcId: aws.String(*vpc.VpcId),
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeRouteTable,
+				Tags:         awsTags,
+			},
+		},
+	}
+	output, err := awsClient.CreateRouteTable(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	routeTable := output.RouteTable
+
+	// Get or create the Internet Gateway
+	internetGateway, err := c.getOrCreateInternetGateway(ctx, awsClient, vpc, args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create the route to the Internet Gateway
+	_, err = awsClient.CreateRoute(ctx, &ec2.CreateRouteInput{
+		RouteTableId:         routeTable.RouteTableId,
+		GatewayId:            internetGateway.InternetGatewayId,
+		DestinationCidrBlock: aws.String("0.0.0.0/0"),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return routeTable, nil
+}
+
+func (c *AWSClient) getOrCreateInternetGateway(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, args v1.CreateVPCArgs) (*types.InternetGateway, error) {
+	// Find the Internet Gateway
+	igwNameTag := fmt.Sprintf("%s-public", *vpc.VpcId)
+
+	describeInternetGatewaysOutput, err := awsClient.DescribeInternetGateways(ctx, &ec2.DescribeInternetGatewaysInput{
+		Filters: []types.Filter{
+			{
+				Name:   aws.String("attachment.vpc-id"),
+				Values: []string{*vpc.VpcId},
+			},
+			{
+				Name:   aws.String("tag:Name"),
+				Values: []string{igwNameTag},
+			},
+		},
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// If there are multiple internet gateways, return an error
+	if len(describeInternetGatewaysOutput.InternetGateways) > 1 {
+		return nil, fmt.Errorf("multiple internet gateways found for VPC %s", *vpc.VpcId)
+	}
+
+	// If there is one internet gateway, return it
+	if len(describeInternetGatewaysOutput.InternetGateways) == 1 {
+		c.logger.Debug(ctx, "found internet gateway", v1.Field{Key: "internetGatewayID", Value: *describeInternetGatewaysOutput.InternetGateways[0].InternetGatewayId})
+		return &describeInternetGatewaysOutput.InternetGateways[0], nil
+	}
+
+	// If there is no internet gateway, create one
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = igwNameTag
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating internet gateway",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+	)
+	input := &ec2.CreateInternetGatewayInput{
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeInternetGateway,
+				Tags:         awsTags,
+			},
+		},
+	}
+	output, err := awsClient.CreateInternetGateway(ctx, input)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return output.InternetGateway, nil
+}
+
+func (c *AWSClient) createPrivateSubnet(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, natGatewaySubnet *types.Subnet, createSubnetArgs v1.CreateSubnetArgs) (*types.Subnet, error) {
+	tags := make(map[string]string)
+	for key, value := range createSubnetArgs.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = fmt.Sprintf("%s-private", *vpc.VpcId)
+	if createSubnetArgs.RefID != "" {
+		tags[tagBrevRefID] = createSubnetArgs.RefID
+	} else {
+		tags[tagBrevRefID] = tags[tagName]
+	}
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagBrevSubnetType] = string(createSubnetArgs.Type)
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating private subnet",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "refID", Value: tags[tagBrevRefID]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+		v1.Field{Key: "type", Value: tags[tagBrevSubnetType]},
+		v1.Field{Key: "cidrBlock", Value: createSubnetArgs.CidrBlock},
+		v1.Field{Key: "availabilityZone", Value: *natGatewaySubnet.AvailabilityZone},
+	)
+	createSubnetInput := &ec2.CreateSubnetInput{
+		VpcId:            vpc.VpcId,
+		CidrBlock:        aws.String(createSubnetArgs.CidrBlock),
+		AvailabilityZone: aws.String(*natGatewaySubnet.AvailabilityZone),
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeSubnet,
+				Tags:         awsTags,
+			},
+		},
+	}
+	createSubnetOutput, err := awsClient.CreateSubnet(ctx, createSubnetInput)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Get or create the NAT Gateway
+	natGateway, err := c.createNatGateway(ctx, awsClient, vpc, natGatewaySubnet, createSubnetArgs)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create a private route table
+	tags = make(map[string]string)
+	for key, value := range createSubnetArgs.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = fmt.Sprintf("%s-private", *vpc.VpcId)
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags = makeEC2Tags(tags)
+
+	createRouteTableInput := &ec2.CreateRouteTableInput{
+		VpcId: aws.String(*vpc.VpcId),
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeRouteTable,
+				Tags:         awsTags,
+			},
+		},
+	}
+	createRouteTableOutput, err := awsClient.CreateRouteTable(ctx, createRouteTableInput)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	routeTable := createRouteTableOutput.RouteTable
+
+	// Associate the Private Subnet with the Private Route Table
+	_, err = awsClient.AssociateRouteTable(ctx, &ec2.AssociateRouteTableInput{
+		RouteTableId: routeTable.RouteTableId,
+		SubnetId:     createSubnetOutput.Subnet.SubnetId,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create a route to the NAT Gateway. Note the NAT gateway ID must be set
+	// via NatGatewayId; GatewayId is only for internet and virtual private
+	// gateways, and a nat-* ID passed there is rejected by the API.
+	_, err = awsClient.CreateRoute(ctx, &ec2.CreateRouteInput{
+		RouteTableId:         routeTable.RouteTableId,
+		DestinationCidrBlock: aws.String("0.0.0.0/0"),
+		NatGatewayId:         natGateway.NatGatewayId,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return createSubnetOutput.Subnet, nil
+}
+
+func (c *AWSClient) createNatGateway(ctx context.Context, awsClient *ec2.Client, vpc *types.Vpc, subnet *types.Subnet, args v1.CreateSubnetArgs) (*types.NatGateway, error) {
+	// Allocate an Elastic IP address for the NAT Gateway
+	allocateElasticIPOutput, err := awsClient.AllocateAddress(ctx, &ec2.AllocateAddressInput{
+		Domain: types.DomainTypeVpc,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create the NAT Gateway in the provided subnet
+	tags := make(map[string]string)
+	for key, value := range args.Tags {
+		tags[key] = value
+	}
+
+	tags[tagName] = fmt.Sprintf("%s-nat", *vpc.VpcId)
+	tags[tagBrevVPCID] = *vpc.VpcId
+	tags[tagCreatedBy] = tagBrevCloudSDK
+
+	awsTags := makeEC2Tags(tags)
+
+	c.logger.Debug(ctx, "creating NAT gateway",
+		v1.Field{Key: "name", Value: tags[tagName]},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+		v1.Field{Key: "subnetID", Value: *subnet.SubnetId},
+		v1.Field{Key: "allocationID", Value: *allocateElasticIPOutput.AllocationId},
+	)
+	createNatGatewayInput := &ec2.CreateNatGatewayInput{
+		SubnetId:     subnet.SubnetId,
+		AllocationId: allocateElasticIPOutput.AllocationId,
+		TagSpecifications: []types.TagSpecification{
+			{
+				ResourceType: types.ResourceTypeNatgateway,
+				Tags:         awsTags,
+			},
+		},
+	}
+	createNatGatewayOutput, err := awsClient.CreateNatGateway(ctx, createNatGatewayInput)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	natGateway := createNatGatewayOutput.NatGateway
+
+	// Wait for the NAT Gateway to be available
+	c.logger.Debug(ctx, "waiting for NAT gateway to be available",
+		v1.Field{Key: "natGatewayID", Value: *natGateway.NatGatewayId},
+		v1.Field{Key: "vpcID", Value: *vpc.VpcId},
+		v1.Field{Key: "subnetID", Value: *subnet.SubnetId},
+	)
+
+	w := ec2.NewNatGatewayAvailableWaiter(awsClient, func(o *ec2.NatGatewayAvailableWaiterOptions) {
+		o.MaxDelay = 10 * time.Second
+		o.MinDelay = 10 * time.Second
+	})
+	err = w.Wait(ctx, &ec2.DescribeNatGatewaysInput{
+		NatGatewayIds: []string{*natGateway.NatGatewayId},
+	}, 10*time.Minute)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	return natGateway, nil
+}
ec2.NewFromConfig(c.awsConfig) + + awsVPC, err := getVPC(ctx, awsClient, args) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + tags := make(map[string]string) + for _, tag := range awsVPC.Tags { + tags[*tag.Key] = *tag.Value + } + brevVPCName := tags[tagName] + brevRefID := tags[tagBrevRefID] + + status, err := getVPCStatus(ctx, awsClient, awsVPC) + if err != nil { + return nil, err + } + + subnets, err := getVPCSubnets(ctx, awsClient, awsVPC) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + brevVPC, err := v1.NewVPC(v1.VPCSettings{ + RefID: brevRefID, + Name: brevVPCName, + Location: c.region, + ID: v1.CloudProviderResourceID(*awsVPC.VpcId), + Provider: CloudProviderID, + Cloud: CloudProviderID, + CidrBlock: *awsVPC.CidrBlock, + Status: status, + Subnets: subnets, + Tags: v1.Tags(tags), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevVPC, nil +} + +func getVPC(ctx context.Context, awsClient *ec2.Client, args v1.GetVPCArgs) (*types.Vpc, error) { + describeVPCsOutput, err := awsClient.DescribeVpcs(ctx, &ec2.DescribeVpcsInput{ + VpcIds: []string{string(args.ID)}, + }) + if err != nil { + var apiErr smithy.APIError + if errors.As(err, &apiErr) && apiErr.ErrorCode() == "InvalidVpcID.NotFound" { + return nil, v1.ErrResourceNotFound + } + return nil, errors.WrapAndTrace(err) + } + + if len(describeVPCsOutput.Vpcs) == 0 { + return nil, nil + } + + return &describeVPCsOutput.Vpcs[0], nil +} + +func getVPCStatus(ctx context.Context, awsClient *ec2.Client, awsVPC *types.Vpc) (v1.VPCStatus, error) { + if awsVPC.State == types.VpcStatePending { + return v1.VPCStatusPending, nil + } + + // The VPC is available if all NAT gateways are available + natGateways, err := awsClient.DescribeNatGateways(ctx, &ec2.DescribeNatGatewaysInput{ + Filter: []types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{*awsVPC.VpcId}, + }, + }, + }) + if err != nil { + return v1.VPCStatusAvailable, errors.WrapAndTrace(err) + } + + for _, natGateway := range natGateways.NatGateways { + if natGateway.State != types.NatGatewayStateAvailable { + return v1.VPCStatusPending, nil + } + } + + return v1.VPCStatusAvailable, nil +} + +func getVPCSubnets(ctx context.Context, awsClient *ec2.Client, awsVPC *types.Vpc) ([]*v1.Subnet, error) { + describeSubnetsOutput, err := awsClient.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{*awsVPC.VpcId}, + }, + }, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + subnets := make([]*v1.Subnet, 0) + for _, subnet := range describeSubnetsOutput.Subnets { + tags := make(map[string]string) + for _, tag := range subnet.Tags { + tags[*tag.Key] = *tag.Value + } + + brevSubnetName := tags[tagName] + brevSubnetType := v1.SubnetType(tags[tagBrevSubnetType]) + + brevSubnet, err := v1.NewSubnet(v1.SubnetSettings{ + ID: v1.CloudProviderResourceID(*subnet.SubnetId), + RefID: tags[tagBrevRefID], + VPCID: v1.CloudProviderResourceID(*awsVPC.VpcId), + Location: *subnet.AvailabilityZone, + CidrBlock: *subnet.CidrBlock, + Type: brevSubnetType, + Name: brevSubnetName, + Tags: v1.Tags(tags), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + subnets = append(subnets, brevSubnet) + } + return subnets, nil +} + +func (c *AWSClient) DeleteVPC(ctx context.Context, args v1.DeleteVPCArgs) error { + awsClient := ec2.NewFromConfig(c.awsConfig) + + err := c.deleteVPC(ctx, awsClient, string(args.ID)) + if err != nil { + return 
errors.WrapAndTrace(err)
+	}
+	return nil
+}
+
+func (c *AWSClient) deleteVPC(ctx context.Context, awsClient *ec2.Client, vpcID string) error {
+	err := c.deleteNATGateways(ctx, awsClient, vpcID)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	err = c.deleteInternetGateways(ctx, awsClient, vpcID)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	err = c.deleteSubnets(ctx, awsClient, vpcID)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	err = c.deleteRouteTables(ctx, awsClient, vpcID)
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Delete the VPC
+	c.logger.Debug(ctx, "deleting VPC", v1.Field{Key: "vpcID", Value: vpcID})
+	_, err = awsClient.DeleteVpc(ctx, &ec2.DeleteVpcInput{
+		VpcId: aws.String(vpcID),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	return nil
+}
+
+func (c *AWSClient) deleteNATGateways(ctx context.Context, awsClient *ec2.Client, vpcID string) error {
+	// Find associated NAT gateways
+	describeNatGatewaysOutput, err := awsClient.DescribeNatGateways(ctx, &ec2.DescribeNatGatewaysInput{
+		Filter: []types.Filter{
+			{
+				Name:   aws.String("vpc-id"),
+				Values: []string{vpcID},
+			},
+		},
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d NAT gateways", len(describeNatGatewaysOutput.NatGateways)),
+		v1.Field{Key: "vpcID", Value: vpcID},
+	)
+	// Delete associated NAT gateways
+	for _, natGateway := range describeNatGatewaysOutput.NatGateways {
+		if natGateway.State == types.NatGatewayStateDeleting || natGateway.State == types.NatGatewayStateDeleted {
+			continue
+		}
+
+		// Delete the NAT Gateway
+		c.logger.Debug(ctx, "deleting NAT gateway",
+			v1.Field{Key: "natGatewayID", Value: *natGateway.NatGatewayId},
+			v1.Field{Key: "vpcID", Value: vpcID},
+		)
+		_, err = awsClient.DeleteNatGateway(ctx, &ec2.DeleteNatGatewayInput{
+			NatGatewayId: natGateway.NatGatewayId,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Wait until the NAT Gateway is deleted
+		c.logger.Debug(ctx, "waiting for NAT gateway to be deleted",
+			v1.Field{Key: "natGatewayID", Value: *natGateway.NatGatewayId},
+			v1.Field{Key: "vpcID", Value: vpcID},
+		)
+		w := ec2.NewNatGatewayDeletedWaiter(awsClient, func(o *ec2.NatGatewayDeletedWaiterOptions) {
+			o.MaxDelay = 10 * time.Second
+			o.MinDelay = 10 * time.Second
+		})
+		err = w.Wait(ctx, &ec2.DescribeNatGatewaysInput{
+			NatGatewayIds: []string{*natGateway.NatGatewayId},
+		}, 10*time.Minute)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Release the Elastic IP address (a failed NAT gateway may carry no
+		// addresses, so guard the index before dereferencing)
+		if len(natGateway.NatGatewayAddresses) == 0 || natGateway.NatGatewayAddresses[0].AllocationId == nil {
+			continue
+		}
+		c.logger.Debug(ctx, "releasing Elastic IP address",
+			v1.Field{Key: "allocationID", Value: *natGateway.NatGatewayAddresses[0].AllocationId},
+			v1.Field{Key: "vpcID", Value: vpcID},
+		)
+		_, err = awsClient.ReleaseAddress(ctx, &ec2.ReleaseAddressInput{
+			AllocationId: natGateway.NatGatewayAddresses[0].AllocationId,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	return nil
+}
+
+func (c *AWSClient) deleteInternetGateways(ctx context.Context, awsClient *ec2.Client, vpcID string) error {
+	// Find all Internet Gateways
+	describeInternetGatewaysOutput, err := awsClient.DescribeInternetGateways(ctx, &ec2.DescribeInternetGatewaysInput{
+		Filters: []types.Filter{
+			{
+				Name:   aws.String("attachment.vpc-id"),
+				Values: []string{vpcID},
+			},
+		},
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	c.logger.Debug(ctx, fmt.Sprintf("found %d Internet gateways", len(describeInternetGatewaysOutput.InternetGateways)),
+		v1.Field{Key: 
"vpcID", Value: vpcID}, + ) + for _, internetGateway := range describeInternetGatewaysOutput.InternetGateways { + // Detach the Internet Gateway from the VPC + c.logger.Debug(ctx, "detaching Internet gateway from VPC", + v1.Field{Key: "internetGatewayID", Value: *internetGateway.InternetGatewayId}, + v1.Field{Key: "vpcID", Value: vpcID}, + ) + _, err = awsClient.DetachInternetGateway(ctx, &ec2.DetachInternetGatewayInput{ + InternetGatewayId: internetGateway.InternetGatewayId, + VpcId: aws.String(vpcID), + }) + if err != nil { + return errors.WrapAndTrace(err) + } + + // Delete the Internet Gateway + c.logger.Debug(ctx, "deleting Internet gateway", + v1.Field{Key: "internetGatewayID", Value: *internetGateway.InternetGatewayId}, + v1.Field{Key: "vpcID", Value: vpcID}, + ) + _, err = awsClient.DeleteInternetGateway(ctx, &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: internetGateway.InternetGatewayId, + }) + if err != nil { + return errors.WrapAndTrace(err) + } + } + + return nil +} + +func (c *AWSClient) deleteSubnets(ctx context.Context, awsClient *ec2.Client, vpcID string) error { + // Find all subnets + describeSubnetsOutput, err := awsClient.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{vpcID}, + }, + }, + }) + if err != nil { + return errors.WrapAndTrace(err) + } + + // Delete all subnets + c.logger.Debug(ctx, fmt.Sprintf("found %d subnets", len(describeSubnetsOutput.Subnets)), + v1.Field{Key: "vpcID", Value: vpcID}, + ) + for _, subnet := range describeSubnetsOutput.Subnets { + // Delete the subnet + c.logger.Debug(ctx, "deleting subnet", + v1.Field{Key: "subnetID", Value: *subnet.SubnetId}, + v1.Field{Key: "vpcID", Value: vpcID}, + ) + _, err = awsClient.DeleteSubnet(ctx, &ec2.DeleteSubnetInput{ + SubnetId: subnet.SubnetId, + }) + if err != nil { + return errors.WrapAndTrace(err) + } + } + + return nil +} + +func (c *AWSClient) deleteRouteTables(ctx context.Context, awsClient *ec2.Client, vpcID string) error { + // Find all route tables + describeRouteTablesOutput, err := awsClient.DescribeRouteTables(ctx, &ec2.DescribeRouteTablesInput{ + Filters: []types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{vpcID}, + }, + { + Name: aws.String("tag:" + tagBrevVPCID), // ensure we do not select the default route table + Values: []string{vpcID}, + }, + }, + }) + if err != nil { + return errors.WrapAndTrace(err) + } + + // Delete all route tables + c.logger.Debug(ctx, fmt.Sprintf("found %d route tables", len(describeRouteTablesOutput.RouteTables)), + v1.Field{Key: "vpcID", Value: vpcID}, + ) + for _, routeTable := range describeRouteTablesOutput.RouteTables { + c.logger.Debug(ctx, "deleting route table", + v1.Field{Key: "routeTableID", Value: *routeTable.RouteTableId}, + v1.Field{Key: "vpcID", Value: vpcID}, + ) + _, err = awsClient.DeleteRouteTable(ctx, &ec2.DeleteRouteTableInput{ + RouteTableId: routeTable.RouteTableId, + }) + if err != nil { + return errors.WrapAndTrace(err) + } + } + + return nil +} + +func makeEC2Tags(tags map[string]string) []types.Tag { + awsTags := make([]types.Tag, 0, len(tags)) + for key, value := range tags { + awsTags = append(awsTags, types.Tag{Key: aws.String(key), Value: aws.String(value)}) + } + return awsTags +} diff --git a/v1/providers/aws/network_test.go b/v1/providers/aws/network_test.go new file mode 100644 index 0000000..8585af2 --- /dev/null +++ b/v1/providers/aws/network_test.go @@ -0,0 +1,232 @@ +package v1 + +import ( + "testing" + + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + + v1 "github.com/brevdev/cloud/v1" +) + +func TestFilterSubnetArgs(t *testing.T) { + tests := []struct { + name string + subnets []v1.CreateSubnetArgs + subnetType v1.SubnetType + expectedSubnets []v1.CreateSubnetArgs + }{ + { + name: "filter public subnets", + subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.1.0/24", Type: v1.SubnetTypePrivate}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + subnetType: v1.SubnetTypePublic, + expectedSubnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + }, + { + name: "filter private subnets", + subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.1.0/24", Type: v1.SubnetTypePrivate}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + subnetType: v1.SubnetTypePrivate, + expectedSubnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.1.0/24", Type: v1.SubnetTypePrivate}, + }, + }, + { + name: "no matching subnets", + subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + subnetType: v1.SubnetTypePrivate, + expectedSubnets: []v1.CreateSubnetArgs{}, + }, + { + name: "empty input", + subnets: []v1.CreateSubnetArgs{}, + subnetType: v1.SubnetTypePublic, + expectedSubnets: []v1.CreateSubnetArgs{}, + }, + { + name: "all subnets match", + subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.1.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + subnetType: v1.SubnetTypePublic, + expectedSubnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.1.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.2.0/24", Type: v1.SubnetTypePublic}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := filterSubnetArgs(tt.subnets, tt.subnetType) + + if len(result) != len(tt.expectedSubnets) { + t.Errorf("expected %d subnets, got %d", len(tt.expectedSubnets), len(result)) + } + + for i, subnet := range result { + if subnet.CidrBlock != tt.expectedSubnets[i].CidrBlock { + t.Errorf("expected subnet CIDR %s, got %s", tt.expectedSubnets[i].CidrBlock, subnet.CidrBlock) + } + if subnet.Type != tt.expectedSubnets[i].Type { + t.Errorf("expected subnet type %v, got %v", tt.expectedSubnets[i].Type, subnet.Type) + } + } + }) + } +} + +func TestMakeEC2Tags(t *testing.T) { + tests := []struct { + name string + tags map[string]string + expected []types.Tag + }{ + { + name: "empty tags", + tags: map[string]string{}, + expected: []types.Tag{}, + }, + { + name: "single tag", + tags: map[string]string{ + "Name": "test-vpc", + }, + expected: []types.Tag{ + {Key: aws.String("Name"), Value: aws.String("test-vpc")}, + }, + }, + { + name: "multiple tags", + tags: map[string]string{ + "Name": "test-vpc", + "Environment": "production", + "Team": "platform", + }, + expected: []types.Tag{ + {Key: aws.String("Name"), Value: aws.String("test-vpc")}, + {Key: aws.String("Environment"), Value: aws.String("production")}, + {Key: aws.String("Team"), Value: aws.String("platform")}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ec2Tags := makeEC2Tags(tt.tags) + + if 
len(ec2Tags) != len(tt.expected) {
+				t.Errorf("expected %d tags, got %d", len(tt.expected), len(ec2Tags))
+			}
+
+			// Verify tags are properly converted
+			tagMap := make(map[string]string)
+			for _, tag := range ec2Tags {
+				tagMap[*tag.Key] = *tag.Value
+			}
+
+			for key, value := range tt.tags {
+				if tagMap[key] != value {
+					t.Errorf("expected tag %s=%s, got %s=%s", key, value, key, tagMap[key])
+				}
+			}
+		})
+	}
+}
+
+func TestAwsSubnetToCloudSubnet(t *testing.T) {
+	tests := []struct {
+		name       string
+		awsSubnet  *types.Subnet
+		subnetType v1.SubnetType
+		vpc        *types.Vpc
+	}{
+		{
+			name: "valid public subnet",
+			awsSubnet: &types.Subnet{
+				SubnetId:         aws.String("subnet-123"),
+				CidrBlock:        aws.String("10.0.0.0/24"),
+				AvailabilityZone: aws.String("us-east-1a"),
+				Tags: []types.Tag{
+					{Key: aws.String(tagBrevRefID), Value: aws.String("test-subnet")},
+					{Key: aws.String(tagName), Value: aws.String("test-subnet-name")},
+				},
+			},
+			subnetType: v1.SubnetTypePublic,
+			vpc: &types.Vpc{
+				VpcId: aws.String("vpc-123"),
+			},
+		},
+		{
+			name: "valid private subnet",
+			awsSubnet: &types.Subnet{
+				SubnetId:         aws.String("subnet-456"),
+				CidrBlock:        aws.String("10.0.1.0/24"),
+				AvailabilityZone: aws.String("us-east-1b"),
+				Tags: []types.Tag{
+					{Key: aws.String(tagBrevRefID), Value: aws.String("test-private-subnet")},
+					{Key: aws.String(tagName), Value: aws.String("test-private-subnet-name")},
+				},
+			},
+			subnetType: v1.SubnetTypePrivate,
+			vpc: &types.Vpc{
+				VpcId: aws.String("vpc-123"),
+			},
+		},
+		{
+			name: "subnet with minimal tags",
+			awsSubnet: &types.Subnet{
+				SubnetId:         aws.String("subnet-789"),
+				CidrBlock:        aws.String("10.0.2.0/24"),
+				AvailabilityZone: aws.String("us-east-1c"),
+				Tags: []types.Tag{
+					{Key: aws.String(tagBrevRefID), Value: aws.String("minimal-subnet")},
+					{Key: aws.String(tagName), Value: aws.String("minimal-subnet-name")},
+				},
+			},
+			subnetType: v1.SubnetTypePublic,
+			vpc: &types.Vpc{
+				VpcId: aws.String("vpc-123"),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := awsSubnetToCloudSubnet(tt.awsSubnet, tt.subnetType, tt.vpc)
+			if err != nil {
+				t.Fatalf("expected no error but got: %v", err)
+			}
+			if result != nil {
+				if string(result.GetID()) != *tt.awsSubnet.SubnetId {
+					t.Errorf("expected subnet ID %s, got %s", *tt.awsSubnet.SubnetId, result.GetID())
+				}
+				if result.GetCidrBlock() != *tt.awsSubnet.CidrBlock {
+					t.Errorf("expected CIDR block %s, got %s", *tt.awsSubnet.CidrBlock, result.GetCidrBlock())
+				}
+				if result.GetSubnetType() != tt.subnetType {
+					t.Errorf("expected subnet type %v, got %v", tt.subnetType, result.GetSubnetType())
+				}
+				if string(result.GetVPCID()) != *tt.vpc.VpcId {
+					t.Errorf("expected VPC ID %s, got %s", *tt.vpc.VpcId, result.GetVPCID())
+				}
+			}
+		})
+	}
+}
diff --git a/v1/providers/aws/scripts/kubernetes_test.go b/v1/providers/aws/scripts/kubernetes_test.go
new file mode 100644
index 0000000..ae99076
--- /dev/null
+++ b/v1/providers/aws/scripts/kubernetes_test.go
@@ -0,0 +1,178 @@
+//go:build scripts
+// +build scripts
+
+package scripts
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/brevdev/cloud/internal/validation"
+	v1 "github.com/brevdev/cloud/v1"
+	aws "github.com/brevdev/cloud/v1/providers/aws"
+)
+
+const (
+	accessKeyID     = "test"
+	secretAccessKey = "test"
+)
+
+func TestCreateKubernetesCluster(t *testing.T) {
+	awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{}))
+	if err != nil {
+		t.Fatalf("failed to create 
AWS client: %v", err) + } + + cluster, err := awsClient.CreateCluster(context.Background(), v1.CreateClusterArgs{ + Name: "cloud-sdk-test", + RefID: "cloud-sdk-test", + VPCID: v1.CloudProviderResourceID("vpc-09035a20d5b393eff"), + SubnetIDs: []v1.CloudProviderResourceID{ + v1.CloudProviderResourceID("subnet-0ba8c98b636237a2d"), + v1.CloudProviderResourceID("subnet-07fadc6ba1992285b"), + }, + KubernetesVersion: "1.31", + Tags: v1.Tags{ + "test": "test", + }, + }) + if err != nil { + t.Fatalf("failed to create cluster: %v", err) + } + + cluster, err = awsClient.GetCluster(context.Background(), v1.GetClusterArgs{ + ID: cluster.GetID(), + }) + if err != nil { + t.Fatalf("failed to get cluster: %v", err) + } + + fmt.Println(cluster) +} + +func TestGetKubernetesCluster(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + cluster, err := awsClient.GetCluster(context.Background(), v1.GetClusterArgs{ + ID: v1.CloudProviderResourceID("cloud-sdk-test2"), + }) + if err != nil { + t.Fatalf("failed to get cluster: %v", err) + } + + fmt.Println(cluster) +} + +func TestAddNodeGroupToKubernetesCluster(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + nodeGroup, err := awsClient.CreateNodeGroup(context.Background(), v1.CreateNodeGroupArgs{ + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test2"), + Name: "cloud-sdk-test-node-group", + RefID: "cloud-sdk-test-node-group", + MinNodeCount: 1, + MaxNodeCount: 1, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + Tags: v1.Tags{ + "test": "test", + }, + }) + if err != nil { + t.Fatalf("failed to create node group: %v", err) + } + + fmt.Println(nodeGroup) +} + +func TestGetNodeGroup(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + nodeGroup, err := awsClient.GetNodeGroup(context.Background(), v1.GetNodeGroupArgs{ + ID: v1.CloudProviderResourceID("cloud-sdk-test-node-group"), + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test"), + }) + if err != nil { + t.Fatalf("failed to get node group: %v", err) + } + + fmt.Println(nodeGroup) +} + +func TestModifyNodeGroup(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + err = awsClient.ModifyNodeGroup(context.Background(), v1.ModifyNodeGroupArgs{ + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test"), + ID: v1.CloudProviderResourceID("cloud-sdk-test-node-group"), + MinNodeCount: 2, + MaxNodeCount: 2, + }) + if err != nil { + t.Fatalf("failed to modify node group: %v", err) + } +} + +func TestDeleteNodeGroup(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + err = awsClient.DeleteNodeGroup(context.Background(), v1.DeleteNodeGroupArgs{ + ClusterID: v1.CloudProviderResourceID("testcloudsdk-20251103191744"), + ID: 
v1.CloudProviderResourceID("testcloudsdk-20251103191744"), + }) + if err != nil { + t.Fatalf("failed to delete node group: %v", err) + } +} + +func TestPutUser(t *testing.T) { + testUserPrivateKeyPEMBase64 := os.Getenv("TEST_USER_PRIVATE_KEY_PEM_BASE64") + + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + config, err := awsClient.SetClusterUser(context.Background(), v1.SetClusterUserArgs{ + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test2"), + Username: "test-user", + Role: "cluster-admin", + RSAPEMBase64: testUserPrivateKeyPEMBase64, + }) + if err != nil { + t.Fatalf("failed to put user: %v", err) + } + + fmt.Println(config) +} + +func TestDeleteKubernetesCluster(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + err = awsClient.DeleteCluster(context.Background(), v1.DeleteClusterArgs{ + ID: v1.CloudProviderResourceID("testcloudsdk-20251103200615"), + }) + if err != nil { + t.Fatalf("failed to delete cluster: %v", err) + } +} diff --git a/v1/providers/aws/scripts/network_test.go b/v1/providers/aws/scripts/network_test.go new file mode 100644 index 0000000..8b94177 --- /dev/null +++ b/v1/providers/aws/scripts/network_test.go @@ -0,0 +1,59 @@ +//go:build scripts +// +build scripts + +package scripts + +import ( + "context" + "fmt" + "testing" + + "github.com/brevdev/cloud/internal/validation" + v1 "github.com/brevdev/cloud/v1" + aws "github.com/brevdev/cloud/v1/providers/aws" +) + +func TestCreateVPC(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + vpc, err := awsClient.CreateVPC(context.Background(), v1.CreateVPCArgs{ + Name: "cloud-sdk-test", + RefID: "cloud-sdk-test", + CidrBlock: "10.0.0.0/16", + Subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "10.0.0.0/19", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.32.0/19", Type: v1.SubnetTypePrivate}, + {CidrBlock: "10.0.64.0/19", Type: v1.SubnetTypePublic}, + {CidrBlock: "10.0.96.0/19", Type: v1.SubnetTypePrivate}, + }, + }) + if err != nil { + t.Fatalf("failed to create VPC: %v", err) + } + + vpc, err = awsClient.GetVPC(context.Background(), v1.GetVPCArgs{ + ID: vpc.GetID(), + }) + if err != nil { + t.Fatalf("failed to get VPC: %v", err) + } +} + +func TestDeleteVPC(t *testing.T) { + awsClient, err := aws.NewAWSClient("test", accessKeyID, secretAccessKey, "us-east-1", aws.WithLogger(&validation.ValidationLogger{})) + if err != nil { + t.Fatalf("failed to create AWS client: %v", err) + } + + err = awsClient.DeleteVPC(context.Background(), v1.DeleteVPCArgs{ + ID: v1.CloudProviderResourceID("vpc-01e30509323927f79"), + }) + if err != nil { + t.Fatalf("failed to delete VPC: %v", err) + } + + fmt.Println("VPC deleted") +} diff --git a/v1/providers/aws/utils.go b/v1/providers/aws/utils.go new file mode 100644 index 0000000..6780866 --- /dev/null +++ b/v1/providers/aws/utils.go @@ -0,0 +1,11 @@ +package v1 + +const ( + tagBrevRefID = "brev-ref-id" + tagBrevVPCID = "brev-vpc-id" + tagBrevClusterID = "brev-cluster-id" + tagBrevSubnetType = "brev-subnet-type" + tagBrevCloudSDK = "brev-cloud-sdk" + tagCreatedBy = "CreatedBy" + tagName = 
"Name" +) diff --git a/v1/providers/aws/validation_kubernetes_test.go b/v1/providers/aws/validation_kubernetes_test.go new file mode 100644 index 0000000..67a6c3a --- /dev/null +++ b/v1/providers/aws/validation_kubernetes_test.go @@ -0,0 +1,71 @@ +package v1 + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/brevdev/cloud/internal/validation" + v1 "github.com/brevdev/cloud/v1" +) + +func TestAWSKubernetesValidation(t *testing.T) { + if isValidationTest == "" { + t.Skip("VALIDATION_TEST is not set, skipping AWS Kubernetes validation tests") + } + + testUserPrivateKeyPEMBase64 := os.Getenv("TEST_USER_PRIVATE_KEY_PEM_BASE64") + + if accessKeyID == "" || secretAccessKey == "" { + t.Fatalf("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set") + } + + config := validation.ProviderConfig{ + Location: "us-east-1", + Credential: NewAWSCredential(fmt.Sprintf("validation-%s", t.Name()), accessKeyID, secretAccessKey), + } + + // Use the test name as the name of the cluster and node group + name := fmt.Sprintf("testcloudsdk-%s", time.Now().UTC().Format("20060102150405")) + + // Network CIDR + networkCidr := "10.0.0.0/16" + + // Network subnets + pubSubnet1 := validation.KubernetesValidationSubnetOpts{Name: "pub-subnet-1", RefID: "pub-subnet-1", CidrBlock: "10.0.0.0/19", SubnetType: v1.SubnetTypePublic} + prvSubnet1 := validation.KubernetesValidationSubnetOpts{Name: "prv-subnet-1", RefID: "prv-subnet-1", CidrBlock: "10.0.32.0/19", SubnetType: v1.SubnetTypePrivate} + pubSubnet2 := validation.KubernetesValidationSubnetOpts{Name: "pub-subnet-2", RefID: "pub-subnet-2", CidrBlock: "10.0.64.0/19", SubnetType: v1.SubnetTypePublic} + prvSubnet2 := validation.KubernetesValidationSubnetOpts{Name: "prv-subnet-2", RefID: "prv-subnet-2", CidrBlock: "10.0.96.0/19", SubnetType: v1.SubnetTypePrivate} + + validation.RunKubernetesValidation(t, config, validation.KubernetesValidationOpts{ + Name: name, + RefID: name, + KubernetesVersion: "1.34", + // Associate the VPC with the private subnets + Subnets: []validation.KubernetesValidationSubnetOpts{prvSubnet1, prvSubnet2}, + NetworkOpts: &validation.KubernetesValidationNetworkOpts{ + Name: name, + RefID: name, + CidrBlock: networkCidr, + // Build the network with all subnets + Subnets: []validation.KubernetesValidationSubnetOpts{pubSubnet1, prvSubnet1, pubSubnet2, prvSubnet2}, + }, + NodeGroupOpts: &validation.KubernetesValidationNodeGroupOpts{ + Name: name, + RefID: name, + InstanceType: "t3.medium", + DiskSizeGiB: 20, + MinNodeCount: 1, + MaxNodeCount: 1, + }, + UserOpts: &validation.KubernetesValidationUserOpts{ + Username: "test-user", + Role: "cluster-admin", + RSAPEMBase64: testUserPrivateKeyPEMBase64, + }, + Tags: map[string]string{ + "test": t.Name(), + }, + }) +} diff --git a/v1/providers/aws/validation_network_test.go b/v1/providers/aws/validation_network_test.go new file mode 100644 index 0000000..5b6932a --- /dev/null +++ b/v1/providers/aws/validation_network_test.go @@ -0,0 +1,44 @@ +package v1 + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/brevdev/cloud/internal/validation" +) + +var ( + isValidationTest = os.Getenv("VALIDATION_TEST") + accessKeyID = os.Getenv("AWS_ACCESS_KEY_ID") + secretAccessKey = os.Getenv("AWS_SECRET_ACCESS_KEY") +) + +func TestAWSNetworkValidation(t *testing.T) { + if isValidationTest == "" { + t.Skip("VALIDATION_TEST is not set, skipping AWS Network validation tests") + } + + if accessKeyID == "" || secretAccessKey == "" { + t.Fatalf("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set") + } + + 
config := validation.ProviderConfig{ + Location: "us-east-1", + Credential: NewAWSCredential(fmt.Sprintf("validation-%s", t.Name()), accessKeyID, secretAccessKey), + } + + // Use the test name as the name of the VPC + name := fmt.Sprintf("cloud-sdk-%s-%s", t.Name(), time.Now().UTC().Format("20060102150405")) + + validation.RunNetworkValidation(t, config, validation.NetworkValidationOpts{ + Name: name, + RefID: name, + CidrBlock: "172.16.0.0/16", + PublicSubnetCidrBlock: "172.16.0.0/24", + Tags: map[string]string{ + "test": "TestNetworkValidation", + }, + }) +} diff --git a/v1/providers/fluidstack/instancetype.go b/v1/providers/fluidstack/instancetype.go index 3b08d9e..a677966 100644 --- a/v1/providers/fluidstack/instancetype.go +++ b/v1/providers/fluidstack/instancetype.go @@ -51,7 +51,7 @@ func (c *FluidStackClient) GetLocations(ctx context.Context, _ v1.GetLocationsAr } var locations []v1.Location - if resp != nil { + if len(resp) > 0 { for _, capacity := range resp { location := v1.Location{ Name: capacity.Name, diff --git a/v1/providers/lambdalabs/instance.go b/v1/providers/lambdalabs/instance.go index 706d882..c47d503 100644 --- a/v1/providers/lambdalabs/instance.go +++ b/v1/providers/lambdalabs/instance.go @@ -107,10 +107,9 @@ func (c *LambdaLabsClient) ListInstances(ctx context.Context, _ v1.ListInstances return nil, fmt.Errorf("failed to list instances: %w", err) } - instances := make([]v1.Instance, 0, len(resp.Data)) - for _, llInstance := range resp.Data { - instance := convertLambdaLabsInstanceToV1Instance(llInstance) - instances = append(instances, *instance) + instances := make([]v1.Instance, len(resp.Data)) + for i, llInstance := range resp.Data { + instances[i] = *convertLambdaLabsInstanceToV1Instance(llInstance) } return instances, nil diff --git a/v1/providers/launchpad/scripts/finalize-swagger/main.go b/v1/providers/launchpad/scripts/finalize-swagger/main.go index baf2f9f..8c1ce4f 100644 --- a/v1/providers/launchpad/scripts/finalize-swagger/main.go +++ b/v1/providers/launchpad/scripts/finalize-swagger/main.go @@ -127,10 +127,7 @@ func buildEnumVarnames(nodeName string, enumNode *yaml.Node) []*yaml.Node { } // Use the enum node name as the prefix for the enum values (removing the "Enum" suffix if it exists) - prefix := nodeName - if strings.HasSuffix(prefix, "Enum") { - prefix = prefix[:len(prefix)-4] - } + prefix := strings.TrimSuffix(nodeName, "Enum") // Create the enum nodes enumNodes := make([]*yaml.Node, 0, len(enumValues)) @@ -285,7 +282,7 @@ func AddOneOfEntries(yamlNode *yaml.Node) error { //nolint:gocyclo // readabilit } var oneOfNode *yaml.Node - if nodeType.Value == "string" { + if nodeType.Value == "string" { //nolint:staticcheck // if statement is preferable // If the node is a string, convert it to a oneOf entry oneOfNode = convertStringToOneOf(key, val) } else if nodeType.Value == "array" { diff --git a/v1/providers/nebius/capabilities.go b/v1/providers/nebius/capabilities.go index 39cee6c..3f92657 100644 --- a/v1/providers/nebius/capabilities.go +++ b/v1/providers/nebius/capabilities.go @@ -22,7 +22,8 @@ func getNebiusCapabilities() v1.Capabilities { v1.CapabilityResizeInstanceVolume, // Nebius supports disk resizing v1.CapabilityTags, // Nebius supports resource tagging v1.CapabilityInstanceUserData, // Nebius supports user data in instance creation - + v1.CapabilityVPC, // Nebius supports VPCs + v1.CapabilityManagedKubernetes, // Nebius supports managed Kubernetes clusters } } diff --git a/v1/providers/nebius/client.go b/v1/providers/nebius/client.go 
index 5301f2a..89582a8 100644 --- a/v1/providers/nebius/client.go +++ b/v1/providers/nebius/client.go @@ -2,25 +2,38 @@ package v1 import ( "context" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" "fmt" + "github.com/brevdev/cloud/internal/errors" v1 "github.com/brevdev/cloud/v1" "github.com/nebius/gosdk" + "github.com/nebius/gosdk/auth" + nebiusiamv1 "github.com/nebius/gosdk/proto/nebius/iam/v1" ) +const CloudProviderID string = "nebius" + type NebiusCredential struct { - RefID string - ServiceAccountKey string // JSON service account key - ProjectID string + RefID string + PublicKeyID string + PrivateKeyPEMBase64 string + ServiceAccountID string + ProjectID string } var _ v1.CloudCredential = &NebiusCredential{} -func NewNebiusCredential(refID, serviceAccountKey, projectID string) *NebiusCredential { +func NewNebiusCredential(refID string, publicKeyID string, privateKeyPEMBase64 string, serviceAccountID string, projectID string) *NebiusCredential { return &NebiusCredential{ - RefID: refID, - ServiceAccountKey: serviceAccountKey, - ProjectID: projectID, + RefID: refID, + PublicKeyID: publicKeyID, + PrivateKeyPEMBase64: privateKeyPEMBase64, + ServiceAccountID: serviceAccountID, + ProjectID: projectID, } } @@ -36,48 +49,86 @@ func (c *NebiusCredential) GetAPIType() v1.APIType { // GetCloudProviderID returns the cloud provider ID for Nebius func (c *NebiusCredential) GetCloudProviderID() v1.CloudProviderID { - return "nebius" + return v1.CloudProviderID(CloudProviderID) } // GetTenantID returns the tenant ID for Nebius (project ID) func (c *NebiusCredential) GetTenantID() (string, error) { - if c.ProjectID == "" { - return "", fmt.Errorf("project ID is required for Nebius") + if c.ServiceAccountID == "" { + return "", fmt.Errorf("service account ID is required for Nebius") } - return c.ProjectID, nil + return c.ServiceAccountID, nil } -func (c *NebiusCredential) MakeClient(ctx context.Context, location string) (v1.CloudClient, error) { - return NewNebiusClient(ctx, c.RefID, c.ServiceAccountKey, c.ProjectID, location) +func (c *NebiusCredential) MakeClient(ctx context.Context, _ string) (v1.CloudClient, error) { + return NewNebiusClient(ctx, c.RefID, c.PublicKeyID, c.PrivateKeyPEMBase64, c.ServiceAccountID, c.ProjectID) } // It embeds NotImplCloudClient to handle unsupported features type NebiusClient struct { v1.NotImplCloudClient - refID string - serviceAccountKey string - projectID string - location string - sdk *gosdk.SDK + refID string + projectID string + sdk *gosdk.SDK + logger v1.Logger } var _ v1.CloudClient = &NebiusClient{} -func NewNebiusClient(ctx context.Context, refID, serviceAccountKey, projectID, location string) (*NebiusClient, error) { +type NebiusClientOption func(c *NebiusClient) + +func WithLogger(logger v1.Logger) NebiusClientOption { + return func(c *NebiusClient) { + c.logger = logger + } +} + +func NewNebiusClient(ctx context.Context, refID string, publicKeyID string, privateKeyPEMBase64 string, serviceAccountID string, projectID string, opts ...NebiusClientOption) (*NebiusClient, error) { + // Decode base64 into raw PEM bytes + pemBytes, err := base64.StdEncoding.DecodeString(privateKeyPEMBase64) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode: %w", err) + } + + // Decode the PEM block + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, fmt.Errorf("failed to parse PEM block") + } + + parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse PKCS8 
private key: %w", err) + } + var ok bool + privateKey, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf("not an RSA private key") + } + sdk, err := gosdk.New(ctx, gosdk.WithCredentials( - gosdk.IAMToken(serviceAccountKey), // For now, treat as IAM token - will need proper service account handling later + gosdk.ServiceAccount(auth.ServiceAccount{ + PrivateKey: privateKey, + PublicKeyID: publicKeyID, + ServiceAccountID: serviceAccountID, + }), )) if err != nil { return nil, fmt.Errorf("failed to initialize Nebius SDK: %w", err) } - return &NebiusClient{ - refID: refID, - serviceAccountKey: serviceAccountKey, - projectID: projectID, - location: location, - sdk: sdk, - }, nil + nebiusClient := &NebiusClient{ + refID: refID, + projectID: projectID, + sdk: sdk, + logger: &v1.NoopLogger{}, + } + + for _, opt := range opts { + opt(nebiusClient) + } + + return nebiusClient, nil } // GetAPIType returns the API type for Nebius @@ -90,11 +141,6 @@ func (c *NebiusClient) GetCloudProviderID() v1.CloudProviderID { return "nebius" } -// MakeClient creates a new client instance for a different location -func (c *NebiusClient) MakeClient(ctx context.Context, location string) (v1.CloudClient, error) { - return NewNebiusClient(ctx, c.refID, c.serviceAccountKey, c.projectID, location) -} - // GetTenantID returns the tenant ID for Nebius func (c *NebiusClient) GetTenantID() (string, error) { return c.projectID, nil @@ -104,3 +150,16 @@ func (c *NebiusClient) GetTenantID() (string, error) { func (c *NebiusClient) GetReferenceID() string { return c.refID } + +func (c *NebiusClient) GetLocation(ctx context.Context) (string, error) { + nebiusProjectService := c.sdk.Services().IAM().V1().Project() + + // The target region is the same as the client's project region + project, err := nebiusProjectService.Get(ctx, &nebiusiamv1.GetProjectRequest{ + Id: c.projectID, + }) + if err != nil { + return "", errors.WrapAndTrace(err) + } + return project.GetSpec().GetRegion(), nil +} diff --git a/v1/providers/nebius/kubernetes.go b/v1/providers/nebius/kubernetes.go new file mode 100644 index 0000000..ef763fc --- /dev/null +++ b/v1/providers/nebius/kubernetes.go @@ -0,0 +1,678 @@ +package v1 + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + + nebiuscommon "github.com/nebius/gosdk/proto/nebius/common/v1" + nebiusmk8s "github.com/nebius/gosdk/proto/nebius/mk8s/v1" + nebiusvpc "github.com/nebius/gosdk/proto/nebius/vpc/v1" + grpccodes "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + k8scmd "k8s.io/client-go/tools/clientcmd/api" + + "github.com/brevdev/cloud/internal/errors" + cloudk8s "github.com/brevdev/cloud/internal/kubernetes" + "github.com/brevdev/cloud/internal/rsa" + v1 "github.com/brevdev/cloud/v1" +) + +var ( + errVPCHasNoPublicSubnets = fmt.Errorf("VPC must have at least one public subnet with a CIDR block larger than /24") + errVPCHasNoPrivateSubnets = fmt.Errorf("VPC must have at least one private subnet with a CIDR block larger than /24") + errNoSubnetIDsSpecifiedForVPC = fmt.Errorf("no subnet IDs specified for VPC") + errMultipleSubnetIDsNotAllowedForVPC = fmt.Errorf("multiple subnet IDs not allowed for VPC") + + errNodeGroupNameIsRequired = fmt.Errorf("node group name is required") + errNodeGroupRefIDIsRequired = fmt.Errorf("node group refID is required") + errNodeGroupMinNodeCountMustBeGreaterThan0 = fmt.Errorf("node group minNodeCount must be greater than 0") 
+ errNodeGroupMaxNodeCountMustBeGreaterThan0 = fmt.Errorf("node group maxNodeCount must be greater than 0") + errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount = fmt.Errorf("node group maxNodeCount must be greater than or equal to minNodeCount") + errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo64 = fmt.Errorf("node group diskSizeGiB must be greater than or equal to 64") + errNodeGroupInstanceTypeIsRequired = fmt.Errorf("node group instanceType is required") + + errUsernameIsRequired = fmt.Errorf("username is required") + errRoleIsRequired = fmt.Errorf("role is required") + errClusterIDIsRequired = fmt.Errorf("cluster ID is required") + errRSAPEMBase64IsRequired = fmt.Errorf("RSA PEM base64 is required") +) + +var _ v1.CloudMaintainKubernetes = &NebiusClient{} + +func (c *NebiusClient) CreateCluster(ctx context.Context, args v1.CreateClusterArgs) (*v1.Cluster, error) { + nebiusClusterService := c.sdk.Services().MK8S().V1().Cluster() + + // Validate arguments + if len(args.SubnetIDs) == 0 { + return nil, errors.WrapAndTrace(errNoSubnetIDsSpecifiedForVPC) + } else if len(args.SubnetIDs) > 1 { + return nil, errors.WrapAndTrace(errMultipleSubnetIDsNotAllowedForVPC) + } + subnetID := string(args.SubnetIDs[0]) + + // Fetch the target location + location, err := c.GetLocation(ctx) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Fetch the target VPC + vpc, err := c.GetVPC(ctx, v1.GetVPCArgs{ + ID: args.VPCID, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Validate VPC + err = validateVPC(vpc) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Create a map of subnetID->subnet for this VPC so that we can find the target subnet + subnetMap := make(map[string]*v1.Subnet) + for _, subnet := range vpc.GetSubnets() { + subnetMap[string(subnet.GetID())] = subnet + } + + // Get the target subnet from the map + var subnet *v1.Subnet + if _, ok := subnetMap[subnetID]; !ok { + return nil, errors.WrapAndTrace(fmt.Errorf("subnet ID %s does not match VPC %s", subnetID, vpc.GetID())) + } else { + subnet = subnetMap[subnetID] + } + + labels := make(map[string]string) + for key, value := range args.Tags { + labels[key] = value + } + + // Add the required labels + labels[labelBrevRefID] = args.RefID + labels[labelCreatedBy] = labelBrevCloudSDK + + // Create the cluster + createClusterOperation, err := nebiusClusterService.Create(ctx, &nebiusmk8s.CreateClusterRequest{ + Metadata: &nebiuscommon.ResourceMetadata{ + Name: args.Name, + ParentId: c.projectID, + Labels: labels, + }, + Spec: &nebiusmk8s.ClusterSpec{ + ControlPlane: &nebiusmk8s.ControlPlaneSpec{ + Version: args.KubernetesVersion, + SubnetId: string(subnet.GetID()), + EtcdClusterSize: 3, + Endpoints: &nebiusmk8s.ControlPlaneEndpointsSpec{ + PublicEndpoint: &nebiusmk8s.PublicEndpointSpec{}, + }, + }, + KubeNetwork: &nebiusmk8s.KubeNetworkSpec{ + ServiceCidrs: []string{subnet.GetCidrBlock()}, + }, + }, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + brevCluster, err := v1.NewCluster(v1.ClusterSettings{ + ID: v1.CloudProviderResourceID(createClusterOperation.ResourceID()), + Name: args.Name, + RefID: args.RefID, + Provider: CloudProviderID, + Cloud: CloudProviderID, + Location: location, + VPCID: args.VPCID, + SubnetIDs: args.SubnetIDs, + KubernetesVersion: args.KubernetesVersion, + Status: v1.ClusterStatusPending, + Tags: args.Tags, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevCluster, nil +} + +func validateVPC(vpc *v1.VPC) error 
{ + validPublicSubnetCount := 0 + validPrivateSubnetCount := 0 + + for _, subnet := range vpc.GetSubnets() { + larger, err := cidrBlockLargerThanMask(subnet.GetCidrBlock(), 24) + if err != nil { + return errors.WrapAndTrace(err) + } + if !larger { + continue + } + + if subnet.GetSubnetType() == v1.SubnetTypePublic { + validPublicSubnetCount++ + } else { + validPrivateSubnetCount++ + } + } + + errs := []error{} + if validPublicSubnetCount == 0 { + errs = append(errs, errVPCHasNoPublicSubnets) + } + if validPrivateSubnetCount == 0 { + errs = append(errs, errVPCHasNoPrivateSubnets) + } + + return errors.WrapAndTrace(errors.Join(errs...)) +} + +func (c *NebiusClient) GetCluster(ctx context.Context, args v1.GetClusterArgs) (*v1.Cluster, error) { + nebiusClusterService := c.sdk.Services().MK8S().V1().Cluster() + nebiusSubnetService := c.sdk.Services().VPC().V1().Subnet() + + cluster, err := nebiusClusterService.Get(ctx, &nebiusmk8s.GetClusterRequest{ + Id: string(args.ID), + }) + if err != nil { + if grpcstatus.Code(err) == grpccodes.NotFound { + return nil, v1.ErrResourceNotFound + } + return nil, errors.WrapAndTrace(err) + } + + nebiusSubnet, err := nebiusSubnetService.Get(ctx, &nebiusvpc.GetSubnetRequest{ + Id: cluster.Spec.ControlPlane.SubnetId, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + nodeGroups, err := c.getClusterNodeGroups(ctx, cluster) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + brevCluster, err := v1.NewCluster(v1.ClusterSettings{ + RefID: cluster.Metadata.Labels[labelBrevRefID], + ID: v1.CloudProviderResourceID(cluster.Metadata.Id), + Name: cluster.Metadata.Name, + APIEndpoint: getClusterAPIEndpoint(cluster), + KubernetesVersion: cluster.Spec.ControlPlane.Version, + Status: parseNebiusClusterStatus(cluster.Status), + VPCID: v1.CloudProviderResourceID(nebiusSubnet.Spec.NetworkId), + SubnetIDs: []v1.CloudProviderResourceID{v1.CloudProviderResourceID(nebiusSubnet.Metadata.Id)}, + NodeGroups: nodeGroups, + ClusterCACertificateBase64: getClusterCACertificateBase64(cluster), + Tags: v1.Tags(cluster.Metadata.Labels), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevCluster, err +} + +func getClusterCACertificateBase64(cluster *nebiusmk8s.Cluster) string { + if cluster == nil { + return "" + } + if cluster.Status == nil || cluster.Status.ControlPlane == nil || cluster.Status.ControlPlane.Auth == nil { + return "" + } + return base64.StdEncoding.EncodeToString([]byte(cluster.Status.ControlPlane.Auth.ClusterCaCertificate)) +} + +func getClusterAPIEndpoint(cluster *nebiusmk8s.Cluster) string { + if cluster == nil { + return "" + } + if cluster.Status == nil || cluster.Status.ControlPlane == nil || cluster.Status.ControlPlane.Endpoints == nil { + return "" + } + return cluster.Status.ControlPlane.Endpoints.PublicEndpoint +} + +func (c *NebiusClient) getClusterNodeGroups(ctx context.Context, cluster *nebiusmk8s.Cluster) ([]*v1.NodeGroup, error) { + nebiusNodeGroupService := c.sdk.Services().MK8S().V1().NodeGroup() + + nebiusNodeGroups, err := nebiusNodeGroupService.List(ctx, &nebiusmk8s.ListNodeGroupsRequest{ + ParentId: cluster.Metadata.Id, + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + nodeGroups := make([]*v1.NodeGroup, len(nebiusNodeGroups.Items)) + for i, nebiusNodeGroup := range nebiusNodeGroups.Items { + brevNodeGroup, err := parseNebiusNodeGroup(nebiusNodeGroup) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + nodeGroups[i] = brevNodeGroup + } + return nodeGroups, nil +} + 
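+// The status mappings below collapse Nebius cluster and node-group states
+// into the SDK's provider-neutral statuses. A caller that needs a ready
+// cluster typically polls GetCluster until the mapped status settles; a
+// minimal sketch (hypothetical helper, not part of this package's API,
+// assuming a GetStatus accessor on v1.Cluster and a "time" import):
+//
+//	func waitClusterAvailable(ctx context.Context, c *NebiusClient, id v1.CloudProviderResourceID) error {
+//		for {
+//			cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{ID: id})
+//			if err != nil {
+//				return err
+//			}
+//			if cluster.GetStatus() == v1.ClusterStatusAvailable {
+//				return nil
+//			}
+//			select {
+//			case <-ctx.Done():
+//				return ctx.Err()
+//			case <-time.After(30 * time.Second): // re-check on a fixed interval
+//			}
+//		}
+//	}
+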
+func parseNebiusClusterStatus(status *nebiusmk8s.ClusterStatus) v1.ClusterStatus { + if status == nil { + return v1.ClusterStatusUnknown + } + switch status.State { + case nebiusmk8s.ClusterStatus_PROVISIONING: + return v1.ClusterStatusPending + case nebiusmk8s.ClusterStatus_RUNNING: + return v1.ClusterStatusAvailable + case nebiusmk8s.ClusterStatus_DELETING: + return v1.ClusterStatusDeleting + } + return v1.ClusterStatusUnknown +} + +// SetClusterUser implements v1.CloudMaintainKubernetes. +func (c *NebiusClient) SetClusterUser(ctx context.Context, args v1.SetClusterUserArgs) (*v1.ClusterUser, error) { + err := validatePutUserArgs(args) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + + // Fetch the cluster the user key will be added to + cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{ + ID: args.ClusterID, + }) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to get cluster: %w", err)) + } + + // Create a clientset to interact with the cluster using the bearer token and CA certificate + clientset, err := c.newK8sClient(ctx, cluster) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to create clientset: %w", err)) + } + + // Prepare the private key for the CSR + privateKeyBytes, err := base64.StdEncoding.DecodeString(args.RSAPEMBase64) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode base64 string: %w", err)) + } + + // Parse the private key + privateKey, err := rsa.BytesToRSAKey(privateKeyBytes) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to parse private key: %w", err)) + } + + // Create the client certificate to allow for external access to the cluster for the holders of this private key + signedCertificate, err := cloudk8s.ClientCertificateData(ctx, clientset, args.Username, privateKey) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to get signed certificate: %w", err)) + } + + // Make the user a cluster admin + err = cloudk8s.SetUserRole(ctx, clientset, args.Username, args.Role) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to set user role: %w", err)) + } + + // Get the certificate authority data + certificateAuthorityData, err := base64.StdEncoding.DecodeString(cluster.GetClusterCACertificateBase64()) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode certificate authority data: %w", err)) + } + + // Generate the complete kubeconfig + kubeconfigBytes, err := clientcmd.Write(k8scmd.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: map[string]*k8scmd.Cluster{ + cluster.GetRefID(): { + Server: cluster.GetAPIEndpoint(), + CertificateAuthorityData: certificateAuthorityData, + }, + }, + AuthInfos: map[string]*k8scmd.AuthInfo{ + cluster.GetRefID(): { + ClientCertificateData: signedCertificate, + ClientKeyData: privateKeyBytes, + }, + }, + }) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to write kubeconfig: %w", err)) + } + + brevClusterUser, err := v1.NewClusterUser(v1.ClusterUserSettings{ + ClusterName: cluster.GetRefID(), + ClusterCertificateAuthorityDataBase64: cluster.GetClusterCACertificateBase64(), + ClusterServerURL: cluster.GetAPIEndpoint(), + Username: args.Username, + UserClientCertificateDataBase64: base64.StdEncoding.EncodeToString(signedCertificate), + UserClientKeyDataBase64: base64.StdEncoding.EncodeToString(privateKeyBytes), + KubeconfigBase64: base64.StdEncoding.EncodeToString(kubeconfigBytes), + }) + if err != nil { + return nil, 
errors.WrapAndTrace(fmt.Errorf("failed to create cluster user: %w", err))
+	}
+	return brevClusterUser, nil
+}
+
+func validatePutUserArgs(args v1.SetClusterUserArgs) error {
+	errs := []error{}
+	if args.Username == "" {
+		errs = append(errs, errUsernameIsRequired)
+	}
+	if args.Role == "" {
+		errs = append(errs, errRoleIsRequired)
+	}
+	if args.ClusterID == "" {
+		errs = append(errs, errClusterIDIsRequired)
+	}
+	if args.RSAPEMBase64 == "" {
+		errs = append(errs, errRSAPEMBase64IsRequired)
+	}
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
+
+func (c *NebiusClient) CreateNodeGroup(ctx context.Context, args v1.CreateNodeGroupArgs) (*v1.NodeGroup, error) {
+	nebiusNodeGroupService := c.sdk.Services().MK8S().V1().NodeGroup()
+
+	err := validateCreateNodeGroupArgs(args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Fetch the cluster the node group will be added to
+	cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{
+		ID: args.ClusterID,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(fmt.Errorf("failed to get cluster: %w", err))
+	}
+
+	// Nebius instance types are expressed as "<platform>.<preset>"
+	// (e.g. "cpu-d3.4vcpu-16gb"); reject malformed values instead of
+	// panicking on a missing preset
+	parts := strings.Split(args.InstanceType, ".")
+	if len(parts) != 2 {
+		return nil, errors.WrapAndTrace(fmt.Errorf("instance type %q is not in <platform>.<preset> format", args.InstanceType))
+	}
+	platform := parts[0]
+	preset := parts[1]
+
+	labels := make(map[string]string)
+	for key, value := range args.Tags {
+		labels[key] = value
+	}
+
+	// Add the required labels
+	labels[labelBrevRefID] = args.RefID
+	labels[labelCreatedBy] = labelBrevCloudSDK
+
+	// Create the node group
+	createNodeGroupOperation, err := nebiusNodeGroupService.Create(ctx, &nebiusmk8s.CreateNodeGroupRequest{
+		Metadata: &nebiuscommon.ResourceMetadata{
+			Name:     args.Name,
+			ParentId: string(cluster.GetID()),
+			Labels:   labels,
+		},
+		Spec: &nebiusmk8s.NodeGroupSpec{
+			Size: &nebiusmk8s.NodeGroupSpec_Autoscaling{
+				Autoscaling: &nebiusmk8s.NodeGroupAutoscalingSpec{
+					MinNodeCount: int64(args.MinNodeCount),
+					MaxNodeCount: int64(args.MaxNodeCount),
+				},
+			},
+			Template: &nebiusmk8s.NodeTemplate{
+				Resources: &nebiusmk8s.ResourcesSpec{
+					Platform: platform,
+					Size: &nebiusmk8s.ResourcesSpec_Preset{
+						Preset: preset,
+					},
+				},
+				GpuSettings: &nebiusmk8s.GpuSettings{
+					DriversPreset: "cuda12",
+				},
+				BootDisk: &nebiusmk8s.DiskSpec{
+					Type: nebiusmk8s.DiskSpec_NETWORK_SSD,
+					Size: &nebiusmk8s.DiskSpec_SizeGibibytes{
+						SizeGibibytes: int64(args.DiskSizeGiB),
+					},
+				},
+			},
+		},
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	brevNodeGroup, err := v1.NewNodeGroup(v1.NodeGroupSettings{
+		ID:           v1.CloudProviderResourceID(createNodeGroupOperation.ResourceID()),
+		Name:         args.Name,
+		RefID:        args.RefID,
+		MinNodeCount: args.MinNodeCount,
+		MaxNodeCount: args.MaxNodeCount,
+		InstanceType: args.InstanceType,
+		DiskSizeGiB:  args.DiskSizeGiB,
+		Status:       v1.NodeGroupStatusPending,
+		Tags:         args.Tags,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevNodeGroup, nil
+}
+
+func validateCreateNodeGroupArgs(args v1.CreateNodeGroupArgs) error {
+	if args.Name == "" {
+		return errNodeGroupNameIsRequired
+	}
+	if args.RefID == "" {
+		return errNodeGroupRefIDIsRequired
+	}
+	if args.MinNodeCount < 1 {
+		return errNodeGroupMinNodeCountMustBeGreaterThan0
+	}
+	if args.MaxNodeCount < 1 {
+		return errNodeGroupMaxNodeCountMustBeGreaterThan0
+	}
+	if args.MaxNodeCount < args.MinNodeCount {
+		return errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount
+	}
+	if args.DiskSizeGiB < 64 {
+		return errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo64
+	}
+	if args.InstanceType == "" {
+		return errNodeGroupInstanceTypeIsRequired
+	}
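+	// All checks passed. Unlike validatePutUserArgs above, which joins every
+	// failure with errors.Join, this validator reports only the first one.
+	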
return nil +} + +func (c *NebiusClient) GetNodeGroup(ctx context.Context, args v1.GetNodeGroupArgs) (*v1.NodeGroup, error) { + nebiusNodeGroupService := c.sdk.Services().MK8S().V1().NodeGroup() + + nodeGroup, err := nebiusNodeGroupService.Get(ctx, &nebiusmk8s.GetNodeGroupRequest{ + Id: string(args.ID), + }) + if err != nil { + if grpcstatus.Code(err) == grpccodes.NotFound { + return nil, v1.ErrResourceNotFound + } + return nil, errors.WrapAndTrace(err) + } + + brevNodeGroup, err := parseNebiusNodeGroup(nodeGroup) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevNodeGroup, nil +} + +func parseNebiusNodeGroup(nodeGroup *nebiusmk8s.NodeGroup) (*v1.NodeGroup, error) { + brevNodeGroup, err := v1.NewNodeGroup(v1.NodeGroupSettings{ + ID: v1.CloudProviderResourceID(nodeGroup.Metadata.Id), + RefID: nodeGroup.Metadata.Labels[labelBrevRefID], + Name: nodeGroup.Metadata.Name, + MinNodeCount: int(nodeGroup.Spec.GetAutoscaling().MinNodeCount), + MaxNodeCount: int(nodeGroup.Spec.GetAutoscaling().MaxNodeCount), + InstanceType: nodeGroup.Spec.Template.Resources.Platform + "." + nodeGroup.Spec.Template.Resources.GetPreset(), + DiskSizeGiB: int(nodeGroup.Spec.Template.BootDisk.GetSizeGibibytes()), + Status: parseNebiusNodeGroupStatus(nodeGroup.Status), + Tags: v1.Tags(nodeGroup.Metadata.Labels), + }) + if err != nil { + return nil, errors.WrapAndTrace(err) + } + return brevNodeGroup, nil +} + +func parseNebiusNodeGroupStatus(status *nebiusmk8s.NodeGroupStatus) v1.NodeGroupStatus { + if status == nil { + return v1.NodeGroupStatusUnknown + } + switch status.State { + case nebiusmk8s.NodeGroupStatus_PROVISIONING: + return v1.NodeGroupStatusPending + case nebiusmk8s.NodeGroupStatus_RUNNING: + return v1.NodeGroupStatusAvailable + case nebiusmk8s.NodeGroupStatus_DELETING: + return v1.NodeGroupStatusDeleting + } + return v1.NodeGroupStatusUnknown +} + +func (c *NebiusClient) ModifyNodeGroup(ctx context.Context, args v1.ModifyNodeGroupArgs) error { + nebiusNodeGroupService := c.sdk.Services().MK8S().V1().NodeGroup() + + err := validateModifyNodeGroupArgs(args) + if err != nil { + return errors.WrapAndTrace(err) + } + + nodeGroup, err := nebiusNodeGroupService.Get(ctx, &nebiusmk8s.GetNodeGroupRequest{ + Id: string(args.ID), + }) + if err != nil { + return errors.WrapAndTrace(err) + } + + _, err = nebiusNodeGroupService.Update(ctx, &nebiusmk8s.UpdateNodeGroupRequest{ + Metadata: &nebiuscommon.ResourceMetadata{ + Id: nodeGroup.Metadata.Id, + Name: nodeGroup.Metadata.Name, + ParentId: nodeGroup.Metadata.ParentId, + Labels: nodeGroup.Metadata.Labels, + }, + Spec: &nebiusmk8s.NodeGroupSpec{ + Size: &nebiusmk8s.NodeGroupSpec_Autoscaling{ + Autoscaling: &nebiusmk8s.NodeGroupAutoscalingSpec{ + MinNodeCount: int64(args.MinNodeCount), + MaxNodeCount: int64(args.MaxNodeCount), + }, + }, + Template: &nebiusmk8s.NodeTemplate{ + Resources: &nebiusmk8s.ResourcesSpec{ + Platform: nodeGroup.Spec.GetTemplate().GetResources().GetPlatform(), + Size: &nebiusmk8s.ResourcesSpec_Preset{ + Preset: nodeGroup.Spec.GetTemplate().GetResources().GetPreset(), + }, + }, + GpuSettings: &nebiusmk8s.GpuSettings{ + DriversPreset: nodeGroup.Spec.GetTemplate().GetGpuSettings().GetDriversPreset(), + }, + BootDisk: &nebiusmk8s.DiskSpec{ + Type: nodeGroup.Spec.GetTemplate().GetBootDisk().GetType(), + Size: &nebiusmk8s.DiskSpec_SizeGibibytes{ + SizeGibibytes: nodeGroup.Spec.GetTemplate().GetBootDisk().GetSizeGibibytes(), + }, + }, + }, + }, + }) + if err != nil { + return errors.WrapAndTrace(fmt.Errorf("failed to modify node group: 
%w", err)) + } + + return nil +} + +func validateModifyNodeGroupArgs(args v1.ModifyNodeGroupArgs) error { + if args.MinNodeCount < 1 { + return errNodeGroupMinNodeCountMustBeGreaterThan0 + } + if args.MaxNodeCount < 1 { + return errNodeGroupMaxNodeCountMustBeGreaterThan0 + } + if args.MaxNodeCount < args.MinNodeCount { + return errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount + } + return nil +} + +func (c *NebiusClient) DeleteNodeGroup(ctx context.Context, args v1.DeleteNodeGroupArgs) error { + nebiusNodeGroupService := c.sdk.Services().MK8S().V1().NodeGroup() + + nodeGroup, err := c.GetNodeGroup(ctx, v1.GetNodeGroupArgs{ + ID: args.ID, + }) + if err != nil { + return errors.WrapAndTrace(fmt.Errorf("failed to get node group: %w", err)) + } + + _, err = nebiusNodeGroupService.Delete(ctx, &nebiusmk8s.DeleteNodeGroupRequest{ + Id: string(nodeGroup.GetID()), + }) + if err != nil { + return errors.WrapAndTrace(fmt.Errorf("failed to delete node group: %w", err)) + } + + return nil +} + +func (c *NebiusClient) DeleteCluster(ctx context.Context, args v1.DeleteClusterArgs) error { + nebiusClusterService := c.sdk.Services().MK8S().V1().Cluster() + + // Fetch the cluster the user key will be added to + cluster, err := c.GetCluster(ctx, v1.GetClusterArgs{ //nolint:staticcheck // prefer explicit struct literal + ID: args.ID, + }) + if err != nil { + return errors.WrapAndTrace(fmt.Errorf("failed to get cluster: %w", err)) + } + + _, err = nebiusClusterService.Delete(ctx, &nebiusmk8s.DeleteClusterRequest{ + Id: string(cluster.GetID()), + }) + if err != nil { + return errors.WrapAndTrace(fmt.Errorf("failed to delete cluster: %w", err)) + } + + return nil +} + +func (c *NebiusClient) newK8sClient(ctx context.Context, cluster *v1.Cluster) (*kubernetes.Clientset, error) { + // Decode the cluster CA certificate + clusterCACertificate, err := base64.StdEncoding.DecodeString(cluster.GetClusterCACertificateBase64()) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to decode cluster CA certificate: %w", err)) + } + + // Get a bearer token to authenticate to the cluster + bearerToken, err := c.sdk.BearerToken(ctx) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to get bearer token: %w", err)) + } + + // Create a clientset to interact with the cluster using the bearer token and CA certificate + clientset, err := kubernetes.NewForConfig(&rest.Config{ + Host: cluster.GetAPIEndpoint(), + BearerToken: bearerToken.Token, + TLSClientConfig: rest.TLSClientConfig{ + CAData: clusterCACertificate, + }, + }) + if err != nil { + return nil, errors.WrapAndTrace(fmt.Errorf("failed to create clientset: %w", err)) + } + + return clientset, nil +} diff --git a/v1/providers/nebius/kubernetes_unit_test.go b/v1/providers/nebius/kubernetes_unit_test.go new file mode 100644 index 0000000..65db631 --- /dev/null +++ b/v1/providers/nebius/kubernetes_unit_test.go @@ -0,0 +1,354 @@ +package v1 + +import ( + "errors" + "testing" + + nebiusmk8s "github.com/nebius/gosdk/proto/nebius/mk8s/v1" + + v1 "github.com/brevdev/cloud/v1" +) + +func TestValidateCreateNodeGroupArgs(t *testing.T) { //nolint:funlen // test ok + tests := []struct { + name string + args v1.CreateNodeGroupArgs + expectError error + }{ + { + name: "valid args", + args: v1.CreateNodeGroupArgs{ + Name: "test-node-group", + RefID: "test-ref", + MinNodeCount: 1, + MaxNodeCount: 3, + InstanceType: "cpu-d3.4vcpu-16gb", + DiskSizeGiB: 64, + ClusterID: "cluster-123", + }, + expectError: nil, + }, + { + name: "missing name", + 
args: v1.CreateNodeGroupArgs{
+				Name:         "",
+				RefID:        "test-ref",
+				MinNodeCount: 1,
+				MaxNodeCount: 3,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupNameIsRequired,
+		},
+		{
+			name: "missing refID",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "",
+				MinNodeCount: 1,
+				MaxNodeCount: 3,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupRefIDIsRequired,
+		},
+		{
+			name: "min node count less than 1",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "test-ref",
+				MinNodeCount: 0,
+				MaxNodeCount: 3,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupMinNodeCountMustBeGreaterThan0,
+		},
+		{
+			name: "max node count less than 1",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "test-ref",
+				MinNodeCount: 1,
+				MaxNodeCount: 0,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupMaxNodeCountMustBeGreaterThan0,
+		},
+		{
+			name: "max node count less than min node count",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "test-ref",
+				MinNodeCount: 5,
+				MaxNodeCount: 3,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount,
+		},
+		{
+			name: "disk size less than 64",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "test-ref",
+				MinNodeCount: 1,
+				MaxNodeCount: 3,
+				InstanceType: "cpu-d3.4vcpu-16gb",
+				DiskSizeGiB:  32,
+			},
+			expectError: errNodeGroupDiskSizeGiBMustBeGreaterThanOrEqualTo64,
+		},
+		{
+			name: "missing instance type",
+			args: v1.CreateNodeGroupArgs{
+				Name:         "test-node-group",
+				RefID:        "test-ref",
+				MinNodeCount: 1,
+				MaxNodeCount: 3,
+				InstanceType: "",
+				DiskSizeGiB:  64,
+			},
+			expectError: errNodeGroupInstanceTypeIsRequired,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validateCreateNodeGroupArgs(tt.args)
+			if err != nil && tt.expectError != nil {
+				if !errors.Is(err, tt.expectError) {
+					t.Errorf("expected error %v, got %v", tt.expectError, err)
+				}
+			}
+			if err == nil && tt.expectError != nil {
+				t.Errorf("expected error but got nil")
+			}
+			if err != nil && tt.expectError == nil {
+				t.Errorf("expected no error but got %v", err)
+			}
+		})
+	}
+}
+
+func TestValidateModifyNodeGroupArgs(t *testing.T) {
+	tests := []struct {
+		name        string
+		args        v1.ModifyNodeGroupArgs
+		expectError error
+	}{
+		{
+			name: "valid args",
+			args: v1.ModifyNodeGroupArgs{
+				ID:           "node-group-123",
+				ClusterID:    "cluster-123",
+				MinNodeCount: 1,
+				MaxNodeCount: 3,
+			},
+			expectError: nil,
+		},
+		{
+			name: "min node count less than 1",
+			args: v1.ModifyNodeGroupArgs{
+				ID:           "node-group-123",
+				ClusterID:    "cluster-123",
+				MinNodeCount: 0,
+				MaxNodeCount: 3,
+			},
+			expectError: errNodeGroupMinNodeCountMustBeGreaterThan0,
+		},
+		{
+			name: "max node count less than 1",
+			args: v1.ModifyNodeGroupArgs{
+				ID:           "node-group-123",
+				ClusterID:    "cluster-123",
+				MinNodeCount: 1,
+				MaxNodeCount: 0,
+			},
+			expectError: errNodeGroupMaxNodeCountMustBeGreaterThan0,
+		},
+		{
+			name: "max node count less than min node count",
+			args: v1.ModifyNodeGroupArgs{
+				ID:           "node-group-123",
+				ClusterID:    "cluster-123",
+				MinNodeCount: 5,
+				MaxNodeCount: 3,
+			},
+			expectError: errNodeGroupMaxNodeCountMustBeGreaterThanOrEqualToMinNodeCount,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validateModifyNodeGroupArgs(tt.args)
+			if err != nil && tt.expectError != nil {
+				if !errors.Is(err, tt.expectError) {
+					t.Errorf("expected error %v, got %v", tt.expectError, err)
+				}
+			}
+			if err == nil && tt.expectError != nil {
+				t.Errorf("expected error but got nil")
+			}
+			if err != nil && tt.expectError == nil {
+				t.Errorf("expected no error but got %v", err)
+			}
+		})
+	}
+}
+
+func TestValidatePutUserArgs(t *testing.T) {
+	tests := []struct {
+		name        string
+		args        v1.SetClusterUserArgs
+		expectError error
+	}{
+		{
+			name: "valid args",
+			args: v1.SetClusterUserArgs{
+				Username:     "test-user",
+				Role:         "cluster-admin",
+				ClusterID:    "cluster-123",
+				RSAPEMBase64: "base64encodedkey",
+			},
+			expectError: nil,
+		},
+		{
+			name: "missing username",
+			args: v1.SetClusterUserArgs{
+				Username:     "",
+				Role:         "cluster-admin",
+				ClusterID:    "cluster-123",
+				RSAPEMBase64: "base64encodedkey",
+			},
+			expectError: errUsernameIsRequired,
+		},
+		{
+			name: "missing role",
+			args: v1.SetClusterUserArgs{
+				Username:     "test-user",
+				Role:         "",
+				ClusterID:    "cluster-123",
+				RSAPEMBase64: "base64encodedkey",
+			},
+			expectError: errRoleIsRequired,
+		},
+		{
+			name: "missing cluster ID",
+			args: v1.SetClusterUserArgs{
+				Username:     "test-user",
+				Role:         "cluster-admin",
+				ClusterID:    "",
+				RSAPEMBase64: "base64encodedkey",
+			},
+			expectError: errClusterIDIsRequired,
+		},
+		{
+			name: "missing RSA PEM base64",
+			args: v1.SetClusterUserArgs{
+				Username:     "test-user",
+				Role:         "cluster-admin",
+				ClusterID:    "cluster-123",
+				RSAPEMBase64: "",
+			},
+			expectError: errRSAPEMBase64IsRequired,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validatePutUserArgs(tt.args)
+			if err != nil && tt.expectError != nil {
+				if !errors.Is(err, tt.expectError) {
+					t.Errorf("expected error %v, got %v", tt.expectError, err)
+				}
+			}
+			if err == nil && tt.expectError != nil {
+				t.Errorf("expected error but got nil")
+			}
+			if err != nil && tt.expectError == nil {
+				t.Errorf("expected no error but got %v", err)
+			}
+		})
+	}
+}
+
+func TestParseNebiusClusterStatus(t *testing.T) { //nolint:dupl // false positive
+	tests := []struct {
+		name           string
+		status         *nebiusmk8s.ClusterStatus
+		expectedStatus v1.ClusterStatus
+	}{
+		{
+			name:           "nil status",
+			status:         nil,
+			expectedStatus: v1.ClusterStatusUnknown,
+		},
+		{
+			name: "provisioning",
+			status: &nebiusmk8s.ClusterStatus{
+				State: nebiusmk8s.ClusterStatus_PROVISIONING,
+			},
+			expectedStatus: v1.ClusterStatusPending,
+		},
+		{
+			name: "running",
+			status: &nebiusmk8s.ClusterStatus{
+				State: nebiusmk8s.ClusterStatus_RUNNING,
+			},
+			expectedStatus: v1.ClusterStatusAvailable,
+		},
+		{
+			name: "deleting",
+			status: &nebiusmk8s.ClusterStatus{
+				State: nebiusmk8s.ClusterStatus_DELETING,
+			},
+			expectedStatus: v1.ClusterStatusDeleting,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := parseNebiusClusterStatus(tt.status)
+			if result != tt.expectedStatus {
+				t.Errorf("expected status %v, got %v", tt.expectedStatus, result)
+			}
+		})
+	}
+}
+
+func TestParseNebiusNodeGroupStatus(t *testing.T) { //nolint:dupl // false positive
+	tests := []struct {
+		name           string
+		status         *nebiusmk8s.NodeGroupStatus
+		expectedStatus v1.NodeGroupStatus
+	}{
+		{
+			name:           "nil status",
+			status:         nil,
+			expectedStatus: v1.NodeGroupStatusUnknown,
+		},
+		{
+			name: "provisioning",
+			status: &nebiusmk8s.NodeGroupStatus{
+				State: nebiusmk8s.NodeGroupStatus_PROVISIONING,
+			},
+			expectedStatus: v1.NodeGroupStatusPending,
+		},
+		{
+			name: "running",
+			status: &nebiusmk8s.NodeGroupStatus{
+				State: nebiusmk8s.NodeGroupStatus_RUNNING,
+			},
+			expectedStatus: v1.NodeGroupStatusAvailable,
+		},
+		{
+			name: "deleting",
+			status: &nebiusmk8s.NodeGroupStatus{
+				State: nebiusmk8s.NodeGroupStatus_DELETING,
+			},
+			expectedStatus: v1.NodeGroupStatusDeleting,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := parseNebiusNodeGroupStatus(tt.status)
+			if result != tt.expectedStatus {
+				t.Errorf("expected status %v, got %v", tt.expectedStatus, result)
+			}
+		})
+	}
+}
diff --git a/v1/providers/nebius/network.go b/v1/providers/nebius/network.go
new file mode 100644
index 0000000..e6724d4
--- /dev/null
+++ b/v1/providers/nebius/network.go
@@ -0,0 +1,395 @@
+package v1
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	nebiuscommon "github.com/nebius/gosdk/proto/nebius/common/v1"
+	nebiusvpc "github.com/nebius/gosdk/proto/nebius/vpc/v1"
+	nebiusvpcv1 "github.com/nebius/gosdk/services/nebius/vpc/v1"
+	grpccodes "google.golang.org/grpc/codes"
+	grpcstatus "google.golang.org/grpc/status"
+
+	"github.com/brevdev/cloud/internal/errors"
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+var errVPCSubnetCIDRBlockMustBeGreaterThan24 = fmt.Errorf("VPC subnet CIDR block must be greater than /24")
+
+func (c *NebiusClient) CreateVPC(ctx context.Context, args v1.CreateVPCArgs) (*v1.VPC, error) {
+	nebiusNetworkService := c.sdk.Services().VPC().V1().Network()
+	nebiusSubnetService := c.sdk.Services().VPC().V1().Subnet()
+	nebiusPoolService := c.sdk.Services().VPC().V1().Pool()
+
+	// Fetch the target location
+	location, err := c.GetLocation(ctx)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	err = validateCreateVPCArgs(args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create the network
+	vpcID, err := createNetwork(ctx, nebiusNetworkService, nebiusPoolService, c.projectID, args)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Create the subnets
+	subnets := make([]*v1.Subnet, 0)
+	for _, subnetArgs := range args.Subnets {
+		subnet, err := c.createSubnet(ctx, nebiusSubnetService, c.projectID, vpcID, subnetArgs)
+		if err != nil {
+			return nil, errors.WrapAndTrace(err)
+		}
+		subnets = append(subnets, subnet)
+	}
+
+	brevVPC, err := v1.NewVPC(v1.VPCSettings{
+		RefID:     args.RefID,
+		Provider:  CloudProviderID,
+		Name:      args.Name,
+		Location:  location,
+		CidrBlock: args.CidrBlock,
+		Status:    v1.VPCStatusPending,
+		ID:        v1.CloudProviderResourceID(vpcID),
+		Subnets:   subnets,
+		Tags:      args.Tags,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevVPC, nil
+}
+
+func validateCreateVPCArgs(args v1.CreateVPCArgs) error {
+	// Subnet CIDR blocks must be greater than /24
+	for _, subnet := range args.Subnets {
+		larger, err := cidrBlockLargerThanMask(subnet.CidrBlock, 24)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+		if !larger {
+			return errVPCSubnetCIDRBlockMustBeGreaterThan24
+		}
+	}
+	return nil
+}
+
+func cidrBlockLargerThanMask(cidrBlock string, mask int) (bool, error) {
+	_, ipnet, err := net.ParseCIDR(cidrBlock)
+	if err != nil {
+		return false, errors.WrapAndTrace(err)
+	}
+	ones, _ := ipnet.Mask.Size()
+	return ones < mask, nil
+}
+
+func createNetwork(ctx context.Context, nebiusNetworkService nebiusvpcv1.NetworkService, nebiusPoolService nebiusvpcv1.PoolService, projectID string, args v1.CreateVPCArgs) (string, error) {
+	// In Nebius, rather than creating a network with a CIDR, and subnets with slices of that CIDR, we instead first create a pool with
+	// several specific CIDR blocks. These blocks are intended to be used by subnets at the moment of their creation.
+	// As we can add additional CIDR blocks to the pool later, we don't need to specify the entire network CIDR here.
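+	//
+	// For illustration (hypothetical values): a CreateVPCArgs carrying subnet CIDRs
+	// 172.16.0.0/19 and 172.16.32.0/19 yields a pool holding exactly those two CIDRs;
+	// each subnet created afterwards draws its range from the pool rather than from a
+	// network-level CIDR.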
+
+	labels := make(map[string]string)
+	for key, value := range args.Tags {
+		labels[key] = value
+	}
+
+	// Add the required labels
+	labels[labelBrevRefID] = args.RefID
+	labels[labelCreatedBy] = labelBrevCloudSDK
+	labels[labelBrevCIDRBlock] = args.CidrBlock
+
+	// Create the pool with the CIDR blocks for the subnets
+	networkPoolCidrs := make([]*nebiusvpc.PoolCidr, 0)
+	for _, subnet := range args.Subnets {
+		networkPoolCidrs = append(networkPoolCidrs, &nebiusvpc.PoolCidr{Cidr: subnet.CidrBlock})
+	}
+	createPoolOperation, err := nebiusPoolService.Create(ctx, &nebiusvpc.CreatePoolRequest{
+		Metadata: &nebiuscommon.ResourceMetadata{
+			Name:     args.RefID,
+			ParentId: projectID,
+			Labels:   labels,
+		},
+		Spec: &nebiusvpc.PoolSpec{
+			Version:    nebiusvpc.IpVersion_IPV4,
+			Visibility: nebiusvpc.IpVisibility_PRIVATE,
+			Cidrs:      networkPoolCidrs,
+		},
+	})
+	if err != nil {
+		return "", errors.WrapAndTrace(err)
+	}
+
+	// Here we must wait for the pool to be created, as otherwise we cannot proceed to create the network.
+	createPoolOperation, err = createPoolOperation.Wait(ctx)
+	if err != nil {
+		return "", errors.WrapAndTrace(err)
+	}
+	poolID := createPoolOperation.ResourceID()
+
+	// Create the network with the pool
+	createNetworkOperation, err := nebiusNetworkService.Create(ctx, &nebiusvpc.CreateNetworkRequest{
+		Metadata: &nebiuscommon.ResourceMetadata{
+			Name:     args.Name,
+			ParentId: projectID,
+			Labels:   labels,
+		},
+		Spec: &nebiusvpc.NetworkSpec{
+			Ipv4PrivatePools: &nebiusvpc.IPv4PrivateNetworkPools{
+				Pools: []*nebiusvpc.NetworkPool{
+					{Id: poolID},
+				},
+			},
+		},
+	})
+	if err != nil {
+		return "", errors.WrapAndTrace(err)
+	}
+
+	return createNetworkOperation.ResourceID(), nil
+}
+
+func (c *NebiusClient) createSubnet(ctx context.Context, nebiusSubnetService nebiusvpcv1.SubnetService, projectID string, networkID string, args v1.CreateSubnetArgs) (*v1.Subnet, error) {
+	// In Nebius, the concept of "private" or "public" subnets is not a thing. Instead, this concept is indirect -- subnets can be marked in such a
+	// way as to allow resources placed within them to allocate public IPs. This is controlled by the "allowPublicIPAllocations" flag below.
+
+	allowPublicIPAllocations := args.Type == v1.SubnetTypePublic
+
+	labels := make(map[string]string)
+	for key, value := range args.Tags {
+		labels[key] = value
+	}
+
+	// Fetch the target location
+	location, err := c.GetLocation(ctx)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	// Add the required labels
+	if args.RefID != "" {
+		labels[labelBrevRefID] = args.RefID
+	} else {
+		labels[labelBrevRefID] = fmt.Sprintf("%s-%s-%s", networkID, args.CidrBlock, args.Type)
+	}
+	labels[labelCreatedBy] = labelBrevCloudSDK
+	labels[labelBrevSubnetType] = string(args.Type)
+	labels[labelBrevVPCID] = networkID
+	labels[labelBrevCIDRBlock] = args.CidrBlock
+
+	// Create the subnet. It draws its range from the network's pools (UseNetworkPools) rather than from an
+	// explicit CIDR; the allowPublicIPAllocations flag controls whether the public pools may be used.
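+	// For example: a v1.SubnetTypePublic subnet is created with
+	// Ipv4PublicPools.UseNetworkPools set to true, so resources placed in it may
+	// allocate public IPs; a v1.SubnetTypePrivate subnet leaves that flag false.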
+	createSubnetOperation, err := nebiusSubnetService.Create(ctx, &nebiusvpc.CreateSubnetRequest{
+		Metadata: &nebiuscommon.ResourceMetadata{
+			Name:     labels[labelBrevRefID],
+			ParentId: projectID,
+			Labels:   labels,
+		},
+		Spec: &nebiusvpc.SubnetSpec{
+			NetworkId: networkID,
+			Ipv4PrivatePools: &nebiusvpc.IPv4PrivateSubnetPools{
+				UseNetworkPools: true,
+			},
+			Ipv4PublicPools: &nebiusvpc.IPv4PublicSubnetPools{
+				UseNetworkPools: allowPublicIPAllocations,
+			},
+		},
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	createSubnetOperation, err = createSubnetOperation.Wait(ctx)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	brevSubnet, err := v1.NewSubnet(v1.SubnetSettings{
+		ID:        v1.CloudProviderResourceID(createSubnetOperation.ResourceID()),
+		RefID:     labels[labelBrevRefID],
+		Name:      labels[labelBrevRefID],
+		Location:  location,
+		CidrBlock: args.CidrBlock,
+		Type:      args.Type,
+		VPCID:     v1.CloudProviderResourceID(networkID),
+		Tags:      args.Tags,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevSubnet, nil
+}
+
+func (c *NebiusClient) GetVPC(ctx context.Context, args v1.GetVPCArgs) (*v1.VPC, error) {
+	nebiusNetworkService := c.sdk.Services().VPC().V1().Network()
+	nebiusSubnetService := c.sdk.Services().VPC().V1().Subnet()
+
+	// Fetch the target location
+	location, err := c.GetLocation(ctx)
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	network, err := nebiusNetworkService.Get(ctx, &nebiusvpc.GetNetworkRequest{
+		Id: string(args.ID),
+	})
+	if err != nil {
+		if grpcstatus.Code(err) == grpccodes.NotFound {
+			return nil, v1.ErrResourceNotFound
+		}
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	nebiusSubnets, err := nebiusSubnetService.ListByNetwork(ctx, &nebiusvpc.ListSubnetsByNetworkRequest{
+		NetworkId: network.Metadata.Id,
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+
+	subnets := make([]*v1.Subnet, 0)
+	for _, subnet := range nebiusSubnets.Items {
+		brevSubnet, err := v1.NewSubnet(v1.SubnetSettings{
+			ID:        v1.CloudProviderResourceID(subnet.Metadata.Id),
+			RefID:     subnet.Metadata.Labels[labelBrevRefID],
+			Location:  location,
+			CidrBlock: subnet.Metadata.Labels[labelBrevCIDRBlock],
+			Type:      v1.SubnetType(subnet.Metadata.Labels[labelBrevSubnetType]),
+			VPCID:     v1.CloudProviderResourceID(network.Metadata.Id),
+			Name:      subnet.Metadata.Name,
+			Tags:      v1.Tags(subnet.Metadata.Labels),
+		})
+		if err != nil {
+			return nil, errors.WrapAndTrace(err)
+		}
+		subnets = append(subnets, brevSubnet)
+	}
+
+	brevVPC, err := v1.NewVPC(v1.VPCSettings{
+		ID:        v1.CloudProviderResourceID(network.Metadata.Id),
+		RefID:     network.Metadata.Labels[labelBrevRefID],
+		Provider:  CloudProviderID,
+		Name:      network.Metadata.Name,
+		Location:  location,
+		Status:    parseNebiusNetworkStatus(network.Status),
+		Subnets:   subnets,
+		CidrBlock: network.Metadata.Labels[labelBrevCIDRBlock],
+		Tags:      v1.Tags(network.Metadata.Labels),
+	})
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return brevVPC, nil
+}
+
+func parseNebiusNetworkStatus(status *nebiusvpc.NetworkStatus) v1.VPCStatus {
+	// Guard against a nil status, mirroring the cluster and node group status parsers.
+	if status == nil {
+		return v1.VPCStatusUnknown
+	}
+	switch status.State {
+	case nebiusvpc.NetworkStatus_CREATING:
+		return v1.VPCStatusPending
+	case nebiusvpc.NetworkStatus_READY:
+		return v1.VPCStatusAvailable
+	case nebiusvpc.NetworkStatus_DELETING:
+		return v1.VPCStatusDeleting
+	}
+	return v1.VPCStatusUnknown
+}
+
+func (c *NebiusClient) DeleteVPC(ctx context.Context, args v1.DeleteVPCArgs) error {
+	nebiusNetworkService := c.sdk.Services().VPC().V1().Network()
+	nebiusPoolService := c.sdk.Services().VPC().V1().Pool()
+	nebiusSubnetService := c.sdk.Services().VPC().V1().Subnet()
+
+	// Find the network
+	network, err := nebiusNetworkService.Get(ctx, &nebiusvpc.GetNetworkRequest{
+		Id: string(args.ID),
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Find the network's subnets
+	subnets, err := nebiusSubnetService.ListByNetwork(ctx, &nebiusvpc.ListSubnetsByNetworkRequest{
+		NetworkId: network.Metadata.Id,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	// Delete the subnets
+	for _, subnet := range subnets.Items {
+		_, err := nebiusSubnetService.Delete(ctx, &nebiusvpc.DeleteSubnetRequest{
+			Id: subnet.Metadata.Id,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	// Pools are created with the VPC's RefID as their name (see createNetwork), so look the pool up by
+	// the RefID recorded in the network's labels rather than by the network's display name.
+	pool, err := nebiusPoolService.GetByName(ctx, &nebiusvpc.GetPoolByNameRequest{
+		ParentId: network.Metadata.ParentId,
+		Name:     network.Metadata.Labels[labelBrevRefID],
+	})
+	if err != nil {
+		if grpcstatus.Code(err) != grpccodes.NotFound {
+			return errors.WrapAndTrace(err)
+		}
+		// Pool not found, continue
+	}
+
+	if pool != nil {
+		// Remove pool from network
+		updateNetworkOperation, err := nebiusNetworkService.Update(ctx, &nebiusvpc.UpdateNetworkRequest{
+			Metadata: &nebiuscommon.ResourceMetadata{
+				Name:     network.Metadata.Name,
+				ParentId: network.Metadata.ParentId,
+				Id:       network.Metadata.Id,
+			},
+			Spec: &nebiusvpc.NetworkSpec{
+				Ipv4PrivatePools: &nebiusvpc.IPv4PrivateNetworkPools{
+					Pools: []*nebiusvpc.NetworkPool{},
+				},
+			},
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Here we must wait for the network to be updated, as otherwise we cannot proceed to delete the pool.
+		_, err = updateNetworkOperation.Wait(ctx)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Delete pool
+		deletePoolOperation, err := nebiusPoolService.Delete(ctx, &nebiusvpc.DeletePoolRequest{
+			Id: pool.Metadata.Id,
+		})
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+
+		// Here we must wait for the pool to be deleted, as otherwise we cannot proceed to delete the network.
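+		//
+		// To summarize, teardown mirrors creation in reverse: delete the subnets, detach
+		// the pool from the network, delete the pool, and only then delete the network.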
+		_, err = deletePoolOperation.Wait(ctx)
+		if err != nil {
+			return errors.WrapAndTrace(err)
+		}
+	}
+
+	// Delete the network
+	_, err = nebiusNetworkService.Delete(ctx, &nebiusvpc.DeleteNetworkRequest{
+		Id: network.Metadata.Id,
+	})
+	if err != nil {
+		return errors.WrapAndTrace(err)
+	}
+
+	return nil
+}
diff --git a/v1/providers/nebius/network_unit_test.go b/v1/providers/nebius/network_unit_test.go
new file mode 100644
index 0000000..d9b09aa
--- /dev/null
+++ b/v1/providers/nebius/network_unit_test.go
@@ -0,0 +1,245 @@
+package v1
+
+import (
+	"errors"
+	"testing"
+
+	nebiusvpc "github.com/nebius/gosdk/proto/nebius/vpc/v1"
+
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+func TestValidateCreateVPCArgs(t *testing.T) {
+	tests := []struct {
+		name        string
+		args        v1.CreateVPCArgs
+		expectError error
+	}{
+		{
+			name: "valid args with large enough subnets",
+			args: v1.CreateVPCArgs{
+				RefID:     "test-vpc",
+				Name:      "test-vpc",
+				CidrBlock: "172.16.0.0/16",
+				Subnets: []v1.CreateSubnetArgs{
+					{CidrBlock: "172.16.0.0/19", Type: v1.SubnetTypePublic},
+					{CidrBlock: "172.16.32.0/19", Type: v1.SubnetTypePrivate},
+				},
+			},
+			expectError: nil,
+		},
+		{
+			name: "invalid - subnet CIDR /24 (not larger than /24)",
+			args: v1.CreateVPCArgs{
+				RefID:     "test-vpc",
+				Name:      "test-vpc",
+				CidrBlock: "172.16.0.0/16",
+				Subnets: []v1.CreateSubnetArgs{
+					{CidrBlock: "172.16.0.0/24", Type: v1.SubnetTypePublic},
+				},
+			},
+			expectError: errVPCSubnetCIDRBlockMustBeGreaterThan24,
+		},
+		{
+			name: "invalid - subnet CIDR /28 (smaller than /24)",
+			args: v1.CreateVPCArgs{
+				RefID:     "test-vpc",
+				Name:      "test-vpc",
+				CidrBlock: "172.16.0.0/16",
+				Subnets: []v1.CreateSubnetArgs{
+					{CidrBlock: "172.16.0.0/28", Type: v1.SubnetTypePrivate},
+				},
+			},
+			expectError: errVPCSubnetCIDRBlockMustBeGreaterThan24,
+		},
+		{
+			name: "valid - subnet CIDR /23 (larger than /24)",
+			args: v1.CreateVPCArgs{
+				RefID:     "test-vpc",
+				Name:      "test-vpc",
+				CidrBlock: "172.16.0.0/16",
+				Subnets: []v1.CreateSubnetArgs{
+					{CidrBlock: "172.16.0.0/23", Type: v1.SubnetTypePublic},
+				},
+			},
+			expectError: nil,
+		},
+		{
+			name: "invalid - one subnet too small",
+			args: v1.CreateVPCArgs{
+				RefID:     "test-vpc",
+				Name:      "test-vpc",
+				CidrBlock: "172.16.0.0/16",
+				Subnets: []v1.CreateSubnetArgs{
+					{CidrBlock: "172.16.0.0/19", Type: v1.SubnetTypePublic},
+					{CidrBlock: "172.16.32.0/28", Type: v1.SubnetTypePrivate},
+				},
+			},
+			expectError: errVPCSubnetCIDRBlockMustBeGreaterThan24,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validateCreateVPCArgs(tt.args)
+			if err != nil && tt.expectError != nil {
+				if !errors.Is(err, tt.expectError) {
+					t.Fatalf("expected error %v, got %v", tt.expectError, err)
+				}
+			}
+			if err == nil && tt.expectError != nil {
+				t.Fatalf("expected error but got nil")
+			}
+			if err != nil && tt.expectError == nil {
+				t.Fatalf("expected no error but got: %v", err)
+			}
+		})
+	}
+}
+
+func TestCidrBlockLargerThanMask(t *testing.T) { //nolint:funlen // test ok
+	tests := []struct {
+		name        string
+		cidrBlock   string
+		mask        int
+		expected    bool
+		expectError bool
+	}{
+		{
+			name:        "/16 is larger than /24",
+			cidrBlock:   "10.0.0.0/16",
+			mask:        24,
+			expected:    true,
+			expectError: false,
+		},
+		{
+			name:        "/19 is larger than /24",
+			cidrBlock:   "10.0.0.0/19",
+			mask:        24,
+			expected:    true,
+			expectError: false,
+		},
+		{
+			name:        "/23 is larger than /24",
+			cidrBlock:   "10.0.0.0/23",
+			mask:        24,
+			expected:    true,
+			expectError: false,
+		},
+		{
+			name:        "/24 is not larger than /24",
+			cidrBlock:   "10.0.0.0/24",
+			mask:        24,
+			expected:    false,
+			expectError: false,
+		},
+		{
+			name:        "/28 is not larger than /24",
+			cidrBlock:   "10.0.0.0/28",
+			mask:        24,
+			expected:    false,
+			expectError: false,
+		},
+		{
+			name:        "/32 is not larger than /24",
+			cidrBlock:   "10.0.0.0/32",
+			mask:        24,
+			expected:    false,
+			expectError: false,
+		},
+		{
+			name:        "/8 is larger than /16",
+			cidrBlock:   "10.0.0.0/8",
+			mask:        16,
+			expected:    true,
+			expectError: false,
+		},
+		{
+			name:        "/20 is not larger than /16",
+			cidrBlock:   "10.0.0.0/20",
+			mask:        16,
+			expected:    false,
+			expectError: false,
+		},
+		{
+			name:        "invalid CIDR block",
+			cidrBlock:   "invalid",
+			mask:        24,
+			expected:    false,
+			expectError: true,
+		},
+		{
+			name:        "invalid CIDR block - no mask",
+			cidrBlock:   "10.0.0.0",
+			mask:        24,
+			expected:    false,
+			expectError: true,
+		},
+		{
+			name:        "IPv6 /48 is larger than /64",
+			cidrBlock:   "2001:db8::/48",
+			mask:        64,
+			expected:    true,
+			expectError: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := cidrBlockLargerThanMask(tt.cidrBlock, tt.mask)
+
+			if tt.expectError && err == nil {
+				t.Errorf("expected error but got nil")
+			}
+			if !tt.expectError && err != nil {
+				t.Errorf("expected no error but got: %v", err)
+			}
+			if !tt.expectError && result != tt.expected {
+				t.Errorf("expected %v, got %v", tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestParseNebiusNetworkStatus(t *testing.T) {
+	tests := []struct {
+		name           string
+		status         *nebiusvpc.NetworkStatus
+		expectedStatus v1.VPCStatus
+	}{
+		{
+			name: "creating",
+			status: &nebiusvpc.NetworkStatus{
+				State: nebiusvpc.NetworkStatus_CREATING,
+			},
+			expectedStatus: v1.VPCStatusPending,
+		},
+		{
+			name: "ready",
+			status: &nebiusvpc.NetworkStatus{
+				State: nebiusvpc.NetworkStatus_READY,
+			},
+			expectedStatus: v1.VPCStatusAvailable,
+		},
+		{
+			name: "deleting",
+			status: &nebiusvpc.NetworkStatus{
+				State: nebiusvpc.NetworkStatus_DELETING,
+			},
+			expectedStatus: v1.VPCStatusDeleting,
+		},
+		{
+			name: "unknown state",
+			status: &nebiusvpc.NetworkStatus{
+				State: nebiusvpc.NetworkStatus_STATE_UNSPECIFIED,
+			},
+			expectedStatus: v1.VPCStatusUnknown,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := parseNebiusNetworkStatus(tt.status)
+			if result != tt.expectedStatus {
+				t.Errorf("expected status %v, got %v", tt.expectedStatus, result)
+			}
+		})
+	}
+}
diff --git a/v1/providers/nebius/scripts/kubernetes_test.go b/v1/providers/nebius/scripts/kubernetes_test.go
new file mode 100644
index 0000000..bbe2386
--- /dev/null
+++ b/v1/providers/nebius/scripts/kubernetes_test.go
@@ -0,0 +1,184 @@
+//go:build scripts
+// +build scripts
+
+package scripts
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	v1 "github.com/brevdev/cloud/v1"
+	nebius "github.com/brevdev/cloud/v1/providers/nebius"
+)
+
+var platformPresetMap = map[string][]string{
+	"cpu-d3":       {"4vcpu-16gb", "8vcpu-32gb", "16vcpu-64gb", "32vcpu-128gb", "48vcpu-192gb", "64vcpu-256gb", "96vcpu-384gb", "128vcpu-512gb"},
+	"cpu-e2":       {"2vcpu-8gb", "4vcpu-16gb", "8vcpu-32gb", "16vcpu-64gb", "32vcpu-128gb", "48vcpu-192gb", "64vcpu-256gb", "80vcpu-320gb"},
+	"gpu-h200-sxm": {"1gpu-16vcpu-200gb", "8gpu-128vcpu-1600gb"},
+	"gpu-h100-sxm": {"1gpu-16vcpu-200gb", "8gpu-128vcpu-1600gb"},
+	"gpu-l40s-a":   {"1gpu-8vcpu-32gb", "1gpu-16vcpu-64gb", "1gpu-24vcpu-96gb", "1gpu-32vcpu-128gb", "1gpu-40vcpu-160gb"},
+	"gpu-l40s-d":   {"1gpu-16vcpu-96gb", "1gpu-32vcpu-192gb", "1gpu-48vcpu-288gb", "2gpu-64vcpu-384gb", "2gpu-96vcpu-576gb", "4gpu-128vcpu-768gb", "4gpu-192vcpu-1152gb"},
+}
+
+func Test_CreateVPCAndCluster(t *testing.T) {
+	privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64")
os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + + projectID := "project-e00nrhefpr009ynkkzcgba" // eu-north1 + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + vpc, err := nebiusClient.CreateVPC(context.Background(), v1.CreateVPCArgs{ + Name: "cloud-sdk-test", + RefID: "cloud-sdk-test", + CidrBlock: "172.16.0.0/16", + Subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "172.16.0.0/19", Type: v1.SubnetTypePublic}, // note, /24 (IP count: 256) is too small for a subnet + {CidrBlock: "172.16.32.0/19", Type: v1.SubnetTypePrivate}, + }, + }) + if err != nil { + t.Fatalf("failed to create VPC: %v", err) + } + + cluster, err := nebiusClient.CreateCluster(context.Background(), v1.CreateClusterArgs{ + Name: "cloud-sdk-test", + RefID: "cloud-sdk-test", + VPCID: v1.CloudProviderResourceID(vpc.GetID()), + SubnetIDs: []v1.CloudProviderResourceID{v1.CloudProviderResourceID(vpc.GetSubnets()[0].GetID())}, + KubernetesVersion: "1.31", + }) + if err != nil { + t.Fatalf("failed to create cluster: %v", err) + } + + fmt.Println(cluster) +} + +func Test_GetCluster(t *testing.T) { + privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + + projectID := "project-e00nrhefpr009ynkkzcgba" // eu-north1 + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + cluster, err := nebiusClient.GetCluster(context.Background(), v1.GetClusterArgs{ + ID: v1.CloudProviderResourceID("cloud-sdk-test"), + }) + if err != nil { + t.Fatalf("failed to get cluster: %v", err) + } + + fmt.Println(cluster) +} + +func Test_PutUser(t *testing.T) { + testUserPrivateKeyPEMBase64 := os.Getenv("TEST_USER_PRIVATE_KEY_PEM_BASE64") + privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + + projectID := "project-e00nrhefpr009ynkkzcgba" // eu-north1 + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + putUserResponse, err := nebiusClient.SetClusterUser(context.Background(), v1.SetClusterUserArgs{ + Username: "test-user", + Role: "cluster-admin", + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test"), + RSAPEMBase64: testUserPrivateKeyPEMBase64, + }) + if err != nil { + t.Fatalf("failed to put user: %v", err) + } + + fmt.Println(putUserResponse) +} + +func Test_CreateNodeGroup(t *testing.T) { + privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + + projectID := "project-e00nrhefpr009ynkkzcgba" // eu-north1 + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + platform := "gpu-h100-sxm" + preset := 
platformPresetMap[platform][0] + + createNodeGroupResponse, err := nebiusClient.CreateNodeGroup(context.Background(), v1.CreateNodeGroupArgs{ + ClusterID: v1.CloudProviderResourceID("cloud-sdk-test"), + Name: "test-node-group3", + RefID: "test-node-group3", + MinNodeCount: 1, + MaxNodeCount: 2, + InstanceType: fmt.Sprintf("%s.%s", platform, preset), + DiskSizeGiB: 96, + }) + if err != nil { + t.Fatalf("failed to create node group: %v", err) + } + + fmt.Println(createNodeGroupResponse) +} + +func Test_DeleteCluster(t *testing.T) { + privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + projectID := os.Getenv("NEBIUS_PROJECT_ID") + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + err = nebiusClient.DeleteCluster(context.Background(), v1.DeleteClusterArgs{ + ID: v1.CloudProviderResourceID("mk8scluster-u00vgffpfgh3ze60vr"), + }) + if err != nil { + t.Fatalf("failed to delete cluster: %v", err) + } + + fmt.Println("Cluster deleted") +} + +func Test_DeleteVPC(t *testing.T) { + privateKeyPEMBase64 := os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64") + publicKeyID := os.Getenv("NEBIUS_PUBLIC_KEY_ID") + serviceAccountID := os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID") + + projectID := "project-e00nrhefpr009ynkkzcgba" // eu-north1 + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + err = nebiusClient.DeleteVPC(context.Background(), v1.DeleteVPCArgs{ + ID: v1.CloudProviderResourceID("cloud-sdk-test"), + }) + if err != nil { + t.Fatalf("failed to delete VPC: %v", err) + } + + fmt.Println("VPC deleted") +} diff --git a/v1/providers/nebius/scripts/network_test.go b/v1/providers/nebius/scripts/network_test.go new file mode 100644 index 0000000..f989e91 --- /dev/null +++ b/v1/providers/nebius/scripts/network_test.go @@ -0,0 +1,88 @@ +//go:build scripts +// +build scripts + +package scripts + +import ( + "context" + "fmt" + "testing" + + v1 "github.com/brevdev/cloud/v1" + nebius "github.com/brevdev/cloud/v1/providers/nebius" +) + +const ( + privateKeyPEMBase64 = "test" + publicKeyID = "test" + serviceAccountID = "test" + projectID = "test" +) + +func TestCreateVPC(t *testing.T) { + if privateKeyPEMBase64 == "" || publicKeyID == "" || serviceAccountID == "" || projectID == "" { + t.Fatalf("NEBIUS_PRIVATE_KEY_PEM_BASE64, NEBIUS_PUBLIC_KEY_ID, NEBIUS_SERVICE_ACCOUNT_ID, and NEBIUS_PROJECT_ID must be set") + } + + nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID) + if err != nil { + t.Fatalf("failed to create Nebius client: %v", err) + } + + vpc, err := nebiusClient.CreateVPC(context.Background(), v1.CreateVPCArgs{ + Name: "cloud-sdk-test", + RefID: "cloud-sdk-test", + CidrBlock: "172.16.0.0/16", + Subnets: []v1.CreateSubnetArgs{ + {CidrBlock: "172.16.0.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "172.16.1.0/24", Type: v1.SubnetTypePrivate}, + {CidrBlock: "172.16.2.0/24", Type: v1.SubnetTypePublic}, + {CidrBlock: "172.16.3.0/24", Type: v1.SubnetTypePrivate}, + }, + }) + if err != nil { + t.Fatalf("failed to get VPC: %v", err) + } + + fmt.Println(vpc) +} + +func TestGetVPC(t 
+	if privateKeyPEMBase64 == "" || publicKeyID == "" || serviceAccountID == "" || projectID == "" {
+		t.Fatalf("NEBIUS_PRIVATE_KEY_PEM_BASE64, NEBIUS_PUBLIC_KEY_ID, NEBIUS_SERVICE_ACCOUNT_ID, and NEBIUS_PROJECT_ID must be set")
+	}
+
+	nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID)
+	if err != nil {
+		t.Fatalf("failed to create Nebius client: %v", err)
+	}
+
+	vpc, err := nebiusClient.GetVPC(context.Background(), v1.GetVPCArgs{
+		ID: v1.CloudProviderResourceID("cloud-sdk-test"),
+	})
+	if err != nil {
+		t.Fatalf("failed to get VPC: %v", err)
+	}
+
+	fmt.Println(vpc)
+}
+
+func TestDeleteVPC(t *testing.T) {
+	if privateKeyPEMBase64 == "" || publicKeyID == "" || serviceAccountID == "" || projectID == "" {
+		t.Fatalf("NEBIUS_PRIVATE_KEY_PEM_BASE64, NEBIUS_PUBLIC_KEY_ID, NEBIUS_SERVICE_ACCOUNT_ID, and NEBIUS_PROJECT_ID must be set")
+	}
+
+	nebiusClient, err := nebius.NewNebiusClient(context.Background(), "test", publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID)
+	if err != nil {
+		t.Fatalf("failed to create Nebius client: %v", err)
+	}
+
+	err = nebiusClient.DeleteVPC(context.Background(), v1.DeleteVPCArgs{
+		ID: v1.CloudProviderResourceID("vpcnetwork-u00r9ya5rc8wntbffr"),
+	})
+	if err != nil {
+		t.Fatalf("failed to delete VPC: %v", err)
+	}
+
+	fmt.Println("VPC deleted")
+}
diff --git a/v1/providers/nebius/utils.go b/v1/providers/nebius/utils.go
new file mode 100644
index 0000000..f33d046
--- /dev/null
+++ b/v1/providers/nebius/utils.go
@@ -0,0 +1,10 @@
+package v1
+
+const (
+	labelBrevRefID      = "brev-ref-id"
+	labelBrevVPCID      = "brev-vpc-id"
+	labelBrevSubnetType = "brev-subnet-type"
+	labelBrevCIDRBlock  = "brev-cidr-block"
+	labelCreatedBy      = "CreatedBy"
+	labelBrevCloudSDK   = "brev-cloud-sdk"
+)
diff --git a/v1/providers/nebius/validation_kubernetes_test.go b/v1/providers/nebius/validation_kubernetes_test.go
new file mode 100644
index 0000000..8b9fbf0
--- /dev/null
+++ b/v1/providers/nebius/validation_kubernetes_test.go
@@ -0,0 +1,68 @@
+package v1
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/brevdev/cloud/internal/validation"
+	v1 "github.com/brevdev/cloud/v1"
+)
+
+func TestKubernetesValidation(t *testing.T) {
+	if isValidationTest == "" {
+		t.Skip("VALIDATION_TEST is not set, skipping Nebius Kubernetes validation tests")
+	}
+
+	testUserPrivateKeyPEMBase64 := os.Getenv("TEST_USER_PRIVATE_KEY_PEM_BASE64")
+
+	if privateKeyPEMBase64 == "" || publicKeyID == "" || serviceAccountID == "" || projectID == "" {
+		t.Fatalf("NEBIUS_PRIVATE_KEY_PEM_BASE64, NEBIUS_PUBLIC_KEY_ID, NEBIUS_SERVICE_ACCOUNT_ID, and NEBIUS_PROJECT_ID must be set")
+	}
+
+	config := validation.ProviderConfig{
+		Credential: NewNebiusCredential(fmt.Sprintf("validation-%s", t.Name()), publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID),
+	}
+
+	// Use the test name as the name of the cluster and node group
+	name := fmt.Sprintf("cloud-sdk-%s-%s", t.Name(), time.Now().UTC().Format("20060102150405"))
+
+	// Network CIDR
+	networkCidr := "10.0.0.0/16"
+
+	// Network subnets
+	pubSubnet1 := validation.KubernetesValidationSubnetOpts{Name: "pub-subnet-1", RefID: "pub-subnet-1", CidrBlock: "10.0.0.0/19", SubnetType: v1.SubnetTypePublic}
+	prvSubnet1 := validation.KubernetesValidationSubnetOpts{Name: "prv-subnet-1", RefID: "prv-subnet-1", CidrBlock: "10.0.32.0/19", SubnetType: v1.SubnetTypePrivate}
+
+	validation.RunKubernetesValidation(t, config, validation.KubernetesValidationOpts{
+		Name:              name,
+		RefID:             name,
+		KubernetesVersion: "1.31",
+		// Associate the cluster with the private subnets
+		Subnets: []validation.KubernetesValidationSubnetOpts{prvSubnet1},
+		NetworkOpts: &validation.KubernetesValidationNetworkOpts{
+			Name:      name,
+			RefID:     name,
+			CidrBlock: networkCidr,
+			// Build the network with all subnets
+			Subnets: []validation.KubernetesValidationSubnetOpts{pubSubnet1, prvSubnet1},
+		},
+		NodeGroupOpts: &validation.KubernetesValidationNodeGroupOpts{
+			Name:         name,
+			RefID:        name,
+			InstanceType: "cpu-d3.4vcpu-16gb",
+			DiskSizeGiB:  64,
+			MinNodeCount: 1,
+			MaxNodeCount: 1,
+		},
+		UserOpts: &validation.KubernetesValidationUserOpts{
+			Username:     "test-user",
+			Role:         "cluster-admin",
+			RSAPEMBase64: testUserPrivateKeyPEMBase64,
+		},
+		Tags: map[string]string{
+			"test": "TestKubernetesValidation",
+		},
+	})
+}
diff --git a/v1/providers/nebius/validation_network_test.go b/v1/providers/nebius/validation_network_test.go
new file mode 100644
index 0000000..c180fe4
--- /dev/null
+++ b/v1/providers/nebius/validation_network_test.go
@@ -0,0 +1,45 @@
+package v1
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/brevdev/cloud/internal/validation"
+)
+
+var (
+	isValidationTest    = os.Getenv("VALIDATION_TEST")
+	privateKeyPEMBase64 = os.Getenv("NEBIUS_PRIVATE_KEY_PEM_BASE64")
+	publicKeyID         = os.Getenv("NEBIUS_PUBLIC_KEY_ID")
+	serviceAccountID    = os.Getenv("NEBIUS_SERVICE_ACCOUNT_ID")
+	projectID           = os.Getenv("NEBIUS_PROJECT_ID")
+)
+
+func TestNetworkValidation(t *testing.T) {
+	if isValidationTest == "" {
+		t.Skip("VALIDATION_TEST is not set, skipping Nebius Network validation tests")
+	}
+
+	if privateKeyPEMBase64 == "" || publicKeyID == "" || serviceAccountID == "" || projectID == "" {
+		t.Fatalf("NEBIUS_PRIVATE_KEY_PEM_BASE64, NEBIUS_PUBLIC_KEY_ID, NEBIUS_SERVICE_ACCOUNT_ID, and NEBIUS_PROJECT_ID must be set")
+	}
+
+	config := validation.ProviderConfig{
+		Credential: NewNebiusCredential(fmt.Sprintf("validation-%s", t.Name()), publicKeyID, privateKeyPEMBase64, serviceAccountID, projectID),
+	}
+
+	// Use the test name as the name of the VPC
+	name := fmt.Sprintf("cloud-sdk-%s-%s", t.Name(), time.Now().UTC().Format("20060102150405"))
+
+	validation.RunNetworkValidation(t, config, validation.NetworkValidationOpts{
+		Name:                  name,
+		RefID:                 name,
+		CidrBlock:             "172.16.0.0/16",
+		PublicSubnetCidrBlock: "172.16.0.0/23",
+		Tags: map[string]string{
+			"test": "TestNetworkValidation",
+		},
+	})
+}
diff --git a/v1/vpc.go b/v1/vpc.go
new file mode 100644
index 0000000..878e5c0
--- /dev/null
+++ b/v1/vpc.go
@@ -0,0 +1,360 @@
+package v1
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/brevdev/cloud/internal/errors"
+)
+
+// VPC represents the complete specification of a Brev VPC.
+type VPC struct {
+	// The ID assigned by the cloud provider to the VPC.
+	id CloudProviderResourceID
+
+	// The name of the VPC, displayed on clients.
+	name string
+
+	// The unique ID used to associate with this VPC.
+	refID string
+
+	// The cloud provider that manages the VPC. Unless the provider is a broker to other clouds, this will be the same as
+	// the Cloud field. For example, "aws".
+	provider string
+
+	// The cloud that hosts the VPC. For example, "aws".
+	cloud string
+
+	// The location of the VPC. For example, "us-east-1".
+	location string
+
+	// The IPv4 network range for the VPC, in CIDR notation. For example, "10.0.0.0/16".
+	cidrBlock string
+
+	// The status of the VPC.
+	status VPCStatus
+
+	// The subnets associated with the VPC.
+	subnets []*Subnet
+
+	// The tags associated with the VPC.
+	tags Tags
+}
+
+type VPCStatus string
+
+const (
+	VPCStatusAvailable VPCStatus = "available"
+	VPCStatusPending   VPCStatus = "pending"
+	VPCStatusDeleting  VPCStatus = "deleting"
+	VPCStatusUnknown   VPCStatus = "unknown"
+)
+
+func (v *VPC) GetName() string {
+	return v.name
+}
+
+func (v *VPC) GetRefID() string {
+	return v.refID
+}
+
+func (v *VPC) GetProvider() string {
+	return v.provider
+}
+
+func (v *VPC) GetCloud() string {
+	return v.cloud
+}
+
+func (v *VPC) GetID() CloudProviderResourceID {
+	return v.id
+}
+
+func (v *VPC) GetLocation() string {
+	return v.location
+}
+
+func (v *VPC) GetCidrBlock() string {
+	return v.cidrBlock
+}
+
+func (v *VPC) GetStatus() VPCStatus {
+	return v.status
+}
+
+func (v *VPC) GetSubnets() []*Subnet {
+	return v.subnets
+}
+
+func (v *VPC) GetTags() Tags {
+	return v.tags
+}
+
+// VPCSettings represents the settings for a VPC. This is the input to the NewVPC function.
+type VPCSettings struct {
+	// The name of the VPC, displayed on clients.
+	Name string
+
+	// The unique ID used to associate with this VPC.
+	RefID string
+
+	// The cloud provider that manages the VPC. Unless the provider is a broker to other clouds, this will be the same as
+	// the Cloud field. For example, "aws".
+	Provider string
+
+	// The cloud that hosts the VPC. For example, "aws".
+	Cloud string
+
+	// The ID assigned by the cloud provider to the VPC.
+	ID CloudProviderResourceID
+
+	// The location of the VPC. For example, "us-east-1".
+	Location string
+
+	// The IPv4 network range for the VPC, in CIDR notation. For example, "10.0.0.0/16".
+	CidrBlock string
+
+	// The status of the VPC.
+	Status VPCStatus
+
+	// The subnets associated with the VPC.
+	Subnets []*Subnet
+
+	// The tags associated with the VPC.
+	Tags Tags
+}
+
+func (s *VPCSettings) setDefaults() {
+}
+
+func (s *VPCSettings) validate() error {
+	var errs []error
+	if s.RefID == "" {
+		errs = append(errs, fmt.Errorf("refID is required"))
+	}
+	if s.Name == "" {
+		errs = append(errs, fmt.Errorf("name is required"))
+	}
+	if s.Provider == "" {
+		errs = append(errs, fmt.Errorf("provider is required"))
+	}
+	if s.ID == "" {
+		errs = append(errs, fmt.Errorf("id is required"))
+	}
+	if s.Location == "" {
+		errs = append(errs, fmt.Errorf("location is required"))
+	}
+	if s.CidrBlock == "" {
+		errs = append(errs, fmt.Errorf("cidrBlock is required"))
+	}
+	if s.Status == "" {
+		errs = append(errs, fmt.Errorf("status is required"))
+	}
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
+
+// NewVPC creates a new VPC from the provided settings.
+func NewVPC(settings VPCSettings) (*VPC, error) {
+	settings.setDefaults()
+	err := settings.validate()
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return &VPC{
+		name:      settings.Name,
+		refID:     settings.RefID,
+		provider:  settings.Provider,
+		cloud:     settings.Cloud,
+		id:        settings.ID,
+		location:  settings.Location,
+		cidrBlock: settings.CidrBlock,
+		status:    settings.Status,
+		subnets:   settings.Subnets,
+		tags:      settings.Tags,
+	}, nil
+}
+
+// Subnet represents the complete specification of a Brev subnet.
+type Subnet struct {
+	// The name of the subnet, displayed on clients.
+	name string
+
+	// The unique ID used to associate with this subnet.
+	refID string
+
+	// The ID of the VPC that the subnet is associated with.
+	vPCID CloudProviderResourceID
+
+	// The ID assigned by the cloud provider to the subnet.
+	id CloudProviderResourceID
+
+	// The location of the subnet. For example, "us-east-1".
+	location string
+
+	// The IPv4 network range for the subnet, in CIDR notation. For example, "10.0.0.0/24".
+	cidrBlock string
+
+	// The type of the subnet.
+	subnetType SubnetType
+
+	// The tags associated with the subnet.
+	tags Tags
+}
+
+type SubnetType string
+
+const (
+	SubnetTypePublic  SubnetType = "public"
+	SubnetTypePrivate SubnetType = "private"
+)
+
+func (s *Subnet) GetName() string {
+	return s.name
+}
+
+func (s *Subnet) GetRefID() string {
+	return s.refID
+}
+
+func (s *Subnet) GetVPCID() CloudProviderResourceID {
+	return s.vPCID
+}
+
+func (s *Subnet) GetID() CloudProviderResourceID {
+	return s.id
+}
+
+func (s *Subnet) GetLocation() string {
+	return s.location
+}
+
+func (s *Subnet) GetCidrBlock() string {
+	return s.cidrBlock
+}
+
+func (s *Subnet) GetSubnetType() SubnetType {
+	return s.subnetType
+}
+
+func (s *Subnet) GetTags() Tags {
+	return s.tags
+}
+
+// SubnetSettings represents the settings for a subnet. This is the input to the NewSubnet function.
+type SubnetSettings struct {
+	// The name of the subnet, displayed on clients.
+	Name string
+
+	// The unique ID used to associate with this subnet.
+	RefID string
+
+	// The ID of the VPC that the subnet is associated with.
+	VPCID CloudProviderResourceID
+
+	// The ID assigned by the cloud provider to the subnet.
+	ID CloudProviderResourceID
+
+	// The location of the subnet. For example, "us-east-1".
+	Location string
+
+	// The IPv4 network range for the subnet, in CIDR notation. For example, "10.0.0.0/24".
+	CidrBlock string
+
+	// The type of the subnet.
+	Type SubnetType
+
+	// The tags associated with the subnet.
+	Tags Tags
+}
+
+func (s *SubnetSettings) setDefaults() {
+}
+
+func (s *SubnetSettings) validate() error {
+	var errs []error
+	if s.RefID == "" {
+		errs = append(errs, fmt.Errorf("refID is required"))
+	}
+	if s.Name == "" {
+		errs = append(errs, fmt.Errorf("name is required"))
+	}
+	if s.VPCID == "" {
+		errs = append(errs, fmt.Errorf("vPCID is required"))
+	}
+	if s.ID == "" {
+		errs = append(errs, fmt.Errorf("id is required"))
+	}
+
+	return errors.WrapAndTrace(errors.Join(errs...))
+}
+
+// NewSubnet creates a new Subnet from the provided settings.
+func NewSubnet(settings SubnetSettings) (*Subnet, error) {
+	settings.setDefaults()
+	err := settings.validate()
+	if err != nil {
+		return nil, errors.WrapAndTrace(err)
+	}
+	return &Subnet{
+		name:       settings.Name,
+		refID:      settings.RefID,
+		vPCID:      settings.VPCID,
+		id:         settings.ID,
+		location:   settings.Location,
+		cidrBlock:  settings.CidrBlock,
+		subnetType: settings.Type,
+		tags:       settings.Tags,
+	}, nil
+}
+
+type CloudMaintainVPC interface {
+	// Create a new VPC.
+	CreateVPC(ctx context.Context, args CreateVPCArgs) (*VPC, error)
+
+	// Get a VPC identified by the provided args.
+	GetVPC(ctx context.Context, args GetVPCArgs) (*VPC, error)
+
+	// Delete a VPC identified by the provided args.
+	DeleteVPC(ctx context.Context, args DeleteVPCArgs) error
+}
+
+type CreateVPCArgs struct {
+	// The name of the VPC, displayed on clients.
+	Name string
+
+	// The unique ID used to associate with this VPC.
+	RefID string
+
+	// The IPv4 network range for the VPC, in CIDR notation. For example, "10.0.0.0/16".
+	CidrBlock string
+
+	// The subnets to create in the VPC.
+	Subnets []CreateSubnetArgs
+
+	// The tags to associate with the VPC.
+	Tags Tags
+}
+
+type CreateSubnetArgs struct {
+	// The unique ID used to associate with this subnet.
+	RefID string
+
+	// The IPv4 network range for the subnet, in CIDR notation. For example, "10.0.0.0/24".
+	CidrBlock string
+
+	// The type of the subnet.
+	Type SubnetType
+
+	// The tags to associate with the subnet.
+	Tags Tags
+}
+
+type GetVPCArgs struct {
+	// The ID of the VPC to get.
+	ID CloudProviderResourceID
+}
+
+type DeleteVPCArgs struct {
+	// The ID of the VPC to delete.
+	ID CloudProviderResourceID
+}
diff --git a/v1/vpc_validation.go b/v1/vpc_validation.go
new file mode 100644
index 0000000..f708350
--- /dev/null
+++ b/v1/vpc_validation.go
@@ -0,0 +1,82 @@
+package v1
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// ValidateCreateVPC validates that the CreateVPC functionality works correctly.
+func ValidateCreateVPC(ctx context.Context, client CloudMaintainVPC, attrs CreateVPCArgs) (*VPC, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+
+	vpc, err := client.CreateVPC(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+
+	if vpc.GetName() != attrs.Name {
+		return nil, fmt.Errorf("VPC name does not match create args: '%s' != '%s'", vpc.GetName(), attrs.Name)
+	}
+	if vpc.GetRefID() != attrs.RefID {
+		return nil, fmt.Errorf("VPC refID does not match create args: '%s' != '%s'", vpc.GetRefID(), attrs.RefID)
+	}
+	if vpc.GetCidrBlock() != attrs.CidrBlock {
+		return nil, fmt.Errorf("VPC cidr block does not match create args: '%s' != '%s'", vpc.GetCidrBlock(), attrs.CidrBlock)
+	}
+	if len(vpc.GetSubnets()) != len(attrs.Subnets) {
+		return nil, fmt.Errorf("VPC subnet count does not match create args: '%d' != '%d'", len(vpc.GetSubnets()), len(attrs.Subnets))
+	}
+	for key, value := range attrs.Tags {
+		tagValue, ok := vpc.GetTags()[key]
+		if !ok {
+			return nil, fmt.Errorf("VPC tag does not match create args: '%s' not found", key)
+		}
+		if tagValue != value {
+			return nil, fmt.Errorf("VPC tag '%s' does not match create args: '%s' != '%s'", key, tagValue, value)
+		}
+	}
+
+	cidrToSubnetMap := make(map[string]*Subnet)
+	for _, subnet := range vpc.GetSubnets() {
+		cidrToSubnetMap[subnet.GetCidrBlock()] = subnet
+	}
+	for _, subnetAttrs := range attrs.Subnets {
+		subnetFromMap, ok := cidrToSubnetMap[subnetAttrs.CidrBlock]
+		if !ok {
+			return nil, fmt.Errorf("VPC subnet cidr block does not match create args: '%s' not found", subnetAttrs.CidrBlock)
+		}
+		if subnetFromMap.GetSubnetType() != subnetAttrs.Type {
+			return nil, fmt.Errorf("VPC subnet type does not match create args: '%s' != '%s'", subnetFromMap.GetSubnetType(), subnetAttrs.Type)
+		}
+	}
+
+	return vpc, nil
+}
+
+// ValidateGetVPC validates that the GetVPC functionality works correctly.
+func ValidateGetVPC(ctx context.Context, client CloudMaintainVPC, attrs GetVPCArgs) (*VPC, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+
+	vpc, err := client.GetVPC(ctx, attrs)
+	if err != nil {
+		return nil, err
+	}
+
+	if vpc.GetID() != attrs.ID {
+		return nil, fmt.Errorf("VPC ID does not match get args: '%s' != '%s'", vpc.GetID(), attrs.ID)
+	}
+
+	return vpc, nil
+}
+
+// ValidateDeleteVPC validates that the DeleteVPC functionality works correctly.
+func ValidateDeleteVPC(ctx context.Context, client CloudMaintainVPC, attrs DeleteVPCArgs) error {
+	err := client.DeleteVPC(ctx, attrs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
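+
+// Illustrative usage (a sketch, not part of the exported API): a provider
+// validation suite might chain the three helpers above roughly as follows,
+// assuming "client" implements CloudMaintainVPC and "createArgs" is a
+// populated CreateVPCArgs:
+//
+//	vpc, err := ValidateCreateVPC(ctx, client, createArgs)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := ValidateGetVPC(ctx, client, GetVPCArgs{ID: vpc.GetID()}); err != nil {
+//		return err
+//	}
+//	return ValidateDeleteVPC(ctx, client, DeleteVPCArgs{ID: vpc.GetID()})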