diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..419a650
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+audit.json
+creds.json
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..e816a1d
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,27 @@
+image: docker:latest
+
+services:
+ - docker:dind
+
+variables:
+ DOCKER_DRIVER: overlay2
+ GOPATH: /build
+ REPO_NAME: github.com/Unity-Technologies/nemesis
+ RUN_SRCCLR: 0
+
+before_script:
+ # Install CA certs, openssl to https downloads, python for gcloud sdk
+ - apk add --update make ca-certificates openssl python
+ - update-ca-certificates
+ # Authorize the docker client with GCR
+ - echo $GCLOUD_SERVICE_KEY | docker login -u _json_key --password-stdin https://gcr.io
+ # Go to build directory
+
+stages:
+ - build
+
+build:
+ stage: build
+ script:
+ - |
+ make build push
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..a8385b6
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,19 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+
+## [0.1.0] - 2019-07-25
+### Added
+- First release!
+- Support for scanning GCP project(s) and measuring against the [GCP CIS Benchmark](https://www.cisecurity.org/benchmark/google_cloud_computing_platform/)
+
+### Changed
+- N/A
+
+### Removed
+- N/A
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..01f5d1d
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,15 @@
+FROM alpine:3.10.3 AS certs
+RUN apk --no-cache add ca-certificates
+
+FROM golang:1.13.5 AS builder
+WORKDIR /app
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
+COPY . .
+RUN CGO_ENABLED=0 GOOS=linux go build -o /nemesis .
+
+FROM scratch
+COPY --from=builder /nemesis ./
+COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+ENTRYPOINT ["./nemesis"]
\ No newline at end of file
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..1ae85bc
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,379 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:03c669c2e391bf1c766e146c4cd5389c141056b71940d854a828580473b01d3a"
+ name = "cloud.google.com/go"
+ packages = [
+ "compute/metadata",
+ "iam",
+ "internal/optional",
+ "internal/version",
+ "logging/apiv2",
+ "pubsub",
+ "pubsub/apiv1",
+ "pubsub/internal/distribution",
+ ]
+ pruneopts = "UT"
+ revision = "775730d6e48254a2430366162cf6298e5368833c"
+ version = "v0.39.0"
+
+[[projects]]
+ digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ pruneopts = "UT"
+ revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ pruneopts = "UT"
+ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+ version = "v1.1.1"
+
+[[projects]]
+ branch = "master"
+ digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467"
+ name = "github.com/golang/glog"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
+
+[[projects]]
+ digest = "1:1882d3bab192c14c94b61781ff6d3965362f98527f895987793908304e90c118"
+ name = "github.com/golang/protobuf"
+ packages = [
+ "proto",
+ "protoc-gen-go/descriptor",
+ "ptypes",
+ "ptypes/any",
+ "ptypes/duration",
+ "ptypes/empty",
+ "ptypes/struct",
+ "ptypes/timestamp",
+ ]
+ pruneopts = "UT"
+ revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+ version = "v1.3.1"
+
+[[projects]]
+ digest = "1:f1f70abea1ab125d48396343b4c053f8fecfbdb943037bf3d29dc80c90fe60b3"
+ name = "github.com/googleapis/gax-go"
+ packages = ["v2"]
+ pruneopts = "UT"
+ revision = "beaecbbdd8af86aa3acf14180d53828ce69400b2"
+ version = "v2.0.4"
+
+[[projects]]
+ digest = "1:67474f760e9ac3799f740db2c489e6423a4cde45520673ec123ac831ad849cb8"
+ name = "github.com/hashicorp/golang-lru"
+ packages = ["simplelru"]
+ pruneopts = "UT"
+ revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
+ version = "v0.5.1"
+
+[[projects]]
+ digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ pruneopts = "UT"
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
+[[projects]]
+ digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ pruneopts = "UT"
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:287c515ccefca6ea7614a1b1dad119211510bf33ed01334646a9444db68d25e6"
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/internal",
+ "prometheus/push",
+ ]
+ pruneopts = "UT"
+ revision = "50c4339db732beb2165735d2cde0bff78eb3c5a5"
+ version = "v0.9.3"
+
+[[projects]]
+ branch = "master"
+ digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ pruneopts = "UT"
+ revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
+
+[[projects]]
+ digest = "1:8dcedf2e8f06c7f94e48267dea0bc0be261fa97b377f3ae3e87843a92a549481"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model",
+ ]
+ pruneopts = "UT"
+ revision = "17f5ca1748182ddf24fc33a5a7caaaf790a52fcc"
+ version = "v0.4.1"
+
+[[projects]]
+ digest = "1:f8fac244ec2cb7daef48b0148dcf5a330ac7697fa83c2e1e78e65b21f7f43500"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/fs",
+ ]
+ pruneopts = "UT"
+ revision = "65bdadfa96aecebf4dcf888da995a29eab4fc964"
+ version = "v0.0.1"
+
+[[projects]]
+ digest = "1:972c2427413d41a1e06ca4897e8528e5a1622894050e2f527b38ddf0f343f759"
+ name = "github.com/stretchr/testify"
+ packages = ["assert"]
+ pruneopts = "UT"
+ revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
+ version = "v1.3.0"
+
+[[projects]]
+ digest = "1:bf33f7cd985e8e62eeef3b1985ec48f0f274e4083fa811596aafaf3af2947e83"
+ name = "go.opencensus.io"
+ packages = [
+ ".",
+ "internal",
+ "internal/tagencoding",
+ "metric/metricdata",
+ "metric/metricproducer",
+ "plugin/ocgrpc",
+ "plugin/ochttp",
+ "plugin/ochttp/propagation/b3",
+ "resource",
+ "stats",
+ "stats/internal",
+ "stats/view",
+ "tag",
+ "trace",
+ "trace/internal",
+ "trace/propagation",
+ "trace/tracestate",
+ ]
+ pruneopts = "UT"
+ revision = "9c377598961b706d1542bd2d84d538b5094d596e"
+ version = "v0.22.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:1b13e8770142a9251361b13a3b8b9b77296be6fa32856c937b346a45f93c845c"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "context/ctxhttp",
+ "http/httpguts",
+ "http2",
+ "http2/hpack",
+ "idna",
+ "internal/timeseries",
+ "trace",
+ ]
+ pruneopts = "UT"
+ revision = "f3200d17e092c607f615320ecaad13d87ad9a2b3"
+
+[[projects]]
+ branch = "master"
+ digest = "1:7cba983d19f4aa6a154d73268dcc67a66bcc24bd7ee1c1b09d448a721dea0d9f"
+ name = "golang.org/x/oauth2"
+ packages = [
+ ".",
+ "google",
+ "internal",
+ "jws",
+ "jwt",
+ ]
+ pruneopts = "UT"
+ revision = "aaccbc9213b0974828f81aaac109d194880e3014"
+
+[[projects]]
+ branch = "master"
+ digest = "1:a2fc247e64b5dafd3251f12d396ec85f163d5bb38763c4997856addddf6e78d8"
+ name = "golang.org/x/sync"
+ packages = [
+ "errgroup",
+ "semaphore",
+ ]
+ pruneopts = "UT"
+ revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
+
+[[projects]]
+ branch = "master"
+ digest = "1:668e8c66b8895d69391429b0f64a72c35603c94f364c94d4e5fab5053d57a0b6"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ pruneopts = "UT"
+ revision = "ad28b68e88f12448a1685d038ffea87bbbb34148"
+
+[[projects]]
+ digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
+ name = "golang.org/x/text"
+ packages = [
+ "collate",
+ "collate/build",
+ "internal/colltab",
+ "internal/gen",
+ "internal/language",
+ "internal/language/compact",
+ "internal/tag",
+ "internal/triegen",
+ "internal/ucd",
+ "language",
+ "secure/bidirule",
+ "transform",
+ "unicode/bidi",
+ "unicode/cldr",
+ "unicode/norm",
+ "unicode/rangetable",
+ ]
+ pruneopts = "UT"
+ revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
+ version = "v0.3.2"
+
+[[projects]]
+ digest = "1:cd33295b974608edea17625187ae2fcfa9a61b0e814c814fbb0b410f3d9ab16b"
+ name = "google.golang.org/api"
+ packages = [
+ "cloudresourcemanager/v1",
+ "compute/v1",
+ "container/v1",
+ "gensupport",
+ "googleapi",
+ "googleapi/internal/uritemplates",
+ "googleapi/transport",
+ "iam/v1",
+ "internal",
+ "iterator",
+ "option",
+ "serviceusage/v1",
+ "storage/v1",
+ "support/bundler",
+ "transport",
+ "transport/grpc",
+ "transport/http",
+ "transport/http/internal/propagation",
+ ]
+ pruneopts = "UT"
+ revision = "721295fe20d585ce7e948146f82188429d14da33"
+ version = "v0.5.0"
+
+[[projects]]
+ digest = "1:1366eff573b4e7adc862f31d01f31f20b3d9267031d45c5995da14a711e4add0"
+ name = "google.golang.org/appengine"
+ packages = [
+ ".",
+ "internal",
+ "internal/app_identity",
+ "internal/base",
+ "internal/datastore",
+ "internal/log",
+ "internal/modules",
+ "internal/remote_api",
+ "internal/socket",
+ "internal/urlfetch",
+ "socket",
+ "urlfetch",
+ ]
+ pruneopts = "UT"
+ revision = "4c25cacc810c02874000e4f7071286a8e96b2515"
+ version = "v1.6.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:cc9e0911a08dc947ce18bae75e6f29e87a1b2acef52927c8a7311ffec2e654a0"
+ name = "google.golang.org/genproto"
+ packages = [
+ "googleapis/api",
+ "googleapis/api/annotations",
+ "googleapis/api/distribution",
+ "googleapis/api/label",
+ "googleapis/api/metric",
+ "googleapis/api/monitoredres",
+ "googleapis/iam/v1",
+ "googleapis/logging/type",
+ "googleapis/logging/v2",
+ "googleapis/pubsub/v1",
+ "googleapis/rpc/status",
+ "googleapis/type/expr",
+ "protobuf/field_mask",
+ ]
+ pruneopts = "UT"
+ revision = "fb225487d10142b5bcc35abfc6cb9a0609614976"
+
+[[projects]]
+ digest = "1:75fd7c63d317f4c60131dea3833934eb790ba067f90636fbcd51dbbd2ad57170"
+ name = "google.golang.org/grpc"
+ packages = [
+ ".",
+ "balancer",
+ "balancer/base",
+ "balancer/roundrobin",
+ "binarylog/grpc_binarylog_v1",
+ "codes",
+ "connectivity",
+ "credentials",
+ "credentials/internal",
+ "credentials/oauth",
+ "encoding",
+ "encoding/proto",
+ "grpclog",
+ "internal",
+ "internal/backoff",
+ "internal/balancerload",
+ "internal/binarylog",
+ "internal/channelz",
+ "internal/envconfig",
+ "internal/grpcrand",
+ "internal/grpcsync",
+ "internal/syscall",
+ "internal/transport",
+ "keepalive",
+ "metadata",
+ "naming",
+ "peer",
+ "resolver",
+ "resolver/dns",
+ "resolver/passthrough",
+ "stats",
+ "status",
+ "tap",
+ ]
+ pruneopts = "UT"
+ revision = "869adfc8d5a43efc0d05780ad109106f457f51e4"
+ version = "v1.21.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "cloud.google.com/go/logging/apiv2",
+ "cloud.google.com/go/pubsub",
+ "github.com/golang/glog",
+ "github.com/prometheus/client_golang/prometheus",
+ "github.com/prometheus/client_golang/prometheus/push",
+ "github.com/stretchr/testify/assert",
+ "golang.org/x/oauth2/google",
+ "google.golang.org/api/cloudresourcemanager/v1",
+ "google.golang.org/api/compute/v1",
+ "google.golang.org/api/container/v1",
+ "google.golang.org/api/iam/v1",
+ "google.golang.org/api/iterator",
+ "google.golang.org/api/serviceusage/v1",
+ "google.golang.org/api/storage/v1",
+ "google.golang.org/genproto/googleapis/logging/v2",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 0000000..d7072c2
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,30 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7e7c3d6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,24 @@
+name := $(shell basename "$(CURDIR)")
+registry := $(GCR_REGISTRY)
+
+ifdef CI
+git_hash := $(shell echo ${CI_COMMIT_SHA} | cut -c1-10 )
+git_branch := $(or ${CI_COMMIT_REF_NAME}, unknown)
+git_tag := $(or ${CI_COMMIT_TAG}, ${CI_COMMIT_REF_NAME}, unknown)
+else
+git_hash := $(shell git rev-parse HEAD | cut -c1-10)
+git_branch := $(shell git rev-parse --abbrev-ref HEAD || echo "unknown")
+git_tag := $(or ${git_branch}, unknown)
+endif
+
+build:
+ docker build \
+ -t \
+ $(registry)/$(name):${git_branch}-${git_hash} .
+
+push:
+ docker push \
+ $(registry)/$(name):${git_branch}-${git_hash}
+
+test:
+ go test -count=1 -v ./...
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b504437
--- /dev/null
+++ b/README.md
@@ -0,0 +1,92 @@
+# nemesis
+
+Nemesis is a tool for auditing platform configurations for measuring compliance. It is meant as a read-only view into Cloud platforms such that necessary audits can be performed, which can result in actions to take.
+
+## Usage
+You can install `nemesis` as a binary on your machine, or run it as a docker container.
+
+The following line demonstrates basic usage to invoke `nemesis` and output results into your terminal. This assumes that you have valid GCP credentials on the host you are running on:
+```
+nemesis --project.filter="my-project" --reports.stdout.enable
+```
+
+You can utilize a service account credential file to perform `nemesis` runs as the service account user:
+```
+# Set the environment variable that the Google Auth library expects
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
+
+# Now run nemesis as the service account
+nemesis --project.filter="my-awesome-project" --reports.stdout.enable
+```
+
+You can also combine `nemesis` with tools like `jq` to do parsing or formatting a JSON file:
+```
+# Output a nemesis report to a local JSON file that is formatted in a readable way
+
+nemesis --project.filter="my-project" --reports.stdout.enable | jq . >> report.json
+```
+
+You can scan multiple projects in a single `nemesis` run using a simple regular expression:
+```
+nemesis --project.filter="my-business-unit-projects-*"
+```
+
+`nemesis` reports can be directly shipped to a GCP Pub/Sub topic for direct ingestion into another system:
+```
+nemesis --project.filter="my-project" --reports.pubsub.enable --reports.pubsub.project="my-reporting-project" --reports.pubsub.topic="nemesis-reports"
+```
+
+All flags for `nemesis` have an equivalent environment variable you can use for configuration. The table under Flags indicates the equivalencies:
+```
+# Configure many settings before running
+export NEMESIS_METRICS_ENABLED="true"
+export NEMESIS_METRICS_GATEWAY="prometheus-pushgateway.example.com:9091"
+export NEMESIS_PROJECT_FILTER="my-project"
+export NEMESIS_ONLY_FAILURES="true"
+export NEMESIS_ENABLE_STDOUT="true"
+
+# Now run the scan
+nemesis
+```
+
+
+## Flags
+`nemesis` has a number of flags that can be invoked either using the command line flag or the equivalent environment variable. The following table describes their usage:
+
+| Flag | Environment Variable | Required | Description | Example Flag Usage |
+|------|----------------------|----------|-------------|--------------------|
+| project.filter | `NEMESIS_PROJECT_FILTER` | yes | (String) The project filter to perform audits on | `--project.filter="my-project"` |
+| compute.instance.allow-ip-forwarding | `NEMESIS_COMPUTE_ALLOW_IP_FORWARDING` | no | (Bool) Indicate whether instances should be allowed to perform IP forwarding | `--compute.instance.allow-ip-forwarding` |
+| compute.instance.allow-nat | `NEMESIS_COMPUTE_ALLOW_NAT` | no | (Bool) Indicate whether instances should be allowed to have external (NAT) IP addresses | `--compute.instance.allow-nat` |
+| compute.instance.num-interfaces | `NEMESIS_COMPUTE_NUM_NICS` | no | (String) The number of network interfaces (NIC) that an instance should have (default 1) | `--compute.instance.num-interfaces=1` |
+| container.oauth-scopes | `NEMESIS_CONTAINER_OAUTHSCOPES` | no | (String) A comma-separated list of OAuth scopes to allow for GKE clusters (default "https://www.googleapis.com/auth/devstorage.read_only, https://www.googleapis.com/auth/logging.write, https://www.googleapis.com/auth/monitoring, https://www.googleapis.com/auth/servicecontrol, https://www.googleapis.com/auth/service.management.readonly, https://www.googleapis.com/auth/trace.append") | `--container.oauth-scopes="..."` |
+| iam.sa-key-expiration-time | `NEMESIS_IAM_SA_KEY_EXPIRATION_TIME` | no | (String) The time in days to allow service account keys to live before being rotated (default "90") | `--iam.sa-key-expiration-time="90"` |
+| iam.user-domains | `NEMESIS_IAM_USERDOMAINS` | no | (String) A comma-separated list of domains to allow users from | `--iam.user-domains="google.com"` |
+| metrics.enabled | `NEMESIS_METRICS_ENABLED` | no | (Boolean) Enable Prometheus metrics | `--metrics.enabled` |
+| metrics.gateway | `NEMESIS_METRICS_GATEWAY` | no | (String) Prometheus metrics Push Gateway (default "127.0.0.1:9091") | `--metrics.gateway="10.0.160.12:9091"` |
+| reports.only-failures | `NEMESIS_ONLY_FAILURES` | no | (Boolean) Limit output of controls to only failed controls | `--reports.only-failures` |
+| reports.stdout.enable | `NEMESIS_ENABLE_STDOUT` | no | (Boolean) Enable outputting report via stdout | `--reports.stdout.enable` |
+| reports.pubsub.enable | `NEMESIS_ENABLE_PUBSUB` | no | (Boolean) Enable outputting report via Google Pub/Sub | `--reports.pubsub.enable` |
+| reports.pubsub.project | `NEMESIS_PUBSUB_PROJECT` | no | (String) Indicate which GCP project to output Pub/Sub reports to | `--reports.pubsub.project="my-project"` |
+| reports.pubsub.topic | `NEMESIS_PUBSUB_TOPIC` | no | (String) Indicate which topic to output Pub/Sub reports to (default "nemesis") | `--reports.pubsub.topic="nemesis-reports"` |
+
+## Motivation
+
+`nemesis` was created out of a need to generate compliance and auditing reports quickly and in consumable formats. This tool helps audit against GCP security standards and best practices. We implement, as a baseline security metric:
+* [CIS Controls for GCP](https://www.cisecurity.org/benchmark/google_cloud_computing_platform/)
+* [GKE Hardening Guidelines](https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster)
+* [Default Project Metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#default)
+
+We strive to encourage best practices in our environment for the following GCP services:
+* [Identity and Access Management (IAM)](https://cloud.google.com/iam/docs/using-iam-securely)
+* [Google Cloud Storage (GCS)](https://cloud.google.com/storage/docs/access-control/using-iam-permissions)
+* [Google Compute Engine (GCE)](https://cloud.google.com/compute/docs/access/)
+* [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-admin-overview#configuring_cluster_security)
+
+## Maintainers
+
+@TaylorMutch
+
+## Contributions
+
+See Contributions
\ No newline at end of file
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..62af85b
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,21 @@
+module github.com/Unity-Technologies/nemesis
+
+go 1.13
+
+require (
+ cloud.google.com/go v0.39.0
+ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
+ github.com/prometheus/client_golang v0.9.3
+ github.com/prometheus/common v0.4.1 // indirect
+ github.com/prometheus/procfs v0.0.1 // indirect
+ github.com/stretchr/testify v1.3.0
+ go.opencensus.io v0.22.0 // indirect
+ golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect
+ golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0
+ golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect
+ golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1 // indirect
+ google.golang.org/api v0.5.0
+ google.golang.org/appengine v1.6.0 // indirect
+ google.golang.org/genproto v0.0.0-20190530194941-fb225487d101
+ google.golang.org/grpc v1.21.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..1a17c96
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,141 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.39.0 h1:UgQP9na6OTfp4dsAiz/eFpFA1C6tPdH5wiRdi19tuMw=
+cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.1 h1:Vb1OE5ZDNKF3yhna6/G+5pHqADNm4I8hUoHj7YQhbZk=
+github.com/prometheus/procfs v0.0.1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0 h1:xFEXbcD0oa/xhqQmMXztdZ0bWvexAWds+8c1gRN8nu0=
+golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1 h1:R4dVlxdmKenVdMRS/tTspEpSTRWINYrHD8ySIU9yCIU=
+golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.5.0 h1:lj9SyhMzyoa38fgFF0oO2T6pjs5IzkLPKfVtxpyCRMM=
+google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw=
+google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..b365e40
--- /dev/null
+++ b/main.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+ "flag"
+
+ "github.com/Unity-Technologies/nemesis/pkg/runner"
+)
+
+func main() {
+ flag.Parse()
+ audit := runner.NewAudit()
+ audit.Setup()
+ audit.Execute()
+ audit.Report()
+}
diff --git a/pkg/cis/compute.go b/pkg/cis/compute.go
new file mode 100644
index 0000000..190e5a2
--- /dev/null
+++ b/pkg/cis/compute.go
@@ -0,0 +1,40 @@
+package cis
+
// The recommendations below cover section 4 (Virtual Machines) of the
// CIS Google Cloud Platform Foundation Benchmark. Each value is added to
// the package-level Registry during package initialization.
var (
	compute1 = Recommendation{
		Name:   "Ensure that instances are not configured to use the default service account with full access to all Cloud APIs",
		CisID:  "4.1",
		Scored: true,
		Level:  1,
	}
	compute2 = Recommendation{
		Name:   "Ensure 'Block Project-wide SSH keys' enabled for VM instances",
		CisID:  "4.2",
		Scored: false,
		Level:  1,
	}
	compute3 = Recommendation{
		Name:   "Ensure oslogin is enabled for a Project",
		CisID:  "4.3",
		Scored: true,
		Level:  1,
	}
	compute4 = Recommendation{
		Name:   "Ensure 'Enable connecting to serial ports' is not enabled for VM Instance",
		CisID:  "4.4",
		Scored: true,
		Level:  1,
	}
	compute5 = Recommendation{
		Name:   "Ensure that IP forwarding is not enabled on Instances",
		CisID:  "4.5",
		Scored: true,
		Level:  1,
	}
	compute6 = Recommendation{
		Name:   "Ensure VM disks for critical VMs are encrypted with Customer-Supplied Encryption Keys (CSEK)",
		CisID:  "4.6",
		Scored: true,
		Level:  2,
	}
)
diff --git a/pkg/cis/gke.go b/pkg/cis/gke.go
new file mode 100644
index 0000000..84814bf
--- /dev/null
+++ b/pkg/cis/gke.go
@@ -0,0 +1,112 @@
+package cis
+
// The recommendations below cover section 7 (Kubernetes Engine) of the
// CIS Google Cloud Platform Foundation Benchmark. Each value is added to
// the package-level Registry during package initialization.
var (
	gke1 = Recommendation{
		Name:   "Ensure Stackdriver Logging is set to Enabled on Kubernetes Engine Clusters",
		CisID:  "7.1",
		Scored: true,
		Level:  1,
	}
	gke2 = Recommendation{
		Name:   "Ensure Stackdriver Monitoring is set to Enabled on Kubernetes Engine Clusters",
		CisID:  "7.2",
		Scored: false,
		Level:  1,
	}
	gke3 = Recommendation{
		Name:   "Ensure Legacy Authorization is set to Disabled on Kubernetes Engine Clusters",
		CisID:  "7.3",
		Scored: true,
		Level:  1,
	}
	gke4 = Recommendation{
		Name:   "Ensure Master authorized networks is set to Enabled on Kubernetes Engine Clusters",
		CisID:  "7.4",
		Scored: false,
		Level:  1,
	}
	gke5 = Recommendation{
		Name:   "Ensure Kubernetes Clusters are configured with Labels",
		CisID:  "7.5",
		Scored: false,
		Level:  1,
	}
	gke6 = Recommendation{
		Name:   "Ensure Kubernetes web UI / Dashboard is disabled",
		CisID:  "7.6",
		Scored: true,
		Level:  1,
	}
	gke7 = Recommendation{
		Name:   "Ensure Automatic node repair is enabled for Kubernetes Clusters",
		CisID:  "7.7",
		Scored: true,
		Level:  1,
	}
	gke8 = Recommendation{
		Name:   "Ensure Automatic node upgrades is enabled on Kubernetes Engine Clusters nodes",
		CisID:  "7.8",
		Scored: true,
		Level:  1,
	}
	gke9 = Recommendation{
		Name:   "Ensure Container-Optimized OS (COS) is used for Kubernetes Engine Clusters Node image",
		CisID:  "7.9",
		Scored: false,
		Level:  2,
	}
	gke10 = Recommendation{
		Name:   "Ensure Basic Authentication is disabled on Kubernetes Engine Clusters",
		CisID:  "7.10",
		Scored: true,
		Level:  1,
	}
	gke11 = Recommendation{
		Name:   "Ensure Network policy is enabled on Kubernetes Engine Clusters",
		CisID:  "7.11",
		Scored: true,
		Level:  1,
	}
	gke12 = Recommendation{
		Name:   "Ensure Kubernetes Cluster is created with Client Certificate enabled",
		CisID:  "7.12",
		Scored: true,
		Level:  1,
	}
	gke13 = Recommendation{
		Name:   "Ensure Kubernetes Cluster is created with Alias IP ranges enabled",
		CisID:  "7.13",
		Scored: true,
		Level:  1,
	}
	gke14 = Recommendation{
		Name:   "Ensure PodSecurityPolicy controller is enabled on the Kubernetes Engine Clusters",
		CisID:  "7.14",
		Scored: false,
		Level:  1,
	}
	gke15 = Recommendation{
		Name:   "Ensure Kubernetes Cluster is created with Private cluster enabled",
		CisID:  "7.15",
		Scored: true,
		Level:  1,
	}
	gke16 = Recommendation{
		Name:   "Ensure Private Google Access is set on Kubernetes Engine Cluster Subnets",
		CisID:  "7.16",
		Scored: true,
		Level:  1,
	}
	gke17 = Recommendation{
		Name:   "Ensure default Service account is not used for Project access in Kubernetes Clusters",
		CisID:  "7.17",
		Scored: true,
		Level:  1,
	}
	gke18 = Recommendation{
		Name:   "Ensure Kubernetes Clusters created with limited service account Access scopes for Project access",
		CisID:  "7.18",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/cis/iam.go b/pkg/cis/iam.go
new file mode 100644
index 0000000..e14a6f8
--- /dev/null
+++ b/pkg/cis/iam.go
@@ -0,0 +1,82 @@
+package cis
+
// The recommendations below cover section 1 (Identity and Access
// Management) of the CIS Google Cloud Platform Foundation Benchmark.
// Each value is added to the package-level Registry during package
// initialization.
var (
	iam1 = Recommendation{
		Name:   "Ensure that corporate login credentials are used instead of Gmail accounts",
		CisID:  "1.1",
		Scored: true,
		Level:  1,
	}
	iam2 = Recommendation{
		Name:   "Ensure that multi-factor authentication is enabled for all non- service accounts",
		CisID:  "1.2",
		Scored: false,
		Level:  1,
	}
	iam3 = Recommendation{
		Name:   "Ensure that there are only GCP-managed service account keys for each service account",
		CisID:  "1.3",
		Scored: true,
		Level:  1,
	}
	iam4 = Recommendation{
		Name:   "Ensure that ServiceAccount has no Admin privileges",
		CisID:  "1.4",
		Scored: true,
		Level:  1,
	}
	iam5 = Recommendation{
		Name:   "Ensure that IAM users are not assigned Service Account User role at project level",
		CisID:  "1.5",
		Scored: true,
		Level:  1,
	}
	iam6 = Recommendation{
		Name:   "Ensure user-managed/external keys for service accounts are rotated every 90 days or less",
		CisID:  "1.6",
		Scored: true,
		Level:  1,
	}
	iam7 = Recommendation{
		Name:   "Ensure that Separation of duties is enforced while assigning service account related roles to users",
		CisID:  "1.7",
		Scored: false,
		Level:  2,
	}
	iam8 = Recommendation{
		Name:   "Ensure Encryption keys are rotated within a period of 365 days",
		CisID:  "1.8",
		Scored: true,
		Level:  1,
	}
	iam9 = Recommendation{
		Name:   "Ensure that Separation of duties is enforced while assigning KMS related roles to users",
		CisID:  "1.9",
		Scored: true,
		Level:  2,
	}
	iam10 = Recommendation{
		Name:   "Ensure API keys are not created for a project",
		CisID:  "1.10",
		Scored: false,
		Level:  2,
	}
	iam11 = Recommendation{
		Name:   "Ensure API keys are restricted to use by only specified Hosts and Apps",
		CisID:  "1.11",
		Scored: false,
		Level:  1,
	}
	iam12 = Recommendation{
		Name:   "Ensure API keys are restricted to only APIs that application needs access",
		CisID:  "1.12",
		Scored: false,
		Level:  1,
	}
	iam13 = Recommendation{
		Name:   "Ensure API keys are rotated every 90 days",
		CisID:  "1.13",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/cis/log_mon.go b/pkg/cis/log_mon.go
new file mode 100644
index 0000000..1a6130b
--- /dev/null
+++ b/pkg/cis/log_mon.go
@@ -0,0 +1,70 @@
+package cis
+
// The recommendations below cover section 2 (Logging and Monitoring) of
// the CIS Google Cloud Platform Foundation Benchmark. Each value is added
// to the package-level Registry during package initialization.
var (
	logmon1 = Recommendation{
		Name:   "Ensure that Cloud Audit Logging is configured properly across all services and all users from a project",
		CisID:  "2.1",
		Scored: true,
		Level:  1,
	}
	logmon2 = Recommendation{
		Name:   "Ensure that sinks are configured for all Log entries",
		CisID:  "2.2",
		Scored: true,
		Level:  1,
	}
	logmon3 = Recommendation{
		Name:   "Ensure that object versioning is enabled on log-buckets",
		CisID:  "2.3",
		Scored: true,
		Level:  1,
	}
	logmon4 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for Project Ownership assignments/changes",
		CisID:  "2.4",
		Scored: true,
		Level:  1,
	}
	logmon5 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for Audit Configuration Changes",
		CisID:  "2.5",
		Scored: true,
		Level:  1,
	}
	logmon6 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for Custom Role changes",
		CisID:  "2.6",
		Scored: true,
		Level:  2,
	}
	logmon7 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for VPC Network Firewall rule changes",
		CisID:  "2.7",
		Scored: true,
		Level:  1,
	}
	logmon8 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for VPC network route changes",
		CisID:  "2.8",
		Scored: true,
		Level:  1,
	}
	logmon9 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for VPC network changes",
		CisID:  "2.9",
		Scored: true,
		Level:  1,
	}
	logmon10 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for Cloud Storage IAM permission changes",
		CisID:  "2.10",
		Scored: true,
		Level:  1,
	}
	logmon11 = Recommendation{
		Name:   "Ensure log metric filter and alerts exists for SQL instance configuration changes",
		CisID:  "2.11",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/cis/network.go b/pkg/cis/network.go
new file mode 100644
index 0000000..60719e3
--- /dev/null
+++ b/pkg/cis/network.go
@@ -0,0 +1,58 @@
+package cis
+
// The recommendations below cover section 3 (Networking) of the CIS
// Google Cloud Platform Foundation Benchmark. Each value is added to the
// package-level Registry during package initialization.
var (
	network1 = Recommendation{
		Name:   "Ensure the default network does not exist in a project",
		CisID:  "3.1",
		Scored: true,
		Level:  1,
	}
	network2 = Recommendation{
		Name:   "Ensure legacy networks does not exists for a project",
		CisID:  "3.2",
		Scored: true,
		Level:  1,
	}
	network3 = Recommendation{
		Name:   "Ensure that DNSSEC is enabled for Cloud DNS",
		CisID:  "3.3",
		Scored: false,
		Level:  1,
	}
	network4 = Recommendation{
		Name:   "Ensure that RSASHA1 is not used for key-signing key in Cloud DNS DNSSEC",
		CisID:  "3.4",
		Scored: false,
		Level:  1,
	}
	network5 = Recommendation{
		Name:   "Ensure that RSASHA1 is not used for zone-signing key in Cloud DNS DNSSEC",
		CisID:  "3.5",
		Scored: false,
		Level:  1,
	}
	network6 = Recommendation{
		Name:   "Ensure that SSH access is restricted from the internet",
		CisID:  "3.6",
		Scored: true,
		Level:  2,
	}
	network7 = Recommendation{
		Name:   "Ensure that RDP access is restricted from the internet",
		CisID:  "3.7",
		Scored: true,
		Level:  2,
	}
	network8 = Recommendation{
		Name:   "Ensure Private Google Access is enabled for all subnetwork in VPC Network",
		CisID:  "3.8",
		Scored: true,
		Level:  2,
	}
	network9 = Recommendation{
		Name:   "Ensure VPC Flow logs is enabled for every subnet in VPC Network",
		CisID:  "3.9",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/cis/recommendation.go b/pkg/cis/recommendation.go
new file mode 100644
index 0000000..e2b9201
--- /dev/null
+++ b/pkg/cis/recommendation.go
@@ -0,0 +1,123 @@
+// Package cis is a schema for organizing CIS controls for Google Cloud
+package cis
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
// Recommendation is a CIS recommendation for GCP.
type Recommendation struct {
	// Name is the descriptive name of the CIS recommendation.
	Name string `json:"name"`

	// Scored indicates whether compliance with the recommendation should
	// be attributable to the overall compliance of the relevant resource.
	Scored bool `json:"scored"`

	// CisID is the CIS identifier for the recommendation, formatted as a
	// major-minor string (e.g. "1.12").
	CisID string `json:"cisId"`

	// Level is the CIS profile level for the recommendation.
	Level int `json:"level"`
}

var (
	// Registry is the registry of CIS recommendations, keyed by CIS ID.
	Registry = make(map[string]Recommendation, 1)
)

// Marshal returns the JSON encoding of the recommendation.
func (r *Recommendation) Marshal() ([]byte, error) {
	// Marshal the receiver pointer directly: taking its address again
	// (&r) would produce a redundant **Recommendation indirection.
	return json.Marshal(r)
}

// Format returns the fully formatted CIS descriptive name, e.g.
// "CIS 1.1 - <name> (Scored)".
func (r *Recommendation) Format() string {
	score := "Scored"
	if !r.Scored {
		score = "Not Scored"
	}
	return fmt.Sprintf("CIS %v - %v (%v)", r.CisID, r.Name, score)
}
+
+func init() {
+ // IAM controls
+ Registry[iam1.CisID] = iam1
+ Registry[iam2.CisID] = iam2
+ Registry[iam3.CisID] = iam3
+ Registry[iam4.CisID] = iam4
+ Registry[iam5.CisID] = iam5
+ Registry[iam6.CisID] = iam6
+ Registry[iam7.CisID] = iam7
+ Registry[iam8.CisID] = iam8
+ Registry[iam9.CisID] = iam9
+ Registry[iam10.CisID] = iam10
+ Registry[iam11.CisID] = iam11
+ Registry[iam12.CisID] = iam12
+ Registry[iam13.CisID] = iam13
+
+ // Logging & Monitoring
+ Registry[logmon1.CisID] = logmon1
+ Registry[logmon2.CisID] = logmon2
+ Registry[logmon3.CisID] = logmon3
+ Registry[logmon4.CisID] = logmon4
+ Registry[logmon5.CisID] = logmon5
+ Registry[logmon6.CisID] = logmon6
+ Registry[logmon7.CisID] = logmon7
+ Registry[logmon8.CisID] = logmon8
+ Registry[logmon9.CisID] = logmon9
+ Registry[logmon10.CisID] = logmon10
+ Registry[logmon11.CisID] = logmon11
+
+ // Networking
+ Registry[network1.CisID] = network1
+ Registry[network2.CisID] = network2
+ Registry[network3.CisID] = network3
+ Registry[network4.CisID] = network4
+ Registry[network5.CisID] = network5
+ Registry[network6.CisID] = network6
+ Registry[network7.CisID] = network7
+ Registry[network8.CisID] = network8
+ Registry[network9.CisID] = network9
+
+ // VM & Compute
+ Registry[compute1.CisID] = compute1
+ Registry[compute2.CisID] = compute2
+ Registry[compute3.CisID] = compute3
+ Registry[compute4.CisID] = compute4
+ Registry[compute5.CisID] = compute5
+ Registry[compute6.CisID] = compute6
+
+ // GCS Storage
+ Registry[storage1.CisID] = storage1
+ Registry[storage2.CisID] = storage2
+ Registry[storage3.CisID] = storage3
+
+ // SQL
+ Registry[sql1.CisID] = sql1
+ Registry[sql2.CisID] = sql2
+ Registry[sql3.CisID] = sql3
+ Registry[sql4.CisID] = sql4
+
+ // Kubernetes Engine
+ Registry[gke1.CisID] = gke1
+ Registry[gke2.CisID] = gke2
+ Registry[gke3.CisID] = gke3
+ Registry[gke4.CisID] = gke4
+ Registry[gke5.CisID] = gke5
+ Registry[gke6.CisID] = gke6
+ Registry[gke7.CisID] = gke7
+ Registry[gke8.CisID] = gke8
+ Registry[gke9.CisID] = gke9
+ Registry[gke10.CisID] = gke10
+ Registry[gke11.CisID] = gke11
+ Registry[gke12.CisID] = gke12
+ Registry[gke13.CisID] = gke13
+ Registry[gke14.CisID] = gke14
+ Registry[gke15.CisID] = gke15
+ Registry[gke16.CisID] = gke16
+ Registry[gke17.CisID] = gke17
+ Registry[gke18.CisID] = gke18
+}
diff --git a/pkg/cis/sql.go b/pkg/cis/sql.go
new file mode 100644
index 0000000..feb62f5
--- /dev/null
+++ b/pkg/cis/sql.go
@@ -0,0 +1,28 @@
+package cis
+
// The recommendations below cover section 6 (Cloud SQL Database Services)
// of the CIS Google Cloud Platform Foundation Benchmark. Each value is
// added to the package-level Registry during package initialization.
var (
	sql1 = Recommendation{
		Name:   "Ensure that Cloud SQL database instance requires all incoming connections to use SSL",
		CisID:  "6.1",
		Scored: true,
		Level:  1,
	}
	sql2 = Recommendation{
		Name:   "Ensure that Cloud SQL database Instances are not open to the world",
		CisID:  "6.2",
		Scored: true,
		Level:  1,
	}
	sql3 = Recommendation{
		Name:   "Ensure that MySql database instance does not allow anyone to connect with administrative privileges",
		CisID:  "6.3",
		Scored: true,
		Level:  1,
	}
	sql4 = Recommendation{
		Name:   "Ensure that MySQL Database Instance does not allows root login from any Host",
		CisID:  "6.4",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/cis/storage.go b/pkg/cis/storage.go
new file mode 100644
index 0000000..df14ec7
--- /dev/null
+++ b/pkg/cis/storage.go
@@ -0,0 +1,22 @@
+package cis
+
// The recommendations below cover section 5 (Storage) of the CIS Google
// Cloud Platform Foundation Benchmark. Each value is added to the
// package-level Registry during package initialization.
var (
	storage1 = Recommendation{
		Name:   "Ensure that Cloud Storage bucket is not anonymously or publicly accessible",
		CisID:  "5.1",
		Scored: true,
		Level:  1,
	}
	storage2 = Recommendation{
		Name:   "Ensure that there are no publicly accessible objects in storage buckets",
		CisID:  "5.2",
		Scored: false,
		Level:  1,
	}
	storage3 = Recommendation{
		Name:   "Ensure that logging is enabled for Cloud storage buckets",
		CisID:  "5.3",
		Scored: true,
		Level:  1,
	}
)
diff --git a/pkg/client/client.go b/pkg/client/client.go
new file mode 100644
index 0000000..fc28738
--- /dev/null
+++ b/pkg/client/client.go
@@ -0,0 +1,164 @@
+package client
+
+import (
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+
+ "github.com/golang/glog"
+
+ "context"
+
+ logging "cloud.google.com/go/logging/apiv2"
+ push "github.com/prometheus/client_golang/prometheus/push"
+ cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
+ compute "google.golang.org/api/compute/v1"
+ container "google.golang.org/api/container/v1"
+ iam "google.golang.org/api/iam/v1"
+ serviceusage "google.golang.org/api/serviceusage/v1"
+ storage "google.golang.org/api/storage/v1"
+)
+
// Client is the client used for auditing Google Cloud resources. It
// bundles every Google API service client the audit requires, caches the
// resources fetched through those clients (maps are keyed by project ID),
// and holds the Prometheus pusher used for metrics.
type Client struct {

	// API clients
	computeClient       *compute.Service
	cloudResourceClient *cloudresourcemanager.Service
	storageClient       *storage.Service
	containerClient     *container.Service
	serviceusageClient  *serviceusage.Service
	iamClient           *iam.Service
	logConfigClient     *logging.ConfigClient
	logMetricClient     *logging.MetricsClient

	// Root project list, as returned by the Cloud Resource Manager API
	resourceprojects []*cloudresourcemanager.Project

	// Resources (keyed by project ID except computeprojects)
	services        map[string][]*gcp.ServiceAPIResource
	computeprojects []*gcp.ComputeProjectResource
	computeMetadatas map[string]*gcp.ComputeProjectMetadataResource
	buckets          map[string][]*gcp.StorageBucketResource
	instances        map[string][]*gcp.ComputeInstanceResource

	// Container Resources
	clusters  map[string][]*gcp.ContainerClusterResource
	nodepools map[string][]*gcp.ContainerNodePoolResource

	// Compute Network Resources
	networks    map[string][]*gcp.ComputeNetworkResource
	subnetworks map[string][]*gcp.ComputeSubnetworkResource
	firewalls   map[string][]*gcp.ComputeFirewallRuleResource
	addresses   map[string][]*gcp.ComputeAddressResource

	// IAM Resources
	policies        map[string]*gcp.IamPolicyResource
	serviceaccounts map[string][]*gcp.IamServiceAccountResource

	// Logging resources
	logSinks   map[string][]*gcp.LoggingSinkResource
	logMetrics map[string][]*gcp.LoggingMetricResource

	// Metrics pusher
	pusher *push.Pusher
	// metricsArePushed — presumably flags whether metrics have been pushed
	// to the gateway; its usage is not visible in this file section, so
	// confirm against the metrics code before relying on this description.
	metricsArePushed bool
}
+
+// New returns a new wrk conforming to the worker.W interface
+func New() *Client {
+ var cc *compute.Service
+ var crm *cloudresourcemanager.Service
+ var cs *storage.Service
+ var con *container.Service
+ var su *serviceusage.Service
+ var i *iam.Service
+ var lc *logging.ConfigClient
+ var lm *logging.MetricsClient
+
+ c := new(Client)
+ ctx := context.Background()
+
+ // Create compute client
+ cc, err := compute.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create Google Cloud Engine client: %v", err)
+ }
+
+ // Create cloudresourcemanager client
+ crm, err = cloudresourcemanager.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create Google Cloud Resource Manager client: %v", err)
+ }
+
+ // Create storage client
+ cs, err = storage.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create Google Cloud Storage client: %v", err)
+ }
+
+ // Create container client
+ con, err = container.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create Google Container client: %v", err)
+ }
+
+ // Create serviceusage client
+ su, err = serviceusage.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create Google Service Usage client: %v", err)
+ }
+
+ i, err = iam.NewService(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create IAM client: %v", err)
+ }
+
+ lc, err = logging.NewConfigClient(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create logging config client: %v", err)
+ }
+
+ lm, err = logging.NewMetricsClient(ctx)
+ if err != nil {
+ glog.Fatalf("Failed to create logging metrics client: %v", err)
+ }
+
+ c.computeClient = cc
+ c.cloudResourceClient = crm
+ c.storageClient = cs
+ c.containerClient = con
+ c.serviceusageClient = su
+ c.iamClient = i
+ c.logConfigClient = lc
+ c.logMetricClient = lm
+
+ // Services
+ c.resourceprojects = []*cloudresourcemanager.Project{}
+ c.computeprojects = []*gcp.ComputeProjectResource{}
+
+ // Resources
+ c.services = make(map[string][]*gcp.ServiceAPIResource, 1)
+ c.computeMetadatas = make(map[string]*gcp.ComputeProjectMetadataResource, 1)
+ c.buckets = make(map[string][]*gcp.StorageBucketResource, 1)
+ c.instances = make(map[string][]*gcp.ComputeInstanceResource, 1)
+ c.clusters = make(map[string][]*gcp.ContainerClusterResource, 1)
+ c.nodepools = make(map[string][]*gcp.ContainerNodePoolResource, 1)
+
+ // Compute networking resources
+ c.networks = make(map[string][]*gcp.ComputeNetworkResource, 1)
+ c.subnetworks = make(map[string][]*gcp.ComputeSubnetworkResource, 1)
+ c.firewalls = make(map[string][]*gcp.ComputeFirewallRuleResource, 1)
+ c.addresses = make(map[string][]*gcp.ComputeAddressResource, 1)
+
+ // IAM resources
+ c.policies = make(map[string]*gcp.IamPolicyResource, 1)
+ c.serviceaccounts = make(map[string][]*gcp.IamServiceAccountResource, 1)
+
+ // Logging resources
+ c.logSinks = make(map[string][]*gcp.LoggingSinkResource, 1)
+ c.logMetrics = make(map[string][]*gcp.LoggingMetricResource, 1)
+
+ // Configure metrics
+ c.pusher = configureMetrics()
+
+ return c
+}
diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/client_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/compute.go b/pkg/client/compute.go
new file mode 100644
index 0000000..c4444a3
--- /dev/null
+++ b/pkg/client/compute.go
@@ -0,0 +1,421 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+ compute "google.golang.org/api/compute/v1"
+)
+
+func (c *Client) getZoneNames() ([]string, error) {
+
+ // Get the zones from the first project. Should be the same for all projects
+ zoneNames := make([]string, 0)
+ var zones *compute.ZoneList
+ var err error
+ for i := 0; i < len(c.resourceprojects); i++ {
+
+ zones, err = c.computeClient.Zones.List(c.resourceprojects[i].ProjectId).Do()
+ if err == nil {
+ // We got a valid list of zones, so skip
+ break
+ }
+ }
+
+ if zones == nil {
+ err = fmt.Errorf("Error retrieving zones list from any project: %v", err)
+ return nil, err
+ }
+
+ for _, z := range zones.Items {
+ zoneNames = append(zoneNames, z.Name)
+ }
+
+ return zoneNames, nil
+}
+
+func (c *Client) getRegionNames() ([]string, error) {
+
+ // Get the regions from the first project. Should be the same for all projects
+ regionNames := make([]string, 0)
+ var regions *compute.RegionList
+ var err error
+ for i := 0; i < len(c.resourceprojects); i++ {
+
+ regions, err = c.computeClient.Regions.List(c.resourceprojects[i].ProjectId).Do()
+ if err == nil {
+ break
+ }
+ }
+
+ if regions == nil {
+ err = fmt.Errorf("Error retrieving regions list from any project: %v", err)
+ return nil, err
+ }
+
+ for _, r := range regions.Items {
+ regionNames = append(regionNames, r.Name)
+ }
+
+ return regionNames, nil
+}
+
// GetComputeResources launches the process retrieving compute resources.
// For every project with the compute API enabled, it stores the compute
// view of the project, the project's common instance metadata, and the
// project's instances across all zones (queried concurrently with one
// goroutine per zone). Any API error terminates the process via
// glog.Fatalf. Always returns nil.
func (c *Client) GetComputeResources() error {

	defer utils.Elapsed("GetComputeResources")()

	// Get list of all projects.
	projects := c.resourceprojects

	zoneNames, err := c.getZoneNames()
	if err != nil {
		glog.Fatalf("%v", err)
	}

	// Create a worker pool for querying zones a bit faster.
	// NOTE(review): the id parameter is never used inside the worker, and
	// glog.Fatalf inside a goroutine aborts the entire process on a single
	// zone's API failure — confirm both are intended.
	zoneWorker := func(projectID string, id int, zones <-chan string, results chan<- []*gcp.ComputeInstanceResource) {

		// For each zone passed to this worker
		for z := range zones {

			// Create the list of instance resources to be retrieved for this zone
			instanceResources := []*gcp.ComputeInstanceResource{}
			res, err := c.computeClient.Instances.List(projectID, z).Do()
			if err != nil {
				glog.Fatalf("Error retrieving project %v's instances in zone %v: %v", projectID, z, err)
			}

			// Wrap each raw instance in its resource type
			for _, i := range res.Items {
				instanceResources = append(instanceResources, gcp.NewComputeInstanceResource(i))
			}

			// Pass this zone's instance list across the results channel
			results <- instanceResources
		}
	}

	// For each project, collect information
	for _, p := range projects {

		// Only save the project ID
		projectID := p.ProjectId

		// Check that the compute API is enabled for the project. If not, then skip auditing compute resources for the project entirely
		if !c.isServiceEnabled(projectID, "compute.googleapis.com") {
			continue
		}

		// Get the compute API's version of the project
		project, err := c.computeClient.Projects.Get(projectID).Do()
		if err != nil {
			glog.Fatalf("Error retrieving project %v's metadata: %v", projectID, err)
		}

		// Store the project resource
		c.computeprojects = append(c.computeprojects, gcp.NewComputeProjectResource(project))

		// Store the project's compute metadata resource
		c.computeMetadatas[projectID] = gcp.NewComputeProjectMetadataResource(project.CommonInstanceMetadata)

		instances := []*gcp.ComputeInstanceResource{}
		jobs := make(chan string, len(zoneNames))
		results := make(chan []*gcp.ComputeInstanceResource, len(zoneNames))

		// Create the zone worker pool: one worker per zone, so the buffered
		// sends below never block
		for w := 0; w < len(zoneNames); w++ {
			go zoneWorker(projectID, w, jobs, results)
		}

		// Feed the zone names
		for _, z := range zoneNames {
			jobs <- z
		}
		close(jobs)

		// Retrieve the full instances list; exactly one receive per zone
		// matches the one send performed for each zone consumed
		for i := 0; i < len(zoneNames); i++ {
			instances = append(instances, <-results...)
		}

		// Store the project instance's resources
		c.instances[projectID] = instances

	}

	return nil
}
+
// GenerateComputeMetadataReports signals the client to process ComputeMetadataResource's for reports.
// One report is produced per project, checking the project-wide common
// instance metadata keys 'block-project-ssh-keys' (CIS 4.2),
// 'enable-oslogin' (CIS 4.3), 'serial-port-enable' (CIS 4.4), and
// 'disable-legacy-endpoints' (non-CIS control).
func (c *Client) GenerateComputeMetadataReports() (reports []report.Report, err error) {

	reports = []report.Report{}
	typ := "compute_metadata"

	// For each project compute metadata, generate one report
	for _, p := range c.computeprojects {
		projectID := p.Name()
		projectMetadata := c.computeMetadatas[projectID]
		r := report.NewReport(typ, fmt.Sprintf("Project %v Common Instance Metadata", projectID))

		// Always connect the data for the report with the source data
		if r.Data, err = projectMetadata.Marshal(); err != nil {
			glog.Fatalf("Failed to marshal project metadata: %v", err)
		}

		// CIS 4.2: project-wide SSH keys should be blocked
		blockSSHKeys := report.NewCISControl(
			"4.2",
			"Project metadata should include 'block-project-ssh-keys' and be set to 'true'",
		)
		// NOTE(review): when KeyValueEquals returns (false, nil), the entire
		// audit is aborted via glog.Fatalf rather than the control being
		// marked failed — confirm that is intended. The same pattern repeats
		// for each control below.
		res, err := projectMetadata.KeyValueEquals("block-project-ssh-keys", "true")
		if err != nil {
			blockSSHKeys.Error = err.Error()
		} else {
			if res {
				blockSSHKeys.Passed()
			} else {
				glog.Fatalf("Could not determine the state of project %v's metadata, aborting...", projectID)
			}
		}

		// CIS 4.3: OS Login should be enabled project-wide
		osLogin := report.NewCISControl(
			"4.3",
			"Project metadata should include the key 'enable-oslogin' with value set to 'true'",
		)
		res, err = projectMetadata.KeyValueEquals("enable-oslogin", "true")
		if err != nil {
			osLogin.Error = err.Error()
		} else {
			if res {
				osLogin.Passed()
			} else {
				glog.Fatalf("Could not determine the state of project %v's metadata, aborting...", projectID)
			}
		}

		// Dynamic serial port access should be denied
		// serial-port-enable is a special case, where absence of the key is equivalent to disabling serial port access
		serialPortAccess := report.NewCISControl(
			"4.4",
			"Project metadata should include the key 'serial-port-enable' with value set to '0'",
		)
		if projectMetadata.KeyAbsent("serial-port-enable") {
			serialPortAccess.Passed()
		} else {
			res, err = projectMetadata.KeyValueEquals("serial-port-enable", "0")
			if err != nil {
				serialPortAccess.Error = err.Error()
			} else {
				if res {
					serialPortAccess.Passed()
				} else {
					glog.Fatalf("Could not determine the state of project %v's metadata, aborting...", projectID)
				}
			}
		}

		// Non-CIS control: legacy metadata endpoints should be disabled
		legacyMetadata := report.NewControl(
			"Ensure legacy metadata endpoints are not enabled for VM Instance",
			"Project metadata should include the key 'disable-legacy-endpoints' with value set to 'true'",
		)

		res, err = projectMetadata.KeyValueEquals("disable-legacy-endpoints", "true")
		if err != nil {
			legacyMetadata.Error = err.Error()
		} else {
			if res {
				legacyMetadata.Passed()
			} else {
				glog.Fatalf("Could not determine the state of project %v's metadata, aborting...", projectID)
			}
		}

		// Append the control to this resource's report
		r.AddControls(blockSSHKeys, osLogin, serialPortAccess, legacyMetadata)

		// Append the resource's report to our final list.
		// NOTE(review): unlike GenerateComputeInstanceReports, this loop does
		// not increment totalResourcesCounter — confirm whether that is
		// deliberate.
		reports = append(reports, r)
		c.incrementMetrics(typ, projectID, r.Status(), projectID)
	}

	return
}
+
+// GenerateComputeInstanceReports signals the client to process ComputeInstanceResource's for reports.
+// If there are keys configured for instances in the configuration, no reports will be created.
+func (c *Client) GenerateComputeInstanceReports() (reports []report.Report, err error) {
+
+ reports = []report.Report{}
+ typ := "compute_instance"
+
+ for _, p := range c.computeprojects {
+
+ projectID := p.Name()
+ instanceResources := c.instances[projectID]
+ metadata := c.computeMetadatas[projectID]
+
+ for _, i := range instanceResources {
+ r := report.NewReport(typ, fmt.Sprintf("Project %v Compute Instance %v", projectID, i.Name()))
+ if r.Data, err = i.Marshal(); err != nil {
+ glog.Fatalf("Failed to marshal compute instance: %v", err)
+ }
+
+ // Make sure the number of Network Interfaces matches what is expected
+ numNicsControl := report.NewControl(
+ fmt.Sprintf("numNetworkInterfaces=%v", *flagComputeInstanceNumInterfaces),
+ fmt.Sprintf("Compute Instance should have a number of network interfaces equal to %v", *flagComputeInstanceNumInterfaces),
+ )
+
+ _, err := i.HasNumNetworkInterfaces(*flagComputeInstanceNumInterfaces)
+ if err != nil {
+ numNicsControl.Error = err.Error()
+ } else {
+ numNicsControl.Passed()
+ }
+
+ // Measure whether a NAT ip address is expected
+ natIPControl := report.NewControl(
+ fmt.Sprintf("hasNatIP=%v", *flagComputeInstanceAllowNat),
+ fmt.Sprintf("Compute Instance should have a NAT ip configured: %v", *flagComputeInstanceAllowNat),
+ )
+ if i.HasNatIP() {
+ if *flagComputeInstanceAllowNat {
+ // External IP exists, and we want it to exist
+ natIPControl.Passed()
+ } else {
+ // External IP exists, but we don't want it to exist
+ natIPControl.Error = "Compute Instance has NAT IP address, but should not"
+ }
+ } else {
+ // External IP does not exist, and we don't want it to exist
+ if !*flagComputeInstanceAllowNat {
+ natIPControl.Passed()
+ } else {
+ // It doesn't exist but we wanted it to exist
+ natIPControl.Error = "Compute Instance does not have a NAT IP address, but it should"
+ }
+ }
+
+ // Default compute service account should not be used to launch instances
+ defaultSA := report.NewCISControl(
+ "4.1",
+ "Compute Instance should not use the project default compute service account",
+ )
+
+ if !i.UsesDefaultServiceAccount() {
+ defaultSA.Passed()
+ } else {
+ defaultSA.Error = "Compute instance uses a default compute service account"
+ }
+
+ wrapMetadata := func(meta *gcp.ComputeProjectMetadataResource, i *gcp.ComputeInstanceResource, k string, v string) (bool, error) {
+ result, err := meta.KeyValueEquals(k, v)
+ if err != nil {
+ result, err = i.KeyValueEquals(k, v)
+ }
+ return result, err
+ }
+
+ // Project-wide SSH keys should not be used to access instances
+ blockSSHKeys := report.NewCISControl(
+ "4.2",
+ "Compute Instance metadata should include 'block-project-ssh-keys' and be set to 'true'",
+ )
+ res, err := wrapMetadata(metadata, i, "block-project-ssh-keys", "true")
+ if err != nil {
+ blockSSHKeys.Error = err.Error()
+ } else {
+ if res {
+ blockSSHKeys.Passed()
+ } else {
+ glog.Fatalf("Could not determine the state of instance %v's metadata, aborting...", projectID)
+ }
+ }
+
+ // Ensure os-login is enabled
+ osLogin := report.NewCISControl(
+ "4.3",
+ "Compute Instance metadata should include the key 'enable-oslogin' with value set to 'true'",
+ )
+ res, err = wrapMetadata(metadata, i, "enable-oslogin", "true")
+ if err != nil {
+ osLogin.Error = err.Error()
+ } else {
+ if res {
+ osLogin.Passed()
+ } else {
+ glog.Fatalf("Could not determine the state of instance %v's metadata, aborting...", projectID)
+ }
+ }
+
+ // Dynamic serial port access should be denied
+ // serial-port-enable is a special case, where absence of the key is equivalent to disabling serial port access
+ serialPortAccess := report.NewCISControl(
+ "4.4",
+ "Compute Instance metadata should include the key 'serial-port-enable' with value set to '0'",
+ )
+ if metadata.KeyAbsent("serial-port-enable") && i.KeyAbsent("serial-port-enable") {
+ serialPortAccess.Passed()
+ } else {
+ res, err = wrapMetadata(metadata, i, "serial-port-enable", "0")
+ if err != nil {
+ serialPortAccess.Error = err.Error()
+ } else {
+ if res {
+ serialPortAccess.Passed()
+ } else {
+ glog.Fatalf("Could not determine the state of instance %v's metadata, aborting...", projectID)
+ }
+ }
+ }
+
+ // IP forwarding should not be enabled
+ ipForwarding := report.NewCISControl(
+ "4.5",
+ "Compute Instance should not allow ip forwarding of packets",
+ )
+ if !i.HasIPForwardingEnabled() {
+ ipForwarding.Passed()
+ } else {
+ if *flagComputeInstanceAllowIPForwarding {
+ ipForwarding.Passed()
+ } else {
+ ipForwarding.Error = "Compute Instance allows IP Forwarding"
+ }
+ }
+
+ // Disks should be using Customer Supplied Encryption Keys
+ csekDisk := report.NewCISControl(
+ "4.6",
+ "Compute Instance should be encrypted with a CSEK",
+ )
+ if err := i.UsesCustomerSuppliedEncryptionKeys(); err != nil {
+ csekDisk.Passed()
+ } else {
+ csekDisk.Error = "Compute Instance does not have CSEK encryption on disk"
+ }
+
+ r.AddControls(
+ numNicsControl,
+ natIPControl,
+ defaultSA,
+ blockSSHKeys,
+ osLogin,
+ serialPortAccess,
+ ipForwarding,
+ )
+
+ // Add the instance resource report to the final list of reports
+ reports = append(reports, r)
+ totalResourcesCounter.Inc()
+ c.incrementMetrics(typ, i.Name(), r.Status(), projectID)
+ }
+ }
+
+ return
+}
diff --git a/pkg/client/compute_test.go b/pkg/client/compute_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/compute_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/container.go b/pkg/client/container.go
new file mode 100644
index 0000000..219de60
--- /dev/null
+++ b/pkg/client/container.go
@@ -0,0 +1,315 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+)
+
// projectParam wraps a GCP project ID so it can be handed to APIs that
// expect a key/value call parameter ("projectId" -> <id>).
type projectParam struct {
	projectID string
}

// Get returns the parameter as a ("projectId", <project id>) pair.
func (p projectParam) Get() (key, value string) {
	key = "projectId"
	value = p.projectID
	return
}
+
// GetContainerResources launches the process retrieving container cluster and nodepool resources.
// It fans out one goroutine per resource project, stores the discovered clusters in c.clusters
// keyed by project ID, and blocks until every project has reported back.
func (c *Client) GetContainerResources() error {

	defer utils.Elapsed("GetContainerResources")()

	clustersService := c.containerClient.Projects.Locations.Clusters

	// Create a short-lived goroutine for retrieving a project's container clusters.
	// Each worker consumes exactly one project ID and sends exactly one result,
	// so the send/receive counts below always balance.
	worker := func(projectIDs <-chan string, results chan<- containerCallResult) {

		id := <-projectIDs
		res := containerCallResult{ProjectID: id, Clusters: []*gcp.ContainerClusterResource{}}

		// Check that the container API is enabled. If not, don't audit container resources in the project
		// (an empty result is still sent so the collector loop isn't starved).
		if !c.isServiceEnabled(id, "container.googleapis.com") {
			results <- res
			return
		}

		// Perform the query; "-" is the wildcard location, so this lists
		// clusters across all zones/regions of the project in one call.
		location := fmt.Sprintf("projects/%v/locations/-", id)
		clusters, err := clustersService.List(location).Do()
		if err != nil {
			// Fatalf aborts the whole process, even from a goroutine.
			glog.Fatalf("Error retrieving container clusters in project %v: %v", id, err)
		}

		for _, cluster := range clusters.Clusters {
			res.Clusters = append(res.Clusters, gcp.NewContainerClusterResource(cluster))
		}

		results <- res
	}

	// Setup worker pool. Channels are buffered to the project count so the
	// feed loop below can never block.
	projectIDs := make(chan string, len(c.resourceprojects))
	results := make(chan containerCallResult, len(c.resourceprojects))
	numWorkers := len(c.resourceprojects)
	for w := 0; w < numWorkers; w++ {
		go worker(projectIDs, results)
	}

	// Feed the workers and collect the cluster info
	for _, p := range c.resourceprojects {
		projectIDs <- p.ProjectId
	}

	// Collect the info; one result per worker, keyed by project ID.
	for i := 0; i < numWorkers; i++ {
		res := <-results
		c.clusters[res.ProjectID] = res.Clusters
	}

	return nil
}
+
// containerCallResult carries one project's cluster listing back from a
// GetContainerResources worker goroutine to the collector loop.
type containerCallResult struct {
	ProjectID string                         // project the clusters were listed from
	Clusters  []*gcp.ContainerClusterResource // clusters found (empty when the container API is disabled)
}
+
+// GenerateContainerClusterReports signals the client to process ContainerClusterResource's for reports.
+func (c *Client) GenerateContainerClusterReports() (reports []report.Report, err error) {
+
+ reports = []report.Report{}
+ typ := "container_cluster"
+
+ for _, p := range c.computeprojects {
+ projectID := p.Name()
+ for _, cluster := range c.clusters[projectID] {
+ r := report.NewReport(
+ typ,
+ fmt.Sprintf("Project %v Container Cluster %v", projectID, cluster.Name()),
+ )
+ if r.Data, err = cluster.Marshal(); err != nil {
+ glog.Fatalf("Failed to marshal container cluster: %v", err)
+ }
+
+ // Clusters should have stackdriver logging enabled
+ sdLogging := report.NewCISControl(
+ "7.1",
+ fmt.Sprintf("Cluster %v should have Stackdriver logging enabled", cluster.Name()),
+ )
+ if !cluster.IsStackdriverLoggingEnabled() {
+ sdLogging.Error = "Stackdriver logging is not enabled"
+ } else {
+ sdLogging.Passed()
+ }
+
+ // Clusters should have stackdriver monitoring enabled
+ sdMonitoring := report.NewCISControl(
+ "7.2",
+ fmt.Sprintf("Cluster %v should have Stackdriver monitoring enabled", cluster.Name()),
+ )
+ if !cluster.IsStackdriverMonitoringEnabled() {
+ sdMonitoring.Error = "Stackdriver monitoring is not enabled"
+ } else {
+ sdMonitoring.Passed()
+ }
+ // Clusters should not enable Attribute-Based Access Control (ABAC)
+ abac := report.NewCISControl(
+ "7.3",
+ fmt.Sprintf("Cluster %v should have Legacy ABAC disabled", cluster.Name()),
+ )
+ if !cluster.IsAbacDisabled() {
+ abac.Error = "Cluster has Legacy ABAC enabled when it should not"
+ } else {
+ abac.Passed()
+ }
+
+ // Clusters should use Master authorized networks
+ masterAuthNetworks := report.NewCISControl(
+ "7.4",
+ fmt.Sprintf("Cluster %v should have Master authorized networks enabled", cluster.Name()),
+ )
+ if !cluster.IsMasterAuthorizedNetworksEnabled() {
+ masterAuthNetworks.Error = "Cluster does not have Master Authorized Networks enabled"
+ } else {
+ masterAuthNetworks.Passed()
+ }
+
+ // Clusters should not enable Kubernetes Dashboard
+ dashboard := report.NewCISControl(
+ "7.6",
+ fmt.Sprintf("Cluster %v should have Kubernetes Dashboard disabled", cluster.Name()),
+ )
+ if !cluster.IsDashboardAddonDisabled() {
+ dashboard.Error = "Cluster has Kubernetes Dashboard add-on enabled when it should not"
+ } else {
+ dashboard.Passed()
+ }
+
+ // Clusters should not allow authentication with username/password
+ masterAuthPassword := report.NewCISControl(
+ "7.10",
+ fmt.Sprintf("Cluster %v should not have a password configured", cluster.Name()),
+ )
+ if !cluster.IsMasterAuthPasswordDisabled() {
+ masterAuthPassword.Error = "Cluster has a password configured to allow basic auth when it should not"
+ } else {
+ masterAuthPassword.Passed()
+ }
+
+ // Clusters should enable network policies (pod-to-pod policy)
+ networkPolicy := report.NewCISControl(
+ "7.11",
+ fmt.Sprintf("Cluster %v should have Network Policy addon enabled", cluster.Name()),
+ )
+ if !cluster.IsNetworkPolicyAddonEnabled() {
+ networkPolicy.Error = "Cluster does not have Network Policy addon enabled when it should"
+ } else {
+ networkPolicy.Passed()
+ }
+
+ // Clusters should not allow authentication with client certificates
+ clientCert := report.NewCISControl(
+ "7.12",
+ fmt.Sprintf("Cluster %v should not issue client certificates", cluster.Name()),
+ )
+ if !cluster.IsClientCertificateDisabled() {
+ clientCert.Error = "Cluster has ABAC enabled when it should not"
+ } else {
+ clientCert.Passed()
+ }
+
+ // Clusters should be launched as VPC-native and use Pod Alias IP ranges
+ aliasIps := report.NewCISControl(
+ "7.13",
+ fmt.Sprintf("Cluster %v should use VPC-native alias IP ranges", cluster.Name()),
+ )
+ if !cluster.IsAliasIPEnabled() {
+ aliasIps.Error = "Cluster is not using VPC-native alias IP ranges"
+ } else {
+ aliasIps.Passed()
+ }
+
+ // Cluster master should not be accessible over public IP
+ privateMaster := report.NewCISControl(
+ "7.15",
+ fmt.Sprintf("Cluster %v master should be private and not accessible over public IP", cluster.Name()),
+ )
+ if !cluster.IsMasterPrivate() {
+ privateMaster.Error = "Cluster master is not private and is routeable on public internet"
+ } else {
+ privateMaster.Passed()
+ }
+
+ // Cluster nodes should not be accessible over public IP
+ privateNodes := report.NewCISControl(
+ "7.15",
+ fmt.Sprintf("Cluster %v nodes should be private and not accessible over public IPs", cluster.Name()),
+ )
+ if !cluster.IsNodesPrivate() {
+ privateNodes.Error = "Cluster nodes are not private and are routable on the public internet"
+ } else {
+ privateNodes.Passed()
+ }
+
+ // Cluster should not be launched using the default compute service account
+ defaultSA := report.NewCISControl(
+ "7.17",
+ fmt.Sprintf("Cluster %v should not be using the default compute service account", cluster.Name()),
+ )
+ if cluster.IsUsingDefaultServiceAccount() {
+ defaultSA.Error = "Cluster is using the default compute service account"
+ } else {
+ defaultSA.Passed()
+ }
+
+ // Cluster should be using minimal OAuth scopes
+ oauthScopes := report.NewCISControl(
+ "7.18",
+ fmt.Sprintf("Cluster %v should be launched with minimal OAuth scopes", cluster.Name()),
+ )
+ if _, err := cluster.IsUsingMinimalOAuthScopes(); err != nil {
+ oauthScopes.Error = err.Error()
+ } else {
+ oauthScopes.Passed()
+ }
+
+ r.AddControls(sdLogging, sdMonitoring, abac, masterAuthNetworks, dashboard, masterAuthPassword, networkPolicy, clientCert, aliasIps, privateMaster, privateNodes, defaultSA, oauthScopes)
+ reports = append(reports, r)
+ c.incrementMetrics(typ, cluster.Name(), r.Status(), projectID)
+ }
+ }
+
+ return
+}
+
+// GenerateContainerNodePoolReports signals the client to process ContainerNodePoolResource's for reports.
+func (c *Client) GenerateContainerNodePoolReports() (reports []report.Report, err error) {
+ reports = []report.Report{}
+ typ := "container_nodepool"
+
+ for _, p := range c.computeprojects {
+ projectID := p.Name()
+ for _, nodepool := range c.nodepools[projectID] {
+ r := report.NewReport(
+ typ,
+ fmt.Sprintf("Project %v Container Node Pool %v", projectID, nodepool.Name()),
+ )
+ if r.Data, err = nodepool.Marshal(); err != nil {
+ glog.Fatalf("Failed to marshal container node pool: %v", err)
+ }
+
+ // Nodepools should not allow use of legacy metadata APIs
+ legacyAPI := report.NewControl(
+ "disableLegacyMetadataAPI",
+ fmt.Sprintf("Node pool %v should have legacy metadata API disabled", nodepool.Name()),
+ )
+ if _, err := nodepool.IsLegacyMetadataAPIDisabled(); err != nil {
+ legacyAPI.Error = err.Error()
+ } else {
+ legacyAPI.Passed()
+ }
+
+ // Node pools should be configured for automatic repairs
+ repair := report.NewCISControl(
+ "7.7",
+ fmt.Sprintf("Node pool %v should have automatic repairs enabled", nodepool.Name()),
+ )
+ if !nodepool.IsAutoRepairEnabled() {
+ repair.Error = "Automatic node repair is not enabled"
+ } else {
+ repair.Passed()
+ }
+
+ // Node pools should be configured for automatic upgrades
+ upgrade := report.NewCISControl(
+ "7.8",
+ fmt.Sprintf("Node pool %v should have automatic upgrades enabled", nodepool.Name()),
+ )
+ if !nodepool.IsAutoUpgradeEnabled() {
+ upgrade.Error = "Automatic node upgrade is not enabled"
+ } else {
+ upgrade.Passed()
+ }
+
+ // Node pools should be using COS (Google Container OS)
+ cos := report.NewCISControl(
+ "7.9",
+ fmt.Sprintf("Node pool %v should be using COS", nodepool.Name()),
+ )
+ if _, err := nodepool.CheckDistributionTypeIs("COS"); err != nil {
+ cos.Error = err.Error()
+ } else {
+ cos.Passed()
+ }
+
+ r.AddControls(legacyAPI, repair, upgrade, cos)
+ reports = append(reports, r)
+ c.incrementMetrics(typ, nodepool.Name(), r.Status(), projectID)
+ }
+ }
+
+ return
+}
diff --git a/pkg/client/container_test.go b/pkg/client/container_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/container_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/flags.go b/pkg/client/flags.go
new file mode 100644
index 0000000..fa14580
--- /dev/null
+++ b/pkg/client/flags.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "flag"
+
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+)
+
// Each flag falls back to an environment variable (via the utils.GetEnv* helpers)
// so the scanner can be configured either on the command line or through the
// container environment.
var (
	// Metrics: opt-in Prometheus push-gateway reporting.
	flagMetricsEnabled = flag.Bool("metrics.enabled", utils.GetEnvBool("NEMESIS_METRICS_ENABLED"), "Enable Prometheus metrics")
	flagMetricsGateway = flag.String("metrics.gateway", utils.GetEnv("NEMESIS_METRICS_GATEWAY", "127.0.0.1:9091"), "Prometheus metrics Push Gateway")

	// Projects: selects which GCP projects are audited. No usable default.
	flagProjectFilter = flag.String("project.filter", utils.GetEnv("NEMESIS_PROJECT_FILTER", ""), "REQUIRED - the project filter to perform audits on.")

	// Compute: policy knobs consumed by the compute-instance report generator.
	flagComputeInstanceNumInterfaces = flag.Int("compute.instance.num-interfaces", utils.GetEnvInt("NEMESIS_COMPUTE_NUM_NICS", 1), "The number of network interfaces (NIC) that an instance should have")
	flagComputeInstanceAllowNat = flag.Bool("compute.instance.allow-nat", utils.GetEnvBool("NEMESIS_COMPUTE_ALLOW_NAT"), "Indicate whether instances should be allowed to have external (NAT) IP addresses")
	flagComputeInstanceAllowIPForwarding = flag.Bool("compute.instance.allow-ip-forwarding", utils.GetEnvBool("NEMESIS_COMPUTE_ALLOW_IP_FORWARDING"), "Indicate whether instances should be allowed to perform IP forwarding")
)
diff --git a/pkg/client/iam.go b/pkg/client/iam.go
new file mode 100644
index 0000000..76b62dd
--- /dev/null
+++ b/pkg/client/iam.go
@@ -0,0 +1,214 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+ "google.golang.org/api/cloudresourcemanager/v1"
+)
+
+// GetIamResources gathers the list of IAM resources for the projects
+func (c *Client) GetIamResources() error {
+
+ defer utils.Elapsed("GetIamResources")()
+
+ worker := func(projectIDs <-chan string, results chan<- iamCallResult) {
+
+ id := <-projectIDs
+ projectID := fmt.Sprintf("projects/%v", id)
+ res := iamCallResult{ProjectID: id, Policy: nil, ServiceAccounts: []*gcp.IamServiceAccountResource{}}
+
+ req := cloudresourcemanager.GetIamPolicyRequest{}
+ policy, err := c.cloudResourceClient.Projects.GetIamPolicy(id, &req).Do()
+ if err != nil {
+ glog.Fatalf("Failed to retrieve IAM policy for project %v: %v", id, err)
+ }
+
+ res.Policy = gcp.NewIamPolicyResource(policy)
+
+ saList, err := c.iamClient.Projects.ServiceAccounts.List(projectID).Do()
+ if err != nil {
+ glog.Fatalf("Failed to retrieve service accounts from project %v: %v", id, err)
+ }
+
+ for _, a := range saList.Accounts {
+
+ acct := gcp.NewIamServiceAccountResource(a)
+ saKeySearch := fmt.Sprintf("%v/serviceAccounts/%v", projectID, a.UniqueId)
+ keys, err := c.iamClient.Projects.ServiceAccounts.Keys.List(saKeySearch).KeyTypes("USER_MANAGED").Do()
+ if err != nil {
+ glog.Fatalf("Failed to retrieve service account keys from project %v: %v", id, err)
+ }
+ for _, k := range keys.Keys {
+ acct.Keys = append(acct.Keys, k)
+ }
+
+ res.ServiceAccounts = append(res.ServiceAccounts, acct)
+ }
+
+ results <- res
+ }
+
+ // Setup worker pool
+ projectIDs := make(chan string, len(c.resourceprojects))
+ results := make(chan iamCallResult, len(c.resourceprojects))
+ numWorkers := len(c.resourceprojects)
+ for w := 0; w < numWorkers; w++ {
+ go worker(projectIDs, results)
+ }
+
+ // Feed the workers and collect the cluster info
+ for _, p := range c.resourceprojects {
+ projectIDs <- p.ProjectId
+ }
+
+ // Collect the info
+ for i := 0; i < numWorkers; i++ {
+ res := <-results
+ c.policies[res.ProjectID] = res.Policy
+ c.serviceaccounts[res.ProjectID] = res.ServiceAccounts
+ }
+
+ return nil
+}
+
// iamCallResult carries one project's IAM policy and service-account listing
// back from a GetIamResources worker goroutine to the collector loop.
type iamCallResult struct {
	ProjectID       string                          // project the data was fetched from
	Policy          *gcp.IamPolicyResource          // project-level IAM policy
	ServiceAccounts []*gcp.IamServiceAccountResource // service accounts with their USER_MANAGED keys attached
}
+
+// GenerateIAMPolicyReports signals the client to process IamPolicyResource's for reports.
+func (c *Client) GenerateIAMPolicyReports() (reports []report.Report, err error) {
+
+ reports = []report.Report{}
+ typ := "iam_policy"
+
+ for _, p := range c.computeprojects {
+ projectID := p.Name()
+ policy := c.policies[projectID]
+ serviceAccounts := c.serviceaccounts[projectID]
+
+ r := report.NewReport(
+ typ,
+ fmt.Sprintf("Project %v IAM Policy", projectID),
+ )
+ r.Data, err = policy.Marshal()
+ if err != nil {
+ glog.Fatalf("Failed to marshal IAM policy: %v", err)
+ }
+
+ // Corporate login credentials should be used
+ corpCreds := report.NewCISControl(
+ "1.1",
+ fmt.Sprintf("Project %v should only allow corporate login credentials", p.Name()),
+ )
+ if err := policy.PolicyViolatesUserDomainWhitelist(); err != nil {
+ corpCreds.Error = err.Error()
+ } else {
+ corpCreds.Passed()
+ }
+ r.AddControls(corpCreds)
+
+ for _, sa := range serviceAccounts {
+
+ // Service account keys should be GCP-managed
+ saManagedKeys := report.NewCISControl(
+ "1.3",
+ fmt.Sprintf("%v should not have user-managed keys", sa.Email()),
+ )
+ if sa.HasUserManagedKeys() {
+ saManagedKeys.Error = "Service account has user-managed keys"
+ } else {
+ saManagedKeys.Passed()
+ }
+ r.AddControls(saManagedKeys)
+
+ // Service accounts should not have admin privileges
+ saAdminRole := report.NewCISControl(
+ "1.4",
+ fmt.Sprintf("%v should not have admin roles", sa.Email()),
+ )
+ if err := policy.MemberHasAdminRole(fmt.Sprintf("serviceAccount:%v", sa.Email())); err != nil {
+ saAdminRole.Error = err.Error()
+ } else {
+ saAdminRole.Passed()
+ }
+ r.AddControls(saAdminRole)
+
+ }
+
+ // IAM Users should not be able to impersonate service accounts at the project level
+ saServiceAccountUserRole := report.NewCISControl(
+ "1.5",
+ fmt.Sprintf("Project %v should not allow project-wide use of Service Account User role", p.Name()),
+ )
+ if err := policy.PolicyAllowsIAMUserServiceAccountUserRole(); err != nil {
+ saServiceAccountUserRole.Error = err.Error()
+ } else {
+ saServiceAccountUserRole.Passed()
+ }
+ r.AddControls(saServiceAccountUserRole)
+
+ // Service account keys should be rotated on a regular interval
+ for _, sa := range serviceAccounts {
+ saKeyExpired := report.NewCISControl(
+ "1.6",
+ fmt.Sprintf("%v should not have expired keys", sa.Email()),
+ )
+ if err := sa.HasKeysNeedingRotation(); err != nil {
+ saKeyExpired.Error = err.Error()
+ } else {
+ saKeyExpired.Passed()
+ }
+ r.AddControls(saKeyExpired)
+ }
+
+ // Users should not be allowed to administrate and impersonate service accounts
+ saSeperateDuties := report.NewCISControl(
+ "1.7",
+ fmt.Sprintf("Project %v should have separation of duties with respect to service account usage", p.Name()),
+ )
+ if err := policy.PolicyViolatesServiceAccountSeparationoOfDuties(); err != nil {
+ saSeperateDuties.Error = err.Error()
+ } else {
+ saSeperateDuties.Passed()
+ }
+ r.AddControls(saSeperateDuties)
+
+ // Users should not be allowed to administrate and utilize KMS functionality
+ kmsSeperateDuties := report.NewCISControl(
+ "1.9",
+ fmt.Sprintf("Project %v should have separation of duties with respect to KMS usage", p.Name()),
+ )
+ if err := policy.PolicyViolatesKMSSeparationoOfDuties(); err != nil {
+ kmsSeperateDuties.Error = err.Error()
+ } else {
+ kmsSeperateDuties.Passed()
+ }
+ r.AddControls(kmsSeperateDuties)
+
+ // Project IAM Policies should define audit configurations
+ auditConfig := report.NewCISControl(
+ "2.1",
+ fmt.Sprintf("Project %v should proper audit logging configurations", p.Name()),
+ )
+ if err := policy.PolicyConfiguresAuditLogging(); err != nil {
+ auditConfig.Error = err.Error()
+ } else {
+ if err := policy.PolicyDoesNotHaveAuditLogExceptions(); err != nil {
+ auditConfig.Error = err.Error()
+ } else {
+ auditConfig.Passed()
+ }
+ }
+ r.AddControls(auditConfig)
+
+ reports = append(reports, r)
+ }
+
+ return
+}
diff --git a/pkg/client/iam_test.go b/pkg/client/iam_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/iam_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/logging.go b/pkg/client/logging.go
new file mode 100644
index 0000000..377013b
--- /dev/null
+++ b/pkg/client/logging.go
@@ -0,0 +1,237 @@
+package client
+
import (
	"context"
	"fmt"

	"github.com/Unity-Technologies/nemesis/pkg/report"
	"github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
	"github.com/Unity-Technologies/nemesis/pkg/utils"
	"github.com/golang/glog"
	"google.golang.org/api/iterator"
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
)
+
+// GetLoggingResources returns the logging config and log-based metric configurations
+func (c *Client) GetLoggingResources() error {
+
+ defer utils.Elapsed("GetLoggingResources")()
+
+ worker := func(projectIDs <-chan string, results chan<- loggingClientResult) {
+
+ id := <-projectIDs
+ parent := fmt.Sprintf("projects/%s", id)
+
+ ctx := context.Background()
+ res := loggingClientResult{ProjectID: id, LogSinks: []*gcp.LoggingSinkResource{}}
+
+ // Grab the project's logging sinks
+ req1 := loggingpb.ListSinksRequest{
+ Parent: parent,
+ }
+ it1 := c.logConfigClient.ListSinks(ctx, &req1)
+ for {
+ s, done := it1.Next()
+ if done == iterator.Done {
+ break
+ }
+
+ res.LogSinks = append(res.LogSinks, gcp.NewLoggingSinkResource(s))
+ }
+
+ // Grab the project's log-based metrics
+ req2 := loggingpb.ListLogMetricsRequest{
+ Parent: parent,
+ }
+ it2 := c.logMetricClient.ListLogMetrics(ctx, &req2)
+ for {
+ m, done := it2.Next()
+ if done == iterator.Done {
+ break
+ }
+
+ res.LogMetrics = append(res.LogMetrics, gcp.NewLoggingMetricResource(m))
+ }
+
+ results <- res
+ }
+
+ projectIDs := make(chan string, len(c.resourceprojects))
+ results := make(chan loggingClientResult, len(c.resourceprojects))
+ numWorkers := len(c.resourceprojects)
+ for w := 0; w < numWorkers; w++ {
+ go worker(projectIDs, results)
+ }
+
+ for _, p := range c.resourceprojects {
+ projectIDs <- p.ProjectId
+ }
+
+ for i := 0; i < numWorkers; i++ {
+ res := <-results
+ c.logSinks[res.ProjectID] = res.LogSinks
+ c.logMetrics[res.ProjectID] = res.LogMetrics
+ }
+
+ return nil
+}
+
// loggingClientResult carries one project's logging sinks and log-based metrics
// back from a GetLoggingResources worker goroutine to the collector loop.
type loggingClientResult struct {
	ProjectID  string                       // project the data was fetched from
	LogSinks   []*gcp.LoggingSinkResource   // export sinks configured on the project
	LogMetrics []*gcp.LoggingMetricResource // log-based metrics configured on the project
}
+
// GenerateLoggingReports signals the client to process LoggingResources for reports.
// It emits one "logging_configuration" report per project: control 2.2 checks for an
// unfiltered export sink, and controls 2.4-2.11 check that a log-based metric exists
// for each CIS-recommended monitoring filter.
// NOTE(review): unlike the container generators this does not call c.incrementMetrics
// — confirm whether logging reports should be counted in metrics too.
// TODO - implement CIS 2.3
func (c *Client) GenerateLoggingReports() (reports []report.Report, err error) {

	reports = []report.Report{}

	for _, p := range c.computeprojects {

		r := report.NewReport(
			"logging_configuration",
			fmt.Sprintf("Project %s Logging Configuration", p.Name()),
		)

		// At least one sink in a project should ship all logs _somewhere_
		exportLogs := report.NewCISControl(
			"2.2",
			fmt.Sprintf("Project %s should have at least one export configured with no filters", p.Name()),
		)
		isExported := false
		for _, s := range c.logSinks[p.Name()] {
			isExported = s.ShipsAllLogs()
			if isExported {
				break
			}
		}
		if !isExported {
			exportLogs.Error = fmt.Sprintf("There is no logging sink that exports all logs for project %s", p.Name())
		} else {
			exportLogs.Passed()
		}

		// Helper function to determine if a list of log-based metrics contains a specific filter
		metricExists := func(metrics []*gcp.LoggingMetricResource, filter string) bool {
			for _, m := range metrics {
				if m.FilterMatches(filter) {
					return true
				}
			}
			return false
		}

		// Monitor Project Ownership Changes
		projectOwnerChanges := report.NewCISControl(
			"2.4",
			fmt.Sprintf("Project %s should monitor ownership changes", p.Name()),
		)

		// Monitor Project Audit Configuration Changes
		auditConfigChanges := report.NewCISControl(
			"2.5",
			fmt.Sprintf("Project %s should monitor audit log configuration changes", p.Name()),
		)

		// Monitor Project Custom Role Changes
		customRoleChanges := report.NewCISControl(
			"2.6",
			fmt.Sprintf("Project %s should monitor custom IAM role changes", p.Name()),
		)

		// Monitor VPC Firewall Changes
		vpcFirewallChanges := report.NewCISControl(
			"2.7",
			fmt.Sprintf("Project %s should monitor VPC firewall changes", p.Name()),
		)

		// Monitor VPC Route Changes
		vpcRouteChanges := report.NewCISControl(
			"2.8",
			fmt.Sprintf("Project %s should monitor VPC route changes", p.Name()),
		)

		// Monitor General Changes to VPC Configuration
		vpcNetworkChanges := report.NewCISControl(
			"2.9",
			fmt.Sprintf("Project %s should monitor VPC network changes", p.Name()),
		)

		// Monitor GCS IAM Policy Changes
		gcsIamChanges := report.NewCISControl(
			"2.10",
			fmt.Sprintf("Project %s should monitor GCS IAM changes", p.Name()),
		)

		// Monitor SQL Configuration Changes
		sqlConfigChanges := report.NewCISControl(
			"2.11",
			fmt.Sprintf("Project %s should monitor SQL config changes", p.Name()),
		)

		// Table pairing each control (by pointer, so Passed()/Error mutate the
		// named locals above) with the CIS-recommended log filter to look for.
		metricControls := []struct {
			Control *report.Control
			Filter  string
		}{
			{
				Control: &projectOwnerChanges,
				Filter:  `(protoPayload.serviceName="cloudresourcemanager.googleapis.com") AND (ProjectOwnership OR projectOwnerInvitee) OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner")`,
			},
			{
				Control: &auditConfigChanges,
				Filter:  `protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*`,
			},
			{
				Control: &customRoleChanges,
				Filter:  `resource.type="iam_role" AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole"`,
			},
			{
				Control: &vpcFirewallChanges,
				Filter:  `resource.type="gce_firewall_rule" AND jsonPayload.event_subtype="compute.firewalls.patch" OR jsonPayload.event_subtype="compute.firewalls.insert"`,
			},
			{
				Control: &vpcRouteChanges,
				Filter:  `resource.type="gce_route" AND jsonPayload.event_subtype="compute.routes.delete" OR jsonPayload.event_subtype="compute.routes.insert"`,
			},
			{
				Control: &vpcNetworkChanges,
				Filter:  `resource.type=gce_network AND jsonPayload.event_subtype="compute.networks.insert" OR jsonPayload.event_subtype="compute.networks.patch" OR jsonPayload.event_subtype="compute.networks.delete" OR jsonPayload.event_subtype="compute.networks.removePeering" OR jsonPayload.event_subtype="compute.networks.addPeering"`,
			},
			{
				Control: &gcsIamChanges,
				Filter:  `resource.type=gcs_bucket AND protoPayload.methodName="storage.setIamPermissions"`,
			},
			{
				Control: &sqlConfigChanges,
				Filter:  `protoPayload.methodName="cloudsql.instances.update"`,
			},
		}

		// Resolve every table entry BEFORE AddControls below copies the control
		// values into the report — order matters here.
		for _, m := range metricControls {
			if metricExists(c.logMetrics[p.Name()], m.Filter) {
				m.Control.Passed()
			} else {
				m.Control.Error = fmt.Sprintf("Project %s does not have the following filter monitored: %s", p.Name(), m.Filter)
			}
		}

		r.AddControls(
			exportLogs,
			projectOwnerChanges,
			auditConfigChanges,
			customRoleChanges,
			vpcFirewallChanges,
			vpcRouteChanges,
			vpcNetworkChanges,
			gcsIamChanges,
			sqlConfigChanges,
		)
		reports = append(reports, r)
	}

	return
}
diff --git a/pkg/client/logging_test.go b/pkg/client/logging_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/logging_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/metrics.go b/pkg/client/metrics.go
new file mode 100644
index 0000000..a9ebd30
--- /dev/null
+++ b/pkg/client/metrics.go
@@ -0,0 +1,82 @@
+package client
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ push "github.com/prometheus/client_golang/prometheus/push"
+)
+
var (
	// promNamespace prefixes every metric name exported by nemesis.
	promNamespace = "nemesis"

	// Prometheus metrics.
	// Total resources scanned across all resource types (monotonic counter).
	totalResourcesCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: promNamespace,
			Name:      "total_resources_scanned",
			Help:      "Total number of resources scanned",
		},
	)
	// Report summaries, reported by type, status, and project
	// (one gauge series per type/name/status/project label combination).
	reportSummary = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: promNamespace,
			Name:      "report_summary",
			Help:      "Report summaries by type, status, and project",
		},
		[]string{"type", "name", "status", "project"},
	)
)
+
+// configureMetrics is a helper function for configuring metrics.
+// Since we use a push gateway, we must configure our metrics as a push model
+func configureMetrics() *push.Pusher {
+
+ // Only configure metrics collection if enabled
+ if *flagMetricsEnabled {
+
+ // Create the prometheus registry. We explicitly declare a registry rather than
+ // depend on the default registry
+ registry := prometheus.NewRegistry()
+
+ // Register the necessary metrics
+ registry.MustRegister(totalResourcesCounter)
+ registry.MustRegister(reportSummary)
+
+ // Configure the gateway and return the pusher
+ pusher := push.New(*flagMetricsGateway, "nemesis_audit").Gatherer(registry)
+ return pusher
+ }
+
+ return nil
+}
+
// incrementMetrics is a small helper to consolidate reporting metrics that are reported for all resources.
// It bumps the global resource counter and the per-type/name/status/project summary gauge.
// NOTE(review): callers that also invoke totalResourcesCounter.Inc() directly alongside this
// helper will double-count that resource — confirm call sites.
func (c *Client) incrementMetrics(typ string, name string, status string, projectID string) {
	totalResourcesCounter.Inc()
	reportSummary.WithLabelValues(typ, name, status, projectID).Inc()
}
+
+// PushMetrics pushes the collected metrics from this client. Should only be called once.
+func (c *Client) PushMetrics() error {
+
+ // Only push metrics if we configured it
+ if c.pusher != nil {
+
+ if c.metricsArePushed {
+ return errors.New("Metrics were already pushed, make sure client.PushMetrics is only called once")
+ }
+
+ if err := c.pusher.Add(); err != nil {
+ return fmt.Errorf("Failed to push metrics to gateway: %v", err)
+ }
+
+ // Indicate that metrics for the client have already been pushed
+ c.metricsArePushed = true
+ }
+
+ return nil
+}
diff --git a/pkg/client/network.go b/pkg/client/network.go
new file mode 100644
index 0000000..45ac986
--- /dev/null
+++ b/pkg/client/network.go
@@ -0,0 +1,323 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// GetNetworkResources launches the process retrieving network resources
+// (networks, subnetworks, firewall rules, and addresses) for every audited
+// project, fanning out one worker goroutine per project.
+func (c *Client) GetNetworkResources() error {
+
+	defer utils.Elapsed("GetNetworkResources")()
+
+	regionNames, err := c.getRegionNames()
+	if err != nil {
+		glog.Fatalf("%v", err)
+	}
+
+	// worker consumes a single project ID and collects that project's network inventory
+	worker := func(projectIDs <-chan string, results chan<- networkCallResult) {
+		id := <-projectIDs
+
+		res := networkCallResult{
+			ProjectID:   id,
+			Networks:    []*gcp.ComputeNetworkResource{},
+			Subnetworks: []*gcp.ComputeSubnetworkResource{},
+			Firewalls:   []*gcp.ComputeFirewallRuleResource{},
+			Addresses:   []*gcp.ComputeAddressResource{},
+		}
+
+		// Get all networks active in the project
+		networks, err := c.computeClient.Networks.List(id).Do()
+		if err != nil {
+			glog.Fatalf("Error retrieving networks from project '%v': %v", id, err)
+		}
+
+		for _, n := range networks.Items {
+			res.Networks = append(res.Networks, gcp.NewComputeNetworkResource(n))
+		}
+
+		// Get all subnetworks active in the project, region by region
+		for _, region := range regionNames {
+			var subnetworks *compute.SubnetworkList
+
+			subnetworks, err = c.computeClient.Subnetworks.List(id, region).Do()
+			if err != nil {
+				glog.Fatalf("Error retrieving subnetworks from project '%v': %v", id, err)
+			}
+
+			for _, s := range subnetworks.Items {
+				res.Subnetworks = append(res.Subnetworks, gcp.NewComputeSubnetworkResource(s))
+			}
+
+			// Follow pagination. NOTE: assign with '=' rather than ':=' so the loop
+			// condition observes each fresh page token; a ':=' here shadows
+			// 'subnetworks' and loops forever whenever a second page exists.
+			for subnetworks.NextPageToken != "" {
+				subnetworks, err = c.computeClient.Subnetworks.List(id, region).PageToken(subnetworks.NextPageToken).Do()
+				if err != nil {
+					glog.Fatalf("Error retrieving subnetworks from project '%v': %v", id, err)
+				}
+
+				for _, s := range subnetworks.Items {
+					res.Subnetworks = append(res.Subnetworks, gcp.NewComputeSubnetworkResource(s))
+				}
+			}
+		}
+
+		// Get all firewall rules active in the project, following pagination
+		var firewalls *compute.FirewallList
+
+		firewalls, err = c.computeClient.Firewalls.List(id).Do()
+		if err != nil {
+			glog.Fatalf("Error retrieving firewall rules from project '%v': %v", id, err)
+		}
+
+		for _, f := range firewalls.Items {
+			res.Firewalls = append(res.Firewalls, gcp.NewComputeFirewallRuleResource(f))
+		}
+
+		for firewalls.NextPageToken != "" {
+			firewalls, err = c.computeClient.Firewalls.List(id).PageToken(firewalls.NextPageToken).Do()
+			if err != nil {
+				glog.Fatalf("Error retrieving firewall rules from project '%v': %v", id, err)
+			}
+
+			for _, f := range firewalls.Items {
+				res.Firewalls = append(res.Firewalls, gcp.NewComputeFirewallRuleResource(f))
+			}
+		}
+
+		// Get aggregated IPs for the project across all regional scopes
+		aggregateAddressesList, err := c.computeClient.Addresses.AggregatedList(id).Do()
+		if err != nil {
+			glog.Fatalf("Error retrieving addresses from project '%v': %v", id, err)
+		}
+
+		for _, scope := range aggregateAddressesList.Items {
+			for _, a := range scope.Addresses {
+				res.Addresses = append(res.Addresses, gcp.NewComputeAddressResource(a))
+			}
+		}
+
+		results <- res
+	}
+
+	// Setup worker pool: one worker per project, channels sized to avoid blocking
+	projectIDs := make(chan string, len(c.resourceprojects))
+	results := make(chan networkCallResult, len(c.resourceprojects))
+	numWorkers := len(c.resourceprojects)
+	for w := 0; w < numWorkers; w++ {
+		go worker(projectIDs, results)
+	}
+
+	// Feed the workers, then close the channel (mirrors GetProjects)
+	for _, p := range c.resourceprojects {
+		projectIDs <- p.ProjectId
+	}
+	close(projectIDs)
+
+	// Collect the per-project results keyed by project ID
+	for i := 0; i < numWorkers; i++ {
+		res := <-results
+		c.networks[res.ProjectID] = res.Networks
+		c.subnetworks[res.ProjectID] = res.Subnetworks
+		c.firewalls[res.ProjectID] = res.Firewalls
+		c.addresses[res.ProjectID] = res.Addresses
+	}
+
+	return nil
+}
+
+// networkCallResult carries the network inventory a worker collected for one project.
+type networkCallResult struct {
+	// ProjectID is the GCP project the resources below belong to
+	ProjectID   string
+	Networks    []*gcp.ComputeNetworkResource
+	Subnetworks []*gcp.ComputeSubnetworkResource
+	Firewalls   []*gcp.ComputeFirewallRuleResource
+	Addresses   []*gcp.ComputeAddressResource
+}
+
+// GenerateComputeNetworkReports signals the client to process ComputeNetworkResource's for reports.
+// If there are no networks found in the configuration, no reports will be created.
+func (c *Client) GenerateComputeNetworkReports() (reports []report.Report, err error) {
+
+	reports = []report.Report{}
+	typ := "compute_network"
+
+	for _, project := range c.computeprojects {
+		projectID := project.Name()
+
+		for _, network := range c.networks[projectID] {
+			rep := report.NewReport(
+				typ,
+				fmt.Sprintf("Network %v in Project %v", network.Name(), projectID),
+			)
+			if rep.Data, err = network.Marshal(); err != nil {
+				glog.Fatalf("Failed to marshal network: %v", err)
+			}
+
+			// CIS 3.1: the default network should not be used in projects
+			defaultControl := report.NewCISControl(
+				"3.1",
+				fmt.Sprintf("Project %v should not have a default network", projectID),
+			)
+			if network.IsDefault() {
+				defaultControl.Error = fmt.Sprintf("Network %v is the default network", network.Name())
+			} else {
+				defaultControl.Passed()
+			}
+
+			// CIS 3.2: legacy networks should not be used
+			legacyControl := report.NewCISControl(
+				"3.2",
+				fmt.Sprintf("Project %v should not have legacy networks", projectID),
+			)
+			if network.IsLegacy() {
+				legacyControl.Error = fmt.Sprintf("Network %v is a legacy network", network.Name())
+			} else {
+				legacyControl.Passed()
+			}
+
+			rep.AddControls(defaultControl, legacyControl)
+			reports = append(reports, rep)
+			c.incrementMetrics(typ, network.Name(), rep.Status(), projectID)
+		}
+	}
+
+	return
+}
+
+// GenerateComputeSubnetworkReports signals the client to process ComputeSubnetworkResource's for reports.
+// If there are no subnetworks found in the configuration, no reports will be created.
+func (c *Client) GenerateComputeSubnetworkReports() (reports []report.Report, err error) {
+
+	reports = []report.Report{}
+	typ := "compute_subnetwork"
+
+	for _, project := range c.computeprojects {
+		projectID := project.Name()
+
+		for _, subnet := range c.subnetworks[projectID] {
+			rep := report.NewReport(
+				typ,
+				fmt.Sprintf("Subnetwork %v in region %v for Project %v", subnet.Name(), subnet.Region(), projectID),
+			)
+			if rep.Data, err = subnet.Marshal(); err != nil {
+				glog.Fatalf("Failed to marshal subnetwork: %v", err)
+			}
+
+			// CIS 3.8: Private Google Access should be enabled on the subnetwork
+			privateAccessControl := report.NewCISControl(
+				"3.8",
+				fmt.Sprintf("Subnetwork %v should have Private Google Access enabled", subnet.Name()),
+			)
+			if subnet.IsPrivateGoogleAccessEnabled() {
+				privateAccessControl.Passed()
+			} else {
+				privateAccessControl.Error = fmt.Sprintf("Subnetwork %v does not have Private Google Access enabled", subnet.Name())
+			}
+
+			// CIS 3.9: VPC flow logs should be enabled on the subnetwork
+			flowLogsControl := report.NewCISControl(
+				"3.9",
+				fmt.Sprintf("Subnetwork %v should have VPC flow logs enabled", subnet.Name()),
+			)
+			if subnet.IsFlowLogsEnabled() {
+				flowLogsControl.Passed()
+			} else {
+				flowLogsControl.Error = fmt.Sprintf("Subnetwork %v does not have VPC flow logs enabled", subnet.Name())
+			}
+
+			rep.AddControls(privateAccessControl, flowLogsControl)
+			reports = append(reports, rep)
+			c.incrementMetrics(typ, subnet.Name(), rep.Status(), projectID)
+		}
+	}
+
+	return
+}
+
+// GenerateComputeFirewallRuleReports signals the client to process ComputeFirewallRuleResource's for reports.
+// If there are no network keys configured in the configuration, no reports will be created.
+func (c *Client) GenerateComputeFirewallRuleReports() (reports []report.Report, err error) {
+
+	reports = []report.Report{}
+	typ := "compute_firewall_rule"
+
+	for _, p := range c.computeprojects {
+		projectID := p.Name()
+
+		for _, f := range c.firewalls[projectID] {
+			r := report.NewReport(
+				typ,
+				fmt.Sprintf("Network %v Firewall Rule %v", f.Network(), f.Name()),
+			)
+			r.Data, err = f.Marshal()
+			if err != nil {
+				glog.Fatalf("Failed to marshal firewall rule: %v", err)
+			}
+
+			// CIS 3.6: SSH access from the internet should not be allowed.
+			// Protocols are passed lowercase to match the form the Compute API
+			// reports in allowed[].IPProtocol ("tcp"/"udp").
+			sshControl := report.NewCISControl(
+				"3.6",
+				"SSH should not be allowed from the internet",
+			)
+			if (f.AllowsProtocolPort("tcp", "22") || f.AllowsProtocolPort("udp", "22")) && f.AllowsSourceRange("0.0.0.0/0") {
+				sshControl.Error = fmt.Sprintf("%v allows SSH from the internet", f.Name())
+			} else {
+				sshControl.Passed()
+			}
+
+			// CIS 3.7: RDP access from the internet should not be allowed
+			rdpControl := report.NewCISControl(
+				"3.7",
+				"RDP should not be allowed from the internet",
+			)
+			if (f.AllowsProtocolPort("tcp", "3389") || f.AllowsProtocolPort("udp", "3389")) && f.AllowsSourceRange("0.0.0.0/0") {
+				// Fixed typo: "froom" -> "from"
+				rdpControl.Error = fmt.Sprintf("%v allows RDP from the internet", f.Name())
+			} else {
+				rdpControl.Passed()
+			}
+
+			r.AddControls(sshControl, rdpControl)
+			reports = append(reports, r)
+			c.incrementMetrics(typ, f.Name(), r.Status(), projectID)
+		}
+	}
+
+	return
+}
+
+// GenerateComputeAddressReports signals the client to process ComputeAddressResource's for reports.
+// Address reports carry data only; no CIS controls are attached to them.
+func (c *Client) GenerateComputeAddressReports() (reports []report.Report, err error) {
+
+	reports = []report.Report{}
+	typ := "compute_address"
+
+	for _, project := range c.computeprojects {
+		projectID := project.Name()
+
+		for _, addr := range c.addresses[projectID] {
+			rep := report.NewReport(
+				typ,
+				fmt.Sprintf("Compute Address %v", addr.Name()),
+			)
+			if rep.Data, err = addr.Marshal(); err != nil {
+				glog.Fatalf("Failed to marshal compute address: %v", err)
+			}
+
+			reports = append(reports, rep)
+			c.incrementMetrics(typ, addr.Name(), rep.Status(), projectID)
+		}
+	}
+
+	return
+}
diff --git a/pkg/client/network_test.go b/pkg/client/network_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/network_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/oauth.go b/pkg/client/oauth.go
new file mode 100644
index 0000000..4e7b3bd
--- /dev/null
+++ b/pkg/client/oauth.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/golang/glog"
+ "golang.org/x/oauth2/google"
+)
+
+// getOAuthClient configures an http.Client capable of authenticating to Google APIs
+// for the requested scope via google.DefaultClient; it exits the process on failure.
+func getOAuthClient(ctx context.Context, scope string) *http.Client {
+	client, err := google.DefaultClient(ctx, scope)
+	if err != nil {
+		glog.Fatalf("Failed to create OAuth client: %v", err)
+	}
+	return client
+}
diff --git a/pkg/client/oauth_test.go b/pkg/client/oauth_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/oauth_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/projects.go b/pkg/client/projects.go
new file mode 100644
index 0000000..03699b2
--- /dev/null
+++ b/pkg/client/projects.go
@@ -0,0 +1,77 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+)
+
+// GetProjects gathers the list of projects matching the configured filter and,
+// for each, the set of enabled service APIs. Results are cached on the client
+// (c.resourceprojects, c.services) for later audit passes.
+func (c *Client) GetProjects() error {
+
+	if *flagProjectFilter == "" {
+		glog.Exitf("No project filter was provided. Either specify --project.filter or set NEMESIS_PROJECT_FILTER to the appropriate regex (e.g. my-cool-projects-*)")
+	}
+
+	defer utils.Elapsed("GetProjects")()
+
+	// Get list of all projects.
+	// Additionally we must make sure that the project is ACTIVE. Any other state will return errors
+	projectFilter := fmt.Sprintf("%v AND lifecycleState=ACTIVE", *flagProjectFilter)
+	projects := listAllProjects(projectFilter, c.cloudResourceClient)
+
+	// Return an error that we retrieved no projects
+	if len(projects) == 0 {
+		return fmt.Errorf("No projects found when matching against '%v'", projectFilter)
+	}
+
+	// Short-lived worker: consumes one project ID and reports its enabled services.
+	// (The previously unused workerID parameter has been dropped.)
+	servicesWorker := func(projectIDs <-chan string, results chan<- serviceCallResult) {
+		id := <-projectIDs
+		projectID := fmt.Sprintf("projects/%v", id)
+
+		servicesList, err := c.serviceusageClient.Services.List(projectID).Filter("state:ENABLED").Do()
+		if err != nil {
+			glog.Fatalf("Failed to retrieve list of services for project %v: %v", projectID, err)
+		}
+
+		projectServices := []*gcp.ServiceAPIResource{}
+		for _, s := range servicesList.Services {
+			projectServices = append(projectServices, gcp.NewServiceAPIResource(s))
+		}
+
+		results <- serviceCallResult{ProjectID: id, Services: projectServices}
+	}
+
+	// Setup worker pool: one worker per project
+	projectIDs := make(chan string, len(projects))
+	results := make(chan serviceCallResult, len(projects))
+	numWorkers := len(projects)
+	for w := 0; w < numWorkers; w++ {
+		go servicesWorker(projectIDs, results)
+	}
+
+	// Feed the workers and collect the projects for reuse
+	for _, p := range projects {
+		projectIDs <- p.ProjectId
+		c.resourceprojects = append(c.resourceprojects, p)
+	}
+	close(projectIDs)
+
+	// Collect the results keyed by project ID
+	for i := 0; i < len(projects); i++ {
+		res := <-results
+		c.services[res.ProjectID] = res.Services
+	}
+
+	return nil
+}
+
+// serviceCallResult carries the enabled service APIs discovered for a single project.
+type serviceCallResult struct {
+	ProjectID string
+	Services  []*gcp.ServiceAPIResource
+}
diff --git a/pkg/client/projects_test.go b/pkg/client/projects_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/projects_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/storage.go b/pkg/client/storage.go
new file mode 100644
index 0000000..29da89b
--- /dev/null
+++ b/pkg/client/storage.go
@@ -0,0 +1,137 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/resource/gcp"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+)
+
+// GetStorageResources launches the process retrieving storage buckets and other storage resources.
+// One worker goroutine is started per audited project; results are stored in c.buckets
+// keyed by project ID.
+func (c *Client) GetStorageResources() error {
+
+	defer utils.Elapsed("GetStorageResources")()
+
+	// worker consumes a single project ID and collects that project's buckets (with ACLs)
+	worker := func(projectIDs <-chan string, results chan<- storageCallResult) {
+
+		id := <-projectIDs
+		res := storageCallResult{ProjectID: id, Buckets: []*gcp.StorageBucketResource{}}
+
+		// Get the project's buckets
+		bucketList, err := c.storageClient.Buckets.List(id).Do()
+		if err != nil {
+			glog.Fatalf("Error retrieving project %v's bucket list: %v", id, err)
+		}
+
+		for _, b := range bucketList.Items {
+
+			// Get the ACLs for the bucket, as they are not included by default in the bucket list call
+			acls, err := c.storageClient.BucketAccessControls.List(b.Name).Do()
+			if err != nil {
+
+				// NOTE(review): ANY ACL error (not only the Bucket-Policy-Only 400
+				// described in the retained code below) skips the bucket entirely,
+				// so such buckets are never audited — confirm this is intentional.
+				continue
+				/*
+					// The call above will throw a 400 error if Bucket Policy Only is enabled
+					if strings.Contains(
+						err.Error(),
+						"googleapi: Error 400: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only",
+					) {
+						continue
+					}
+				*/
+
+				// Otherwise, we hit a real error
+				//glog.Fatalf("Error retrieving bucket %v's ACLs: %v", b.Name, err)
+			}
+
+			// Store the ACLs with the bucket
+			b.Acl = acls.Items
+
+			// Append a new bucket resource
+			res.Buckets = append(res.Buckets, gcp.NewStorageBucketResource(b))
+		}
+
+		results <- res
+	}
+
+	// Setup worker pool: one goroutine per project, channels sized to avoid blocking
+	projectIDs := make(chan string, len(c.resourceprojects))
+	results := make(chan storageCallResult, len(c.resourceprojects))
+	numWorkers := len(c.resourceprojects)
+	for w := 0; w < numWorkers; w++ {
+		go worker(projectIDs, results)
+	}
+
+	// Feed the workers and collect the storage info
+	for _, p := range c.resourceprojects {
+		projectIDs <- p.ProjectId
+	}
+
+	// Collect the info keyed by project ID
+	for i := 0; i < numWorkers; i++ {
+		res := <-results
+		c.buckets[res.ProjectID] = res.Buckets
+	}
+
+	return nil
+}
+
+// storageCallResult carries the storage buckets a worker collected for one project.
+type storageCallResult struct {
+	ProjectID string
+	Buckets   []*gcp.StorageBucketResource
+}
+
+// GenerateStorageBucketReports signals the client to process StorageBucketResource's for reports.
+// Projects with no collected buckets contribute no reports.
+func (c *Client) GenerateStorageBucketReports() (reports []report.Report, err error) {
+
+	reports = []report.Report{}
+	typ := "storage_bucket"
+
+	for _, project := range c.computeprojects {
+		projectID := project.Name()
+
+		for _, bucket := range c.buckets[projectID] {
+			rep := report.NewReport(typ, fmt.Sprintf("Project %v Storage Bucket %v", projectID, bucket.Name()))
+			if rep.Data, err = bucket.Marshal(); err != nil {
+				glog.Fatalf("Failed to marshal storage bucket: %v", err)
+			}
+
+			// CIS 5.1: the bucket must not grant access to the public 'allUsers' entity
+			allUsersControl := report.NewCISControl(
+				"5.1",
+				"Bucket ACL should not include entity 'allUsers'",
+			)
+			if bucket.AllowAllUsers() {
+				allUsersControl.Error = "Bucket ACL includes entity 'allUsers'"
+			} else {
+				allUsersControl.Passed()
+			}
+
+			// CIS 5.1: nor to the overly broad 'allAuthenticatedUsers' entity
+			allAuthenticatedUsersControl := report.NewCISControl(
+				"5.1",
+				"Bucket ACL should not include entity 'allAuthenticatedUsers'",
+			)
+			if bucket.AllowAllAuthenticatedUsers() {
+				allAuthenticatedUsersControl.Error = "Bucket ACL includes entity 'allAuthenticatedUsers'"
+			} else {
+				allAuthenticatedUsersControl.Passed()
+			}
+
+			rep.AddControls(allUsersControl, allAuthenticatedUsersControl)
+
+			// Add the bucket report to the final list of bucket reports
+			reports = append(reports, rep)
+			c.incrementMetrics(typ, bucket.Name(), rep.Status(), projectID)
+		}
+	}
+
+	return
+}
diff --git a/pkg/client/storage_test.go b/pkg/client/storage_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/storage_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/client/utils.go b/pkg/client/utils.go
new file mode 100644
index 0000000..35d51b6
--- /dev/null
+++ b/pkg/client/utils.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/golang/glog"
+ cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
+)
+
+// listAllProjects returns the list of all Projects visible to the authenticated client
+// whose name matches the given filter, following pagination until exhausted.
+func listAllProjects(filter string, client *cloudresourcemanager.Service) []*cloudresourcemanager.Project {
+
+	var projects []*cloudresourcemanager.Project
+
+	// First page
+	resp, err := client.Projects.List().Filter(fmt.Sprintf("name:%v", filter)).Do()
+	if err != nil {
+		glog.Fatalf("Error retrieving projects: %v", err)
+	}
+	projects = append(projects, resp.Projects...)
+
+	// Subsequent pages. Two fixes over the original:
+	//  - assign with '=' (not ':=') so the loop condition sees each new page
+	//    token instead of shadowing 'resp' and looping forever; and
+	//  - re-apply the Filter, which the original dropped on pages after the first.
+	for resp.NextPageToken != "" {
+		resp, err = client.Projects.List().Filter(fmt.Sprintf("name:%v", filter)).PageToken(resp.NextPageToken).Do()
+		if err != nil {
+			glog.Fatalf("Error retrieving projects: %v", err)
+		}
+		projects = append(projects, resp.Projects...)
+	}
+
+	return projects
+}
+
+// isServiceEnabled reports whether any enabled API recorded for the project
+// contains servicename as a substring.
+func (c *Client) isServiceEnabled(projectID, servicename string) bool {
+	for _, svc := range c.services[projectID] {
+		if strings.Contains(svc.Name(), servicename) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/client/utils_test.go b/pkg/client/utils_test.go
new file mode 100644
index 0000000..da13c8e
--- /dev/null
+++ b/pkg/client/utils_test.go
@@ -0,0 +1 @@
+package client
diff --git a/pkg/report/flags.go b/pkg/report/flags.go
new file mode 100644
index 0000000..169f700
--- /dev/null
+++ b/pkg/report/flags.go
@@ -0,0 +1,11 @@
+package report
+
+import (
+ "flag"
+
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+)
+
+var (
+	// flagReportOnlyFailures limits report output to failed controls only;
+	// set via --reports.only-failures or the NEMESIS_ONLY_FAILURES env var.
+	flagReportOnlyFailures = flag.Bool("reports.only-failures", utils.GetEnvBool("NEMESIS_ONLY_FAILURES"), "Limit output of controls to only failed controls")
+)
diff --git a/pkg/report/pubsub.go b/pkg/report/pubsub.go
new file mode 100644
index 0000000..3448531
--- /dev/null
+++ b/pkg/report/pubsub.go
@@ -0,0 +1,76 @@
+package report
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/glog"
+
+ "cloud.google.com/go/pubsub"
+)
+
+// PubSubReporter is a wrapper around the pubsub.Client.
+// It publishes each report as an individual message to a single configured topic.
+type PubSubReporter struct {
+	c     *pubsub.Client
+	topic string
+}
+
+// NewPubSubReporter returns a new PubSubReporter that publishes audit findings
+// to the given topic within the given project. It exits the process if the
+// PubSub client cannot be created.
+func NewPubSubReporter(project string, topic string) *PubSubReporter {
+	client, err := pubsub.NewClient(context.Background(), project)
+	if err != nil {
+		glog.Fatalf("Failed to create PubSub client: %v", err)
+	}
+	return &PubSubReporter{c: client, topic: topic}
+}
+
+// Publish sends a list of reports to the configured PubSub topic, one message
+// per report, waiting for every publish to complete. It returns an error
+// summarizing how many reports failed to publish.
+func (r *PubSubReporter) Publish(reports []Report) error {
+
+	// Create a pubsub publisher workgroup
+	ctx := context.Background()
+
+	var wg sync.WaitGroup
+	var errs uint64
+	topic := r.c.Topic(r.topic)
+
+	for i := 0; i < len(reports); i++ {
+
+		// Marshal and send the report
+		data, err := json.Marshal(&reports[i])
+		if err != nil {
+			glog.Fatalf("Failed to marshal report for pubsub: %v", err)
+		}
+
+		result := topic.Publish(ctx, &pubsub.Message{
+			Data: data,
+		})
+
+		// Await each publish result on its own goroutine so sends overlap.
+		// (The previously unused index parameter has been dropped.)
+		wg.Add(1)
+		go func(res *pubsub.PublishResult) {
+			defer wg.Done()
+
+			if _, err := res.Get(ctx); err != nil {
+				glog.Errorf("Failed to publish: %v", err)
+				atomic.AddUint64(&errs, 1)
+			}
+		}(result)
+	}
+
+	// wg.Wait establishes a happens-before edge, so the plain read of errs below is safe
+	wg.Wait()
+
+	if errs > 0 {
+		return fmt.Errorf("%d of %d reports did not publish", errs, len(reports))
+	}
+
+	return nil
+}
diff --git a/pkg/report/pubsub_test.go b/pkg/report/pubsub_test.go
new file mode 100644
index 0000000..80c499f
--- /dev/null
+++ b/pkg/report/pubsub_test.go
@@ -0,0 +1 @@
+package report
diff --git a/pkg/report/report.go b/pkg/report/report.go
new file mode 100644
index 0000000..5561da1
--- /dev/null
+++ b/pkg/report/report.go
@@ -0,0 +1,89 @@
+// Package report outlines how reports are formatted and validated
+package report
+
+import (
+ "encoding/json"
+
+ "github.com/Unity-Technologies/nemesis/pkg/cis"
+ "github.com/golang/glog"
+)
+
+// Status values shared by Control and Report.
+const (
+	// Failed indicates that a resource did not match expected spec
+	Failed = "failed"
+
+	// Passed indicates that a resource met the expected spec
+	Passed = "passed"
+)
+
+// Control is a measurable unit of an audit. A control starts out in the Failed
+// state and must be explicitly flipped via Passed(); Error holds a
+// human-readable reason when the control fails.
+type Control struct {
+	Title  string `json:"title"`
+	Desc   string `json:"desc"`
+	Status string `json:"status"`
+	Error  string `json:"error,omitempty"`
+}
+
+// NewControl returns a new Control with the given title and description.
+// Controls start in the Failed state and must be marked Passed explicitly.
+func NewControl(title string, desc string) Control {
+	c := Control{}
+	c.Title = title
+	c.Desc = desc
+	c.Status = Failed
+	return c
+}
+
+// NewCISControl returns a new Control whose title comes from the CIS registry
+// entry for recommendationID. The process exits if the ID is unknown.
+func NewCISControl(recommendationID string, desc string) Control {
+	rec, ok := cis.Registry[recommendationID]
+	if !ok {
+		glog.Fatalf("Couldn't find CIS recommendation with ID '%v'", recommendationID)
+	}
+	return NewControl(rec.Format(), desc)
+}
+
+// Passed marks the control as passed, changing its status from the default
+// Failed value to Passed.
+func (c *Control) Passed() {
+	c.Status = Passed
+}
+
+// Report is a top-level structure for capturing information generated from an audit on a resource.
+// Data holds the raw JSON form of the audited resource; Controls holds the checks applied to it.
+type Report struct {
+	Type     string          `json:"type"`
+	Title    string          `json:"title"`
+	Controls []Control       `json:"controls"`
+	Data     json.RawMessage `json:"data"`
+}
+
+// NewReport returns a new top-level report of the given type and title with an
+// empty (non-nil) control list.
+func NewReport(typ string, title string) Report {
+	r := Report{}
+	r.Type = typ
+	r.Title = title
+	r.Controls = []Control{}
+	return r
+}
+
+// Status returns Failed if any control attached to the report failed, and
+// Passed otherwise (including when the report carries no controls at all).
+func (r *Report) Status() string {
+	for i := range r.Controls {
+		if r.Controls[i].Status == Failed {
+			return Failed
+		}
+	}
+	return Passed
+}
+
+// AddControls appends controls to the report. When only failures are being
+// reported, passing controls are silently dropped.
+func (r *Report) AddControls(controls ...Control) {
+	onlyFailures := *flagReportOnlyFailures
+	for _, ctrl := range controls {
+		if onlyFailures && ctrl.Status == Passed {
+			continue
+		}
+		r.Controls = append(r.Controls, ctrl)
+	}
+}
diff --git a/pkg/report/report_test.go b/pkg/report/report_test.go
new file mode 100644
index 0000000..80c499f
--- /dev/null
+++ b/pkg/report/report_test.go
@@ -0,0 +1 @@
+package report
diff --git a/pkg/report/reporter.go b/pkg/report/reporter.go
new file mode 100644
index 0000000..73986ea
--- /dev/null
+++ b/pkg/report/reporter.go
@@ -0,0 +1,6 @@
+package report
+
+// Reporter is a simple interface for publishing reports.
+// Publish delivers the full batch and returns an error if delivery failed.
+type Reporter interface {
+	Publish(reports []Report) error
+}
diff --git a/pkg/report/stdout.go b/pkg/report/stdout.go
new file mode 100644
index 0000000..35ffb76
--- /dev/null
+++ b/pkg/report/stdout.go
@@ -0,0 +1,27 @@
+package report
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/golang/glog"
+)
+
+// StdOutReporter is a reporter that prints audit reports to stdout as JSON.
+type StdOutReporter struct{}
+
+// NewStdOutReporter returns a new StdOutReporter for outputting the findings of an audit.
+func NewStdOutReporter() *StdOutReporter {
+	return &StdOutReporter{}
+}
+
+// Publish prints a full list of reports to stdout as a single JSON document.
+// Marshal failures are logged and returned to the caller — rather than
+// terminating the process with Fatalf — so the Reporter interface's error
+// contract is honored.
+func (r *StdOutReporter) Publish(reports []Report) error {
+	b, err := json.Marshal(&reports)
+	if err != nil {
+		glog.Errorf("Failed to render report: %v", err)
+		return err
+	}
+	fmt.Println(string(b))
+	return nil
+}
diff --git a/pkg/report/stdout_test.go b/pkg/report/stdout_test.go
new file mode 100644
index 0000000..80c499f
--- /dev/null
+++ b/pkg/report/stdout_test.go
@@ -0,0 +1 @@
+package report
diff --git a/pkg/resource/gcp/compute_address.go b/pkg/resource/gcp/compute_address.go
new file mode 100644
index 0000000..664396f
--- /dev/null
+++ b/pkg/resource/gcp/compute_address.go
@@ -0,0 +1,34 @@
+package gcp
+
+import (
+ "encoding/json"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeAddressResource is a resource describing a Google Compute Address
+// (a reserved IP address), wrapping the raw API type.
+type ComputeAddressResource struct {
+	a *compute.Address
+}
+
+// NewComputeAddressResource returns a new ComputeAddressResource wrapping the given address.
+func NewComputeAddressResource(a *compute.Address) *ComputeAddressResource {
+	return &ComputeAddressResource{a: a}
+}
+
+// Marshal returns the underlying address resource's JSON representation.
+func (r *ComputeAddressResource) Marshal() ([]byte, error) {
+	return json.Marshal(&r.a)
+}
+
+// Name returns the name of the compute address.
+// (Previous comment said "firewall rule" — a copy-paste error.)
+func (r *ComputeAddressResource) Name() string {
+	return r.a.Name
+}
+
+// Network returns the network the address belongs to.
+// (Previous comment said "firewall rule" — a copy-paste error.)
+func (r *ComputeAddressResource) Network() string {
+	return r.a.Network
+}
diff --git a/pkg/resource/gcp/compute_address_test.go b/pkg/resource/gcp/compute_address_test.go
new file mode 100644
index 0000000..c5bf175
--- /dev/null
+++ b/pkg/resource/gcp/compute_address_test.go
@@ -0,0 +1,3 @@
+package gcp
+
+
diff --git a/pkg/resource/gcp/compute_firewall_rule.go b/pkg/resource/gcp/compute_firewall_rule.go
new file mode 100644
index 0000000..c40d2cc
--- /dev/null
+++ b/pkg/resource/gcp/compute_firewall_rule.go
@@ -0,0 +1,58 @@
+package gcp
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+
+	compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeFirewallRuleResource is a resource describing a Google Compute
+// Firewall Rule, wrapping the raw API type.
+type ComputeFirewallRuleResource struct {
+	f *compute.Firewall
+}
+
+// NewComputeFirewallRuleResource returns a new ComputeFirewallRuleResource wrapping the given rule.
+func NewComputeFirewallRuleResource(f *compute.Firewall) *ComputeFirewallRuleResource {
+	return &ComputeFirewallRuleResource{f: f}
+}
+
+// Marshal returns the underlying firewall rule's JSON representation.
+func (r *ComputeFirewallRuleResource) Marshal() ([]byte, error) {
+	return json.Marshal(&r.f)
+}
+
+// Name returns the name of the firewall rule.
+func (r *ComputeFirewallRuleResource) Name() string {
+	return r.f.Name
+}
+
+// Network returns the network the firewall rule resides within.
+func (r *ComputeFirewallRuleResource) Network() string {
+	return r.f.Network
+}
+
+// AllowsSourceRange returns whether a given CIDR range appears verbatim in the
+// firewall rule's source ranges.
+func (r *ComputeFirewallRuleResource) AllowsSourceRange(sourceRange string) bool {
+	for _, cidr := range r.f.SourceRanges {
+		if cidr == sourceRange {
+			return true
+		}
+	}
+	return false
+}
+
+// AllowsProtocolPort returns whether a given protocol:port combination is allowed by this firewall rule.
+// Three fixes over the original exact-match implementation:
+//   - the protocol comparison is case-insensitive, because the Compute API reports
+//     protocols in lowercase ("tcp", "udp") while callers may pass any casing;
+//   - an allow entry with an empty Ports list permits every port of that protocol;
+//   - port ranges such as "1024-65535" are honored.
+func (r *ComputeFirewallRuleResource) AllowsProtocolPort(protocol string, port string) bool {
+	for _, allowRule := range r.f.Allowed {
+		if !strings.EqualFold(allowRule.IPProtocol, protocol) {
+			continue
+		}
+
+		// No ports listed means the rule allows all ports for this protocol
+		if len(allowRule.Ports) == 0 {
+			return true
+		}
+
+		for _, p := range allowRule.Ports {
+			if p == port || portInRange(port, p) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// portInRange reports whether port falls inside spec when spec is a "lo-hi" port range.
+// Non-range or non-numeric specs simply report false.
+func portInRange(port string, spec string) bool {
+	bounds := strings.SplitN(spec, "-", 2)
+	if len(bounds) != 2 {
+		return false
+	}
+	p, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+	lo, err := strconv.Atoi(bounds[0])
+	if err != nil {
+		return false
+	}
+	hi, err := strconv.Atoi(bounds[1])
+	if err != nil {
+		return false
+	}
+	return lo <= p && p <= hi
+}
diff --git a/pkg/resource/gcp/compute_firewall_rule_test.go b/pkg/resource/gcp/compute_firewall_rule_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_firewall_rule_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/compute_instance.go b/pkg/resource/gcp/compute_instance.go
new file mode 100644
index 0000000..ce0dd19
--- /dev/null
+++ b/pkg/resource/gcp/compute_instance.go
@@ -0,0 +1,127 @@
+package gcp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeInstanceResource represents a Google Compute Engine instance,
+// wrapping the raw API type.
+type ComputeInstanceResource struct {
+	i *compute.Instance
+}
+
+// NewComputeInstanceResource returns a new ComputeInstanceResource wrapping the given instance.
+func NewComputeInstanceResource(i *compute.Instance) *ComputeInstanceResource {
+	return &ComputeInstanceResource{i: i}
+}
+
+// Name is the compute instance name.
+func (r *ComputeInstanceResource) Name() string {
+	return r.i.Name
+}
+
+// Marshal returns the underlying instance's JSON representation.
+func (r *ComputeInstanceResource) Marshal() ([]byte, error) {
+	return json.Marshal(&r.i)
+}
+
+// HasNatIP returns whether the instance has an external / NAT IP attached to
+// its first network interface. An instance with no interfaces at all is
+// reported as having no NAT IP, instead of panicking on the index as before.
+func (r *ComputeInstanceResource) HasNatIP() bool {
+	if len(r.i.NetworkInterfaces) == 0 {
+		return false
+	}
+	return r.i.NetworkInterfaces[0].AccessConfigs != nil
+}
+
+// HasNumNetworkInterfaces returns whether the instance has the expected number
+// of network interfaces; on mismatch err describes expected vs. actual.
+func (r *ComputeInstanceResource) HasNumNetworkInterfaces(num int) (result bool, err error) {
+	count := len(r.i.NetworkInterfaces)
+	if count == num {
+		return true, nil
+	}
+	return false, fmt.Errorf("Expected %v interfaces, found %v", num, count)
+}
+
+// KeyValueEquals returns whether the metadata key equals a given value
+// (case-insensitively). Reports an error if the metadata key does not exist,
+// has no value, or is set to a different value.
+func (r *ComputeInstanceResource) KeyValueEquals(key string, value string) (result bool, err error) {
+
+	result = false
+
+	// A missing metadata block means the key cannot exist (the original
+	// dereferenced r.i.Metadata unconditionally and could panic)
+	if r.i.Metadata == nil {
+		err = fmt.Errorf("Could not find instance metadata key: %v", key)
+		return
+	}
+
+	for _, item := range r.i.Metadata.Items {
+		if item.Key == key {
+
+			// Guard the *string dereference: metadata values may be nil
+			if item.Value == nil {
+				err = fmt.Errorf("Instance metadata key '%v' has no value", key)
+				return
+			}
+
+			// EqualFold avoids allocating two lowered copies (ToLower == ToLower)
+			if strings.EqualFold(*item.Value, value) {
+				result = true
+			} else {
+				err = fmt.Errorf("Instance metadata key '%v' is set to '%v'", key, *item.Value)
+			}
+			return
+		}
+	}
+
+	// Report an error that the key did not exist
+	err = fmt.Errorf("Could not find instance metadata key: %v", key)
+	return
+}
+
+// KeyAbsent returns whether the metadata key is absent from the instance
+// metadata. (The previous comment claimed it "reports an error", but the
+// function only returns a bool.) A nil metadata block counts as absent
+// instead of panicking.
+func (r *ComputeInstanceResource) KeyAbsent(key string) bool {
+	if r.i.Metadata == nil {
+		return true
+	}
+	for _, item := range r.i.Metadata.Items {
+		if item.Key == key {
+			return false
+		}
+	}
+	return true
+}
+
+// UsesDefaultServiceAccount returns whether the service account used to launch
+// the instance is a default compute service account for any project. Instances
+// running without any service account are reported as false instead of
+// panicking on the index as before.
+func (r *ComputeInstanceResource) UsesDefaultServiceAccount() bool {
+	if len(r.i.ServiceAccounts) == 0 {
+		return false
+	}
+	return strings.Contains(r.i.ServiceAccounts[0].Email, "-compute@developer.gserviceaccount.com")
+}
+
+// HasIPForwardingEnabled returns whether an instance can forward packets for different sources.
+func (r *ComputeInstanceResource) HasIPForwardingEnabled() bool {
+	return r.i.CanIpForward
+}
+
+// UsesCustomerSuppliedEncryptionKeys returns nil when every disk attached to
+// the instance is encrypted with a customer-supplied encryption key (CSEK),
+// and otherwise an error naming each offending disk. Messages for multiple
+// disks are now joined with "; " — the original concatenated them with no
+// separator, producing an unreadable run-on string.
+func (r *ComputeInstanceResource) UsesCustomerSuppliedEncryptionKeys() (err error) {
+
+	// Collect one message per unencrypted disk so the caller sees every offender
+	var problems []string
+	for _, d := range r.i.Disks {
+		if d.DiskEncryptionKey == nil {
+			problems = append(problems, fmt.Sprintf("Disk does not use CSEK: %v", d.Source))
+		}
+	}
+
+	if len(problems) > 0 {
+		err = errors.New(strings.Join(problems, "; "))
+	}
+
+	return
+}
diff --git a/pkg/resource/gcp/compute_instance_test.go b/pkg/resource/gcp/compute_instance_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_instance_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/compute_networks.go b/pkg/resource/gcp/compute_networks.go
new file mode 100644
index 0000000..b761ec7
--- /dev/null
+++ b/pkg/resource/gcp/compute_networks.go
@@ -0,0 +1,48 @@
+package gcp
+
+import (
+ "encoding/json"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeNetworkResource represents a Google Compute Engine network
+type ComputeNetworkResource struct {
+ n *compute.Network
+}
+
+// NewComputeNetworkResource returns a new ComputeNetworkResource
+func NewComputeNetworkResource(n *compute.Network) *ComputeNetworkResource {
+ r := new(ComputeNetworkResource)
+ r.n = n
+ return r
+}
+
+// Name returns the name of the Compute network
+func (r *ComputeNetworkResource) Name() string {
+ return r.n.Name
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ComputeNetworkResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.n)
+}
+
+// IsDefault tests whether the network's name is `default`, which usually comes with a project that
+// just enabled it's Compute API
+func (r *ComputeNetworkResource) IsDefault() bool {
+ return r.n.Name == "default"
+}
+
+// IsLegacy tests whether the network is a legacy network
+func (r *ComputeNetworkResource) IsLegacy() bool {
+
+ // If IPv4Range is non-empty, then it is a legacy network
+ return r.n.IPv4Range != ""
+}
+
+// NameEquals tests whether the network's name is equal to what is expected
+func (r *ComputeNetworkResource) NameEquals(name string) (result bool, err error) {
+ result = r.n.Name == name
+ return
+}
diff --git a/pkg/resource/gcp/compute_networks_test.go b/pkg/resource/gcp/compute_networks_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_networks_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/compute_project.go b/pkg/resource/gcp/compute_project.go
new file mode 100644
index 0000000..e37d6bf
--- /dev/null
+++ b/pkg/resource/gcp/compute_project.go
@@ -0,0 +1,35 @@
+package gcp
+
+import (
+ "encoding/json"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeProjectResource represents a Google Compute Engine's project information.
+type ComputeProjectResource struct {
+ p *compute.Project
+}
+
+// NewComputeProjectResource returns a new ComputeProjectResource
+func NewComputeProjectResource(p *compute.Project) *ComputeProjectResource {
+ r := new(ComputeProjectResource)
+ r.p = p
+ return r
+}
+
+// Name is the Project's Name
+func (r *ComputeProjectResource) Name() string {
+ return r.p.Name
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ComputeProjectResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.p)
+}
+
+// IsXpnHost tests whether the project is configured as a Shared VPC (Xpn) host project
+func (r *ComputeProjectResource) IsXpnHost() (result bool, err error) {
+ result = r.p.XpnProjectStatus == "HOST"
+ return
+}
diff --git a/pkg/resource/gcp/compute_project_metadata.go b/pkg/resource/gcp/compute_project_metadata.go
new file mode 100644
index 0000000..3c52156
--- /dev/null
+++ b/pkg/resource/gcp/compute_project_metadata.go
@@ -0,0 +1,88 @@
+package gcp
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeProjectMetadataResource is a resource for testing information about a Project's compute metadata configuration
+type ComputeProjectMetadataResource struct {
+ m *compute.Metadata
+}
+
+// NewComputeProjectMetadataResource returns a new ComputeProjectMetadataResource
+func NewComputeProjectMetadataResource(m *compute.Metadata) *ComputeProjectMetadataResource {
+ r := new(ComputeProjectMetadataResource)
+ r.m = m
+ return r
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ComputeProjectMetadataResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.m)
+}
+
+// Includes indicates whether the Metadata object contains the key specified
+func (r *ComputeProjectMetadataResource) Includes(key string) (result bool, err error) {
+
+ // Loop over all project metadata keys
+ result = false
+ for _, item := range r.m.Items {
+ if item.Key == key {
+ result = true
+ break
+ }
+ }
+
+ return
+}
+
+// KeyValueEquals returns whether the metadata key equals a given value.
+// Reports an error if they metadata key does not exist.
+func (r *ComputeProjectMetadataResource) KeyValueEquals(key string, value string) (result bool, err error) {
+
+ // Loop over project metadata keys until we find the key.
+ result = false
+ found := false
+ for _, item := range r.m.Items {
+ if item.Key == key {
+
+ // If we found the key, we want to check its value.
+ // If it is set correctly, all is well. Otherwise, report the
+ found = true
+ if strings.ToLower(*item.Value) == strings.ToLower(value) {
+ result = true
+ } else {
+ err = fmt.Errorf("Project metadata key '%v' is set to '%v'", key, *item.Value)
+ }
+ return
+ }
+ }
+
+ // Report an error that the key did not exist
+ if !found {
+ err = fmt.Errorf("Could not find project metadata key: %v", key)
+ }
+ return
+}
+
+// KeyAbsent returns whether the metadata key is absent
+// Reports an error if they metadata key does not exist.
+func (r *ComputeProjectMetadataResource) KeyAbsent(key string) bool {
+
+ // Loop over project metadata keys until we find the key.
+ found := false
+ for _, item := range r.m.Items {
+ if item.Key == key {
+
+ // If we found the key, return
+ found = true
+ break
+ }
+ }
+
+ return !found
+}
diff --git a/pkg/resource/gcp/compute_project_metadata_test.go b/pkg/resource/gcp/compute_project_metadata_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_project_metadata_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/compute_project_test.go b/pkg/resource/gcp/compute_project_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_project_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/compute_subnetwork.go b/pkg/resource/gcp/compute_subnetwork.go
new file mode 100644
index 0000000..231015a
--- /dev/null
+++ b/pkg/resource/gcp/compute_subnetwork.go
@@ -0,0 +1,44 @@
+package gcp
+
+import (
+ "encoding/json"
+
+ compute "google.golang.org/api/compute/v1"
+)
+
+// ComputeSubnetworkResource represents a Google Compute Engine subnetwork
+type ComputeSubnetworkResource struct {
+ s *compute.Subnetwork
+}
+
+// NewComputeSubnetworkResource returns a new ComputeSubnetworkResource
+func NewComputeSubnetworkResource(s *compute.Subnetwork) *ComputeSubnetworkResource {
+ r := new(ComputeSubnetworkResource)
+ r.s = s
+ return r
+}
+
+// Name returns the name of the Compute subnetwork
+func (r *ComputeSubnetworkResource) Name() string {
+ return r.s.Name
+}
+
+// Region returns the GCP region of the Compute subnetwork
+func (r *ComputeSubnetworkResource) Region() string {
+ return r.s.Region
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ComputeSubnetworkResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.s)
+}
+
+// IsPrivateGoogleAccessEnabled returns whether private Google network access is enabled
+func (r *ComputeSubnetworkResource) IsPrivateGoogleAccessEnabled() bool {
+ return r.s.PrivateIpGoogleAccess
+}
+
+// IsFlowLogsEnabled returns whether the subnet has VPC flow logs enabled
+func (r *ComputeSubnetworkResource) IsFlowLogsEnabled() bool {
+ return r.s.EnableFlowLogs
+}
diff --git a/pkg/resource/gcp/compute_subnetwork_test.go b/pkg/resource/gcp/compute_subnetwork_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/compute_subnetwork_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/container_cluster.go b/pkg/resource/gcp/container_cluster.go
new file mode 100644
index 0000000..6d04442
--- /dev/null
+++ b/pkg/resource/gcp/container_cluster.go
@@ -0,0 +1,147 @@
+package gcp
+
+import (
+ "encoding/json"
+ "fmt"
+
+ container "google.golang.org/api/container/v1"
+)
+
+const (
+ loggingService = "logging.googleapis.com"
+ monitoringService = "monitoring.googleapis.com"
+)
+
+// ContainerClusterResource is a resource for testing information about a GKE Cluster's configuration
+type ContainerClusterResource struct {
+ c *container.Cluster
+}
+
+// NewContainerClusterResource returns a new ContainerClusterResource
+func NewContainerClusterResource(c *container.Cluster) *ContainerClusterResource {
+ r := new(ContainerClusterResource)
+ r.c = c
+ return r
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ContainerClusterResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.c)
+}
+
+// Name returns the name given to the container cluster
+func (r *ContainerClusterResource) Name() string {
+ return r.c.Name
+}
+
+// IsStackdriverLoggingEnabled indicates whether logging.googleapis.com is set as the logging service
+func (r *ContainerClusterResource) IsStackdriverLoggingEnabled() bool {
+ return r.c.LoggingService == loggingService
+}
+
+// IsStackdriverMonitoringEnabled indicates whether monitoring.googleapis.com is set as the monitoring service
+func (r *ContainerClusterResource) IsStackdriverMonitoringEnabled() bool {
+ return r.c.MonitoringService == monitoringService
+}
+
+// IsAliasIPEnabled indicates whether VPC Alias IPs are being used
+func (r *ContainerClusterResource) IsAliasIPEnabled() bool {
+ return r.c.IpAllocationPolicy.UseIpAliases
+}
+
+// IsPodSecurityPolicyControllerEnabled indicates whether PSP controller is enabled
+// TODO - currently no way to implement this check by default
+func (r *ContainerClusterResource) IsPodSecurityPolicyControllerEnabled() bool {
+ return false
+ // TODO - implement!
+}
+
+// IsDashboardAddonDisabled returns whether the GKE cluster has Kubernetes Dashboard add-on is enabled
+func (r *ContainerClusterResource) IsDashboardAddonDisabled() bool {
+ return r.c.AddonsConfig.KubernetesDashboard.Disabled
+}
+
+// IsMasterAuthorizedNetworksEnabled returns whether the GKE cluster is using master authorized networks
+func (r *ContainerClusterResource) IsMasterAuthorizedNetworksEnabled() bool {
+ return r.c.MasterAuthorizedNetworksConfig.Enabled
+}
+
+// IsAbacDisabled returns whether the GKE cluster is using (legacy) Atributed-Based Access Control
+func (r *ContainerClusterResource) IsAbacDisabled() bool {
+ if r.c.LegacyAbac == nil {
+ return true
+ }
+ return !r.c.LegacyAbac.Enabled
+}
+
+// IsNetworkPolicyAddonEnabled returns whether the GKE cluster has Network Policy add-on enabled
+func (r *ContainerClusterResource) IsNetworkPolicyAddonEnabled() bool {
+ return !r.c.AddonsConfig.NetworkPolicyConfig.Disabled
+
+}
+
+// IsClientCertificateDisabled checks whether client certificates are disabled
+func (r *ContainerClusterResource) IsClientCertificateDisabled() bool {
+ if r.c.MasterAuth.ClientCertificateConfig == nil {
+ return true
+ }
+ return !r.c.MasterAuth.ClientCertificateConfig.IssueClientCertificate
+}
+
+// IsMasterAuthPasswordDisabled returns whether the GKE cluster has username/password authentication enabled
+func (r *ContainerClusterResource) IsMasterAuthPasswordDisabled() bool {
+ return r.c.MasterAuth.Password == ""
+}
+
+// IsMasterPrivate returns whether the GKE cluster master is only accessible on private networks
+func (r *ContainerClusterResource) IsMasterPrivate() bool {
+ if r.c.PrivateClusterConfig == nil {
+ return false
+ }
+ return r.c.PrivateClusterConfig.EnablePrivateEndpoint
+}
+
+// IsNodesPrivate returns whether the GKE cluster nodes are only accessible on private networks
+func (r *ContainerClusterResource) IsNodesPrivate() bool {
+ if r.c.PrivateClusterConfig == nil {
+ return false
+ }
+ return r.c.PrivateClusterConfig.EnablePrivateNodes
+}
+
+// IsUsingDefaultServiceAccount returns whether the GKE cluster is using the default compute service account
+func (r *ContainerClusterResource) IsUsingDefaultServiceAccount() bool {
+ return r.c.NodeConfig.ServiceAccount == "default"
+}
+
+// IsUsingMinimalOAuthScopes returns whether the GKE cluster is using the defined minimal oauth scopes for a cluster
+func (r *ContainerClusterResource) IsUsingMinimalOAuthScopes() (result bool, err error) {
+
+ // Begin with the assumption that we are using minimal oauth scopes
+ extraScopes := []string{}
+
+ // Iterate over the cluster's OAuth scopes and determine if they are at most the minimal list provided.
+ // If there are any scopes that are not in the whitelist, track them and report them as an error
+ for _, scope := range r.c.NodeConfig.OauthScopes {
+
+ found := false
+ // Now check if the cluster's scope is in out oauth scopes
+ for _, minimalScope := range minimalOAuthScopes {
+ if minimalScope == scope {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ extraScopes = append(extraScopes, scope)
+ }
+ }
+
+ result = len(extraScopes) == 0
+ if !result {
+ err = fmt.Errorf("Cluster is not using minimal scopes. The following scopes are not considered minimal: %v", extraScopes)
+ }
+
+ return
+}
diff --git a/pkg/resource/gcp/container_cluster_test.go b/pkg/resource/gcp/container_cluster_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/container_cluster_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/container_nodepool.go b/pkg/resource/gcp/container_nodepool.go
new file mode 100644
index 0000000..ef583ce
--- /dev/null
+++ b/pkg/resource/gcp/container_nodepool.go
@@ -0,0 +1,64 @@
+package gcp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ container "google.golang.org/api/container/v1"
+)
+
+// ContainerNodePoolResource is a resource for testing information about a GKE Node Pool's configuration
+type ContainerNodePoolResource struct {
+ n *container.NodePool
+}
+
+// NewContainerNodePoolResource returns a new ContainerNodePoolResource
+func NewContainerNodePoolResource(n *container.NodePool) *ContainerNodePoolResource {
+ r := new(ContainerNodePoolResource)
+ r.n = n
+ return r
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ContainerNodePoolResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.n)
+}
+
+// Name returns the name given to the cluster nodepool
+func (r *ContainerNodePoolResource) Name() string {
+ return r.n.Name
+}
+
+// IsLegacyMetadataAPIDisabled returns whether the given Node Pool has legacy metadata APIs disabled
+func (r *ContainerNodePoolResource) IsLegacyMetadataAPIDisabled() (result bool, err error) {
+ var val string
+ var ok bool
+ if val, ok = r.n.Config.Metadata["disable-legacy-endpoints"]; !ok {
+ err = errors.New("Could not find key 'disable-legacy-endpoints'")
+ }
+ if val != "true" {
+ err = fmt.Errorf("Invalid value for `disable-legacy-endpoints`, got `%v'", val)
+ }
+ result = err == nil
+ return
+}
+
+// IsAutoRepairEnabled returns whether a Node Pool is configured to automatically repair on error
+func (r *ContainerNodePoolResource) IsAutoRepairEnabled() bool {
+ return r.n.Management.AutoRepair
+}
+
+// IsAutoUpgradeEnabled returns whether a Node Pool is configured to automatically upgrade GKE versions
+func (r *ContainerNodePoolResource) IsAutoUpgradeEnabled() bool {
+ return r.n.Management.AutoUpgrade
+}
+
+// CheckDistributionTypeIs returns whether a Node Pool's OS distribution is the expected type
+func (r *ContainerNodePoolResource) CheckDistributionTypeIs(expected string) (result bool, err error) {
+ result = r.n.Config.ImageType == expected
+ if !result {
+ err = fmt.Errorf("Node pool is using %v, not %v", r.n.Config.ImageType, expected)
+ }
+ return
+}
diff --git a/pkg/resource/gcp/container_nodepool_test.go b/pkg/resource/gcp/container_nodepool_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/container_nodepool_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/flags.go b/pkg/resource/gcp/flags.go
new file mode 100644
index 0000000..f496cc6
--- /dev/null
+++ b/pkg/resource/gcp/flags.go
@@ -0,0 +1,37 @@
+package gcp
+
+import (
+ "flag"
+ "strconv"
+ "strings"
+
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+)
+
var (
	// Default Values

	// defaultOAuthMinimalScopes is the default comma-separated list of OAuth
	// scopes treated as "minimal" for GKE node pools.
	defaultOAuthMinimalScopes = "https://www.googleapis.com/auth/devstorage.read_only,https://www.googleapis.com/auth/logging.write,https://www.googleapis.com/auth/monitoring,https://www.googleapis.com/auth/servicecontrol,https://www.googleapis.com/auth/service.management.readonly,https://www.googleapis.com/auth/trace.append"

	// Values — parsed forms of the flags below, populated once in init().
	userDomains         = []string{}
	minimalOAuthScopes  = []string{}
	saKeyExpirationTime = 0

	// Flags
	flagContainerMinimalOAuthScopes = flag.String("container.oauth-scopes", utils.GetEnv("NEMESIS_CONTAINER_OAUTHSCOPES", defaultOAuthMinimalScopes), "A comma-seperated list of OAuth scopes to allow for GKE clusters")
	flagUserDomains                 = flag.String("iam.user-domains", utils.GetEnv("NEMESIS_IAM_USERDOMAINS", ""), "A comma-separated list of domains to allow users from")
	flagSAKeyExpirationTime         = flag.String("iam.sa-key-expiration-time", utils.GetEnv("NEMESIS_IAM_SA_KEY_EXPIRATION_TIME", "90"), "The time in days to allow service account keys to live before being rotated")
)

// init derives the package-level values from the flag values.
//
// NOTE(review): this runs at package initialization, before flag.Parse() can
// have been called, so the derived values reflect only the environment-backed
// defaults; command-line values for these flags are never picked up here —
// confirm this is intended.
//
// NOTE(review): strings.Split("", ",") yields [""], so an empty
// NEMESIS_IAM_USERDOMAINS leaves userDomains == [""]; any substring check
// against "" succeeds, effectively disabling the domain whitelist — confirm.
func init() {
	var err error

	minimalOAuthScopes = strings.Split(*flagContainerMinimalOAuthScopes, ",")
	userDomains = strings.Split(*flagUserDomains, ",")
	saKeyExpirationTime, err = strconv.Atoi(*flagSAKeyExpirationTime)

	if err != nil {
		glog.Fatalf("Failed to convert SA key expiration time to an integer: %v", err)
	}
}
diff --git a/pkg/resource/gcp/iam_policy.go b/pkg/resource/gcp/iam_policy.go
new file mode 100644
index 0000000..042b618
--- /dev/null
+++ b/pkg/resource/gcp/iam_policy.go
@@ -0,0 +1,310 @@
+package gcp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
+)
+
const (
	// IAM role names referenced by the policy checks in this file.
	editorRole              = "roles/editor"
	serviceAccountUserRole  = "roles/iam.serviceAccountUser"
	serviceAccountAdminRole = "roles/iam.serviceAccountAdmin"
	kmsAdminRole            = "roles/cloudkms.admin"
	// kmsRoleMatcher is the common prefix of all Cloud KMS roles; used with
	// strings.Contains to recognize any KMS role.
	kmsRoleMatcher = "roles/cloudkms."
)

var (
	// logTypes are the Cloud Audit log types that a fully configured audit
	// policy must enable.
	logTypes = []string{"ADMIN_READ", "DATA_READ", "DATA_WRITE"}
)
+
// isGCPAccount reports whether the member string refers to one of Google's
// default service accounts (the default Compute Engine or App Engine account).
func isGCPAccount(member string) bool {
	for _, marker := range []string{"developer.gserviceaccount.com", "appspot.gserviceaccount.com"} {
		if strings.Contains(member, marker) {
			return true
		}
	}
	return false
}
+
// isAdminRole reports whether the role name looks administrative: it contains
// "admin", "owner", or "editor" (case-sensitive substring match).
func isAdminRole(role string) bool {
	switch {
	case strings.Contains(role, "admin"),
		strings.Contains(role, "owner"),
		strings.Contains(role, "editor"):
		return true
	default:
		return false
	}
}
+
// isIAMUserMember reports whether the member is a human-identity binding: a
// "user:", "group:", or "domain:" member (as opposed to a service account).
// IAM members are formatted as "<type>:<id>", so prefix matching is used; the
// original strings.Contains would also match those markers mid-string.
func isIAMUserMember(member string) bool {
	return strings.HasPrefix(member, "user:") ||
		strings.HasPrefix(member, "group:") ||
		strings.HasPrefix(member, "domain:")
}
+
+// IamPolicyResource is resource for testing IAM Policies in GCP
+type IamPolicyResource struct {
+ p *cloudresourcemanager.Policy
+}
+
+// NewIamPolicyResource returns a new IamPolicyResource
+func NewIamPolicyResource(p *cloudresourcemanager.Policy) *IamPolicyResource {
+ r := new(IamPolicyResource)
+ r.p = p
+ return r
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *IamPolicyResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.p)
+}
+
+// PolicyViolatesUserDomainWhitelist returns whether the policy contains a user or domain that is not part of the domain whitelist
+func (r *IamPolicyResource) PolicyViolatesUserDomainWhitelist() (err error) {
+
+ var errBuilder strings.Builder
+ for _, b := range r.p.Bindings {
+ for _, member := range b.Members {
+ if isIAMUserMember(member) {
+ for _, domain := range userDomains {
+ if !strings.Contains(member, domain) {
+ errBuilder.WriteString(fmt.Sprintf("%v is not allowed by your domain whitelist. ", member))
+ }
+ }
+ }
+ }
+ }
+
+ // If we collected errors, report a failure
+ errString := errBuilder.String()
+ if errString != "" {
+ err = errors.New(errString)
+ } else {
+ err = nil
+ }
+
+ return
+}
+
// MemberHasAdminRole returns, via the error result, whether the given member
// holds an administrative role ("admin"/"owner"/"editor" in the role name)
// anywhere in the policy. A nil error means no admin role was found.
func (r *IamPolicyResource) MemberHasAdminRole(member string) (err error) {

	for _, b := range r.p.Bindings {
		for _, m := range b.Members {
			if member == m {

				// Found the member, now check if it has an admin role
				if isAdminRole(b.Role) {

					// Allow the default compute and appengine service accounts
					// to have "editor" role
					if isGCPAccount(member) && b.Role == editorRole {
						// NOTE(review): this early return also discards any
						// error already recorded from a previous binding —
						// confirm that is intended.
						return
					}

					err = fmt.Errorf("Member has admin role %v", b.Role)
				}
				// A member appears at most once per binding, so move on to the
				// next binding. If the member holds several admin roles, err
				// ends up describing only the last one seen.
				break
			}
		}
	}

	return
}
+
+// PolicyAllowsIAMUserServiceAccountUserRole checks whether the policy allows non-service account
+// users to impersonate a service account (privelage escalation)
+func (r *IamPolicyResource) PolicyAllowsIAMUserServiceAccountUserRole() (err error) {
+
+ var errBuilder strings.Builder
+
+ for _, b := range r.p.Bindings {
+ if b.Role == serviceAccountUserRole {
+ for _, member := range b.Members {
+ if isIAMUserMember(member) {
+ errBuilder.WriteString(fmt.Sprintf("%v has Service Account User role. ", member))
+ }
+ }
+ break
+ }
+ }
+
+ errString := errBuilder.String()
+ if errString != "" {
+ err = errors.New(errString)
+ }
+
+ return
+}
+
+func (r *IamPolicyResource) findMembersWithOverlappingRoles(roleA string, roleB string) []string {
+
+ aMembers := []string{}
+ bMembers := []string{}
+
+ for _, b := range r.p.Bindings {
+
+ // Check for members that have the A role. If we don't that role,
+ // then there's nothing to check
+ if b.Role == roleA {
+
+ // Now check for a binding with the user role
+ for _, bb := range r.p.Bindings {
+
+ // If we find a binding, then we need to check for overlap.
+ if bb.Role == roleB {
+ aMembers = b.Members
+ bMembers = bb.Members
+ }
+ break
+ }
+ break
+ }
+ }
+
+ overlap := []string{}
+
+ // Now compare memberships for overlap
+ for _, m := range aMembers {
+ for _, mm := range bMembers {
+ if m == mm {
+ overlap = append(overlap, m)
+ }
+ }
+ }
+
+ return overlap
+}
+
+// PolicyViolatesServiceAccountSeparationoOfDuties returns whether the policy allows for IAM users
+// to both administrate and impersonate service accounts
+func (r *IamPolicyResource) PolicyViolatesServiceAccountSeparationoOfDuties() (err error) {
+
+ // We should report errors when we see a member that has both roles:
+ // -- roles/iam.serviceAccountUser
+ // -- roles/iam.serviceAccountAdmin
+ overlap := r.findMembersWithOverlappingRoles(serviceAccountAdminRole, serviceAccountUserRole)
+
+ var errBuilder strings.Builder
+
+ // Now compare memberships. If there is overlap, report these as errors
+ for _, m := range overlap {
+ errBuilder.WriteString(fmt.Sprintf("%v can both administrate and impersonate service accounts. ", m))
+ }
+
+ errString := errBuilder.String()
+ if errString != "" {
+ err = errors.New(errString)
+ }
+
+ return
+}
+
// PolicyViolatesKMSSeparationoOfDuties returns, via the error result, whether
// any member both administrates KMS (roles/cloudkms.admin) and holds another
// Cloud KMS role. A nil error means no such overlap was found.
func (r *IamPolicyResource) PolicyViolatesKMSSeparationoOfDuties() (err error) {

	// We should report errors when we see a member that has the KMS admin role and a non-admin role:
	// -- roles/cloudkms.admin
	// -- roles/cloudkms.*

	kmsRolesDefined := []string{}

	for _, b := range r.p.Bindings {

		// If we have no admin role, then there's nothing to check
		if b.Role == kmsAdminRole {
			// Collect every non-admin Cloud KMS role granted anywhere in the
			// policy (kmsRoleMatcher is the "roles/cloudkms." prefix).
			for _, bb := range r.p.Bindings {

				if bb.Role != kmsAdminRole && strings.Contains(bb.Role, kmsRoleMatcher) {
					kmsRolesDefined = append(kmsRolesDefined, bb.Role)
				}
			}

			// One pass suffices: the inner loop already scanned all bindings.
			break
		}
	}

	var errBuilder strings.Builder

	// Now check each collected role for members that also hold the admin role.
	// NOTE(review): a role granted in several bindings is collected (and thus
	// reported) once per binding — confirm whether dedup is wanted.
	for _, role := range kmsRolesDefined {
		overlap := r.findMembersWithOverlappingRoles(kmsAdminRole, role)
		for _, member := range overlap {
			errBuilder.WriteString(fmt.Sprintf("%v can both administrate and perform actions with %v. ", member, role))
		}
	}

	errString := errBuilder.String()
	if errString != "" {
		err = errors.New(errString)
	}

	return
}
+
+// PolicyConfiguresAuditLogging returns whether the IAM policy defines Cloud Audit logging
+func (r *IamPolicyResource) PolicyConfiguresAuditLogging() error {
+
+ // Do we even have an auditConfig?
+ if r.p.AuditConfigs == nil {
+ return errors.New("Policy does not define auditConfigs")
+ }
+
+ if r.p.AuditConfigs[0].Service != "allServices" {
+ return errors.New("allServices is not the default audit config policy")
+ }
+
+ if r.p.AuditConfigs[0].AuditLogConfigs == nil {
+ return errors.New("Policy does not define auditLogConfigs")
+ }
+
+ // We must have the required number of audit log config types
+ if len(r.p.AuditConfigs[0].AuditLogConfigs) != len(logTypes) {
+ return errors.New("Policy does not define all required log types (requires ADMIN_READ, DATA_READ, DATA_WRITE)")
+ }
+
+ for _, cfg := range r.p.AuditConfigs[0].AuditLogConfigs {
+ found := false
+ for _, typ := range logTypes {
+ if cfg.LogType == typ {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return errors.New("Policy has an unrecognized auditLogConfig type")
+ }
+ }
+
+ return nil
+}
+
+// PolicyDoesNotHaveAuditLogExceptions returns whether the IAM policy allows for exceptions to audit logging
+func (r *IamPolicyResource) PolicyDoesNotHaveAuditLogExceptions() error {
+
+ // Do we even have an auditConfig?
+ if r.p.AuditConfigs == nil {
+ return errors.New("Policy does not define auditConfigs")
+ }
+
+ if r.p.AuditConfigs[0].AuditLogConfigs == nil {
+ return errors.New("Policy does not define auditLogConfigs")
+ }
+
+ var errBuilder strings.Builder
+
+ for _, cfg := range r.p.AuditConfigs[0].AuditLogConfigs {
+ if len(cfg.ExemptedMembers) != 0 {
+ errBuilder.WriteString(fmt.Sprintf("%s has the following exceptions: ", cfg.LogType))
+ for _, exempt := range cfg.ExemptedMembers {
+ errBuilder.WriteString(exempt)
+ errBuilder.WriteString(",")
+ }
+ errBuilder.WriteString(". ")
+ }
+ }
+
+ errString := errBuilder.String()
+
+ if len(errString) != 0 {
+ return errors.New(errString)
+ }
+
+ return nil
+}
diff --git a/pkg/resource/gcp/iam_policy_test.go b/pkg/resource/gcp/iam_policy_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/iam_policy_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/iam_serviceaccount.go b/pkg/resource/gcp/iam_serviceaccount.go
new file mode 100644
index 0000000..9c0e0ed
--- /dev/null
+++ b/pkg/resource/gcp/iam_serviceaccount.go
@@ -0,0 +1,68 @@
+package gcp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/glog"
+ iam "google.golang.org/api/iam/v1"
+)
+
+// IamServiceAccountResource is resource for testing IAM Service Accounts in GCP
+type IamServiceAccountResource struct {
+ s *iam.ServiceAccount
+ Keys []*iam.ServiceAccountKey
+}
+
+// NewIamServiceAccountResource returns a new IamServiceAccountResource
+func NewIamServiceAccountResource(s *iam.ServiceAccount) *IamServiceAccountResource {
+ r := new(IamServiceAccountResource)
+ r.s = s
+ r.Keys = []*iam.ServiceAccountKey{}
+ return r
+}
+
+// Email returns the email address of the service account
+func (r *IamServiceAccountResource) Email() string {
+ return r.s.Email
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *IamServiceAccountResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.s)
+}
+
+// HasUserManagedKeys returns whether a service account has user-managed keys
+func (r *IamServiceAccountResource) HasUserManagedKeys() bool {
+ return len(r.Keys) != 0
+}
+
+// HasKeysNeedingRotation returns an error when the service account has keys older than the allowed time
+func (r *IamServiceAccountResource) HasKeysNeedingRotation() (err error) {
+
+ var errBuilder strings.Builder
+
+ for _, k := range r.Keys {
+ t, err := time.Parse(time.RFC3339, k.ValidAfterTime)
+ if err != nil {
+ glog.Fatalf("Failed to parse timestamp when checking keys: %v", err)
+ }
+
+ if t.Sub(time.Now()).Hours() > float64(saKeyExpirationTime*24) {
+ errBuilder.WriteString(fmt.Sprintf("%v has key older than %v days. ", k.Name, saKeyExpirationTime))
+ }
+
+ }
+
+ errString := errBuilder.String()
+ if errString != "" {
+ err = errors.New(errString)
+ } else {
+ err = nil
+ }
+
+ return
+}
diff --git a/pkg/resource/gcp/iam_serviceaccount_test.go b/pkg/resource/gcp/iam_serviceaccount_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/iam_serviceaccount_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/logging_metric.go b/pkg/resource/gcp/logging_metric.go
new file mode 100644
index 0000000..5f2ab4a
--- /dev/null
+++ b/pkg/resource/gcp/logging_metric.go
@@ -0,0 +1,27 @@
+package gcp
+
+import (
+ loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+)
+
+// LoggingMetricResource represents a StackDriver log-based metric
+type LoggingMetricResource struct {
+ m *loggingpb.LogMetric
+}
+
+// NewLoggingMetricResource returns a new LoggingMetricResource
+func NewLoggingMetricResource(metric *loggingpb.LogMetric) *LoggingMetricResource {
+ r := new(LoggingMetricResource)
+ r.m = metric
+ return r
+}
+
+// Filter returns the filter of the metric
+func (r *LoggingMetricResource) Filter() string {
+ return r.m.Filter
+}
+
+// FilterMatches returns whether the configured filter matches a given string
+func (r *LoggingMetricResource) FilterMatches(filter string) bool {
+ return r.m.Filter == filter
+}
diff --git a/pkg/resource/gcp/logging_metric_test.go b/pkg/resource/gcp/logging_metric_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/logging_metric_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/logging_sink.go b/pkg/resource/gcp/logging_sink.go
new file mode 100644
index 0000000..60685d6
--- /dev/null
+++ b/pkg/resource/gcp/logging_sink.go
@@ -0,0 +1,25 @@
+package gcp
+
+import (
+ loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+)
+
+// LoggingSinkResource represents a StackDriver logging sink
+type LoggingSinkResource struct {
+ s *loggingpb.LogSink
+}
+
+// NewLoggingSinkResource returns a new LoggingSinkResource
+func NewLoggingSinkResource(sink *loggingpb.LogSink) *LoggingSinkResource {
+ r := new(LoggingSinkResource)
+ r.s = sink
+ return r
+}
+
+// ShipsAllLogs indicates whether there is no filter (and thus all logs are shipped)
+func (r *LoggingSinkResource) ShipsAllLogs() bool {
+
+ // An empty string indicates that there is no filter - thus all logs
+ // that are generated are shipped to the logging sink destination
+ return r.s.Filter == ""
+}
diff --git a/pkg/resource/gcp/logging_sink_test.go b/pkg/resource/gcp/logging_sink_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/logging_sink_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/serviceusage.go b/pkg/resource/gcp/serviceusage.go
new file mode 100644
index 0000000..04f79d7
--- /dev/null
+++ b/pkg/resource/gcp/serviceusage.go
@@ -0,0 +1,29 @@
+package gcp
+
+import (
+ "encoding/json"
+
+ serviceusage "google.golang.org/api/serviceusage/v1"
+)
+
+// ServiceAPIResource represents a Google Service API resource
+type ServiceAPIResource struct {
+ a *serviceusage.GoogleApiServiceusageV1Service
+}
+
+// NewServiceAPIResource returns a new ServiceAPIResource
+func NewServiceAPIResource(a *serviceusage.GoogleApiServiceusageV1Service) *ServiceAPIResource {
+ r := new(ServiceAPIResource)
+ r.a = a
+ return r
+}
+
+// Name returns the bucket's name
+func (r *ServiceAPIResource) Name() string {
+ return r.a.Name
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *ServiceAPIResource) Marshal() ([]byte, error) {
+ return json.Marshal(&r.a)
+}
diff --git a/pkg/resource/gcp/serviceusage_test.go b/pkg/resource/gcp/serviceusage_test.go
new file mode 100644
index 0000000..67580e4
--- /dev/null
+++ b/pkg/resource/gcp/serviceusage_test.go
@@ -0,0 +1 @@
+package gcp
diff --git a/pkg/resource/gcp/storage_bucket.go b/pkg/resource/gcp/storage_bucket.go
new file mode 100644
index 0000000..1d6e875
--- /dev/null
+++ b/pkg/resource/gcp/storage_bucket.go
@@ -0,0 +1,83 @@
+package gcp
+
+import (
+ "encoding/json"
+ "fmt"
+
+ storage "google.golang.org/api/storage/v1"
+)
+
+// StorageBucketResource represents a Google Storage bucket resource
+type StorageBucketResource struct {
+	// b is the underlying Storage API bucket object this resource wraps
+	b *storage.Bucket
+}
+
+// NewStorageBucketResource wraps the given bucket in a StorageBucketResource
+func NewStorageBucketResource(b *storage.Bucket) *StorageBucketResource {
+	return &StorageBucketResource{b: b}
+}
+
+// Name returns the bucket's name as reported by the Storage API
+func (r *StorageBucketResource) Name() string {
+	return r.b.Name
+}
+
+// Marshal returns the underlying resource's JSON representation
+func (r *StorageBucketResource) Marshal() ([]byte, error) {
+	// json.Marshal dereferences pointers, so r.b and &r.b encode identically
+	return json.Marshal(r.b)
+}
+
+// AllowAllUsers checks whether a bucket is configured to be world readable
+func (r *StorageBucketResource) AllowAllUsers() bool {
+	// The `allUsers` entity denotes public access
+	for _, acl := range r.b.Acl {
+		if acl.Entity == "allUsers" {
+			return true
+		}
+	}
+	return false
+}
+
+// AllowAllAuthenticatedUsers checks whether a bucket is configured to be readable by anyone with a Google account
+func (r *StorageBucketResource) AllowAllAuthenticatedUsers() bool {
+	// The `allAuthenticatedUsers` entity grants access to any Google-authenticated user
+	for _, acl := range r.b.Acl {
+		if acl.Entity == "allAuthenticatedUsers" {
+			return true
+		}
+	}
+	return false
+}
+
+// HasBucketPolicyOnlyEnabled checks whether a bucket is configured to use
+// uniform (bucket-policy-only) permissions across the entire bucket.
+// It returns an error when the bucket's IAM configuration is unavailable.
+func (r *StorageBucketResource) HasBucketPolicyOnlyEnabled() (bool, error) {
+	iamConfig := r.b.IamConfiguration
+	if iamConfig == nil {
+		// Error strings are lowercase per Go convention (staticcheck ST1005)
+		return false, fmt.Errorf("could not retrieve IAM configuration for gs://%v", r.b.Name)
+	}
+
+	// A missing BucketPolicyOnly block is treated as "not enabled", not an error
+	if bucketPolicyOnly := iamConfig.BucketPolicyOnly; bucketPolicyOnly != nil {
+		return bucketPolicyOnly.Enabled, nil
+	}
+	return false, nil
+}
diff --git a/pkg/resource/gcp/storage_bucket_test.go b/pkg/resource/gcp/storage_bucket_test.go
new file mode 100644
index 0000000..c71e4b1
--- /dev/null
+++ b/pkg/resource/gcp/storage_bucket_test.go
@@ -0,0 +1,146 @@
+package gcp
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ storage "google.golang.org/api/storage/v1"
+)
+
+// Helper function for making fake bucket resources
+func makeTestBucket(data []byte) *storage.Bucket {
+ bucket := new(storage.Bucket)
+ _ = json.Unmarshal(data, &bucket)
+ return bucket
+}
+
+var (
+	// testValidBucketData is a captured Storage API response for a private,
+	// regional bucket: only project-team ACL entries (no allUsers /
+	// allAuthenticatedUsers) and an empty bucketPolicyOnly configuration.
+	testValidBucketData = []byte(`
+{
+ "acl": [
+ {
+ "bucket": "my-test-bucket",
+ "entity": "project-owners-01010101010101",
+ "etag": "CAE=",
+ "id": "my-test-bucket/project-owners-01010101010101",
+ "kind": "storage#bucketAccessControl",
+ "projectTeam": {
+ "projectNumber": "01010101010101",
+ "team": "owners"
+ },
+ "role": "OWNER",
+ "selfLink": "https://www.googleapis.com/storage/v1/b/my-test-bucket/acl/project-owners-01010101010101"
+ },
+ {
+ "bucket": "my-test-bucket",
+ "entity": "project-editors-01010101010101",
+ "etag": "CAE=",
+ "id": "my-test-bucket/project-editors-01010101010101",
+ "kind": "storage#bucketAccessControl",
+ "projectTeam": {
+ "projectNumber": "01010101010101",
+ "team": "editors"
+ },
+ "role": "OWNER",
+ "selfLink": "https://www.googleapis.com/storage/v1/b/my-test-bucket/acl/project-editors-01010101010101"
+ },
+ {
+ "bucket": "my-test-bucket",
+ "entity": "project-viewers-01010101010101",
+ "etag": "CAE=",
+ "id": "my-test-bucket/project-viewers-01010101010101",
+ "kind": "storage#bucketAccessControl",
+ "projectTeam": {
+ "projectNumber": "01010101010101",
+ "team": "viewers"
+ },
+ "role": "READER",
+ "selfLink": "https://www.googleapis.com/storage/v1/b/my-test-bucket/acl/project-viewers-01010101010101"
+ }
+ ],
+ "etag": "CAE=",
+ "iamConfiguration": {
+ "bucketPolicyOnly": {}
+ },
+ "id": "my-test-bucket",
+ "kind": "storage#bucket",
+ "location": "US-CENTRAL1",
+ "metageneration": "1",
+ "name": "my-test-bucket",
+ "projectNumber": "01010101010101",
+ "selfLink": "https://www.googleapis.com/storage/v1/b/my-test-bucket",
+ "storageClass": "REGIONAL",
+ "timeCreated": "2019-01-18T14:14:07.472Z",
+ "updated": "2019-01-18T14:14:07.472Z"
+}
+`)
+
+	// testValidBucket is the decoded fixture shared by the tests below
+	testValidBucket = makeTestBucket(testValidBucketData)
+)
+
+// Make sure that a bucket resource is created correctly
+func TestNewStorageBucketResource(t *testing.T) {
+	bucketResource := NewStorageBucketResource(testValidBucket)
+
+	// Both the wrapper and its underlying datasource must be non-nil
+	assert.NotNil(t, bucketResource)
+	assert.NotNil(t, bucketResource.b)
+}
+
+// Name must reflect the name from the underlying bucket fixture
+func TestStorageBucketResourceName(t *testing.T) {
+	bucketResource := NewStorageBucketResource(testValidBucket)
+	assert.Equal(t, "my-test-bucket", bucketResource.Name())
+}
+// Marshal must round-trip exactly the JSON of the wrapped bucket
+func TestStorageBucketResourceMarshal(t *testing.T) {
+
+	bucketResource := NewStorageBucketResource(testValidBucket)
+
+	// Marshal the original bucket for comparison
+	orig, err := json.Marshal(&testValidBucket)
+	// BUG FIX: this error was previously overwritten without being checked
+	assert.Nil(t, err)
+
+	// Make sure the data returns the same as we put in
+	data, err := bucketResource.Marshal()
+	assert.Nil(t, err)
+
+	assert.Equal(t, orig, data)
+}
+
+// The private fixture must not report public (allUsers) access
+func TestStorageBucketResourceAllowAllUsers(t *testing.T) {
+	bucketResource := NewStorageBucketResource(testValidBucket)
+	assert.False(t, bucketResource.AllowAllUsers())
+
+	// TODO - add a bucket with the `allUsers` entity and check that it works
+}
+// The private fixture must not report allAuthenticatedUsers access
+func TestStorageBucketResourceAllowAllAuthenticatedUsers(t *testing.T) {
+	bucketResource := NewStorageBucketResource(testValidBucket)
+	assert.False(t, bucketResource.AllowAllAuthenticatedUsers())
+
+	// TODO - add a bucket with the `allAuthenticatedUsers` entity and check that it works
+}
+// The fixture's empty bucketPolicyOnly block must read as disabled, without error
+func TestStorageBucketResourceHasBucketPolicyOnlyEnabled(t *testing.T) {
+	bucketResource := NewStorageBucketResource(testValidBucket)
+	enabled, err := bucketResource.HasBucketPolicyOnlyEnabled()
+	assert.Nil(t, err)
+	assert.False(t, enabled)
+
+	// TODO - add a bucket with the BucketPolicyOnly IAM configuration and test it
+}
diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go
new file mode 100644
index 0000000..6857f1b
--- /dev/null
+++ b/pkg/runner/runner.go
@@ -0,0 +1,136 @@
+// Package runner executes a configured audit
+package runner
+
+import (
+ "flag"
+
+ "github.com/Unity-Technologies/nemesis/pkg/client"
+ "github.com/Unity-Technologies/nemesis/pkg/report"
+ "github.com/Unity-Technologies/nemesis/pkg/utils"
+ "github.com/golang/glog"
+)
+
+// Reporter configuration flags; each default falls back to a NEMESIS_* environment variable.
+var (
+	flagReportEnableStdout = flag.Bool("reports.stdout.enable", utils.GetEnvBool("NEMESIS_ENABLE_STDOUT"), "Enable outputting report via stdout")
+	flagReportEnablePubsub = flag.Bool("reports.pubsub.enable", utils.GetEnvBool("NEMESIS_ENABLE_PUBSUB"), "Enable outputting report via Google Pub/Sub")
+	flagReportPubsubProject = flag.String("reports.pubsub.project", utils.GetEnv("NEMESIS_PUBSUB_PROJECT", ""), "Indicate which GCP project to output Pub/Sub reports to")
+	flagReportPubsubTopic = flag.String("reports.pubsub.topic", utils.GetEnv("NEMESIS_PUBSUB_TOPIC", "nemesis"), "Indicate which topic to output Pub/Sub reports to")
+)
+
+// Audit is a runner that encapsulates the logic of an audit against GCP resources
+type Audit struct {
+	// c is the API client used to fetch resources and push metrics
+	c *client.Client
+	// reports accumulates the generated audit reports
+	reports []report.Report
+	// reporters is the set of configured report publishers
+	reporters []report.Reporter
+}
+
+// NewAudit returns an Audit runner with empty report and reporter sets
+func NewAudit() *Audit {
+	// Explicit empty slices (rather than nil) match the original behavior
+	return &Audit{
+		reports:   []report.Report{},
+		reporters: []report.Reporter{},
+	}
+}
+
+// Setup configures an Audit runner and sets up audit resources
+func (a *Audit) Setup() {
+	a.c = client.New()
+
+	a.setupReporters()
+
+	// Each fetcher pulls one category of resources. Any failure is fatal
+	// because a partial inventory would produce a misleading audit.
+	fetchers := []struct {
+		kind string
+		fn   func() error
+	}{
+		{"project", a.c.GetProjects},
+		{"iam", a.c.GetIamResources},
+		{"compute", a.c.GetComputeResources},
+		{"logging", a.c.GetLoggingResources},
+		{"network", a.c.GetNetworkResources},
+		{"container", a.c.GetContainerResources},
+		{"storage", a.c.GetStorageResources},
+	}
+
+	for _, f := range fetchers {
+		if err := f.fn(); err != nil {
+			glog.Fatalf("Failed to retrieve %s resources: %v", f.kind, err)
+		}
+	}
+}
+
+// setupReporters registers every reporter enabled via flags.
+func (a *Audit) setupReporters() {
+	// Pub/Sub reporting requires both a destination project and a topic.
+	if *flagReportEnablePubsub {
+		if *flagReportPubsubProject == "" {
+			glog.Fatal("PubSub project not specified")
+		}
+		if *flagReportPubsubTopic == "" {
+			glog.Fatal("PubSub topic not specified")
+		}
+		a.reporters = append(a.reporters, report.NewPubSubReporter(*flagReportPubsubProject, *flagReportPubsubTopic))
+	}
+
+	// Stdout reporting needs no extra configuration.
+	if *flagReportEnableStdout {
+		a.reporters = append(a.reporters, report.NewStdOutReporter())
+	}
+}
+
+// Execute runs every configured report generator to completion.
+// NOTE(review): generators run sequentially, one after another — the
+// original comment claimed concurrency, but no goroutines are started here.
+func (a *Audit) Execute() {
+
+	// Each generator produces the reports for one resource category
+	// TODO - how to make this list dynamic?
+	generators := []func() (reports []report.Report, err error){
+		a.c.GenerateComputeMetadataReports,
+		a.c.GenerateComputeInstanceReports,
+		a.c.GenerateLoggingReports,
+		a.c.GenerateComputeNetworkReports,
+		a.c.GenerateComputeSubnetworkReports,
+		a.c.GenerateComputeFirewallRuleReports,
+		a.c.GenerateComputeAddressReports,
+		a.c.GenerateIAMPolicyReports,
+		a.c.GenerateStorageBucketReports,
+		a.c.GenerateContainerClusterReports,
+		a.c.GenerateContainerNodePoolReports,
+	}
+
+	for _, f := range generators {
+		reports, err := f()
+		if err != nil {
+			glog.Fatalf("Failed to generate reports: %v", err)
+		}
+		a.reports = append(a.reports, reports...)
+	}
+
+}
+
+// Report exports the configured reports to their final destination
+func (a *Audit) Report() {
+	// Export collected metrics before publishing report payloads
+	if err := a.c.PushMetrics(); err != nil {
+		glog.Fatalf("Failed to push metrics: %v", err)
+	}
+
+	// Hand the full report set to every configured reporter
+	for _, reporter := range a.reporters {
+		if err := reporter.Publish(a.reports); err != nil {
+			glog.Fatalf("Failed to publish reports: %v", err)
+		}
+	}
+}
diff --git a/pkg/runner/runner_test.go b/pkg/runner/runner_test.go
new file mode 100644
index 0000000..75c10db
--- /dev/null
+++ b/pkg/runner/runner_test.go
@@ -0,0 +1 @@
+package runner
diff --git a/pkg/utils/env.go b/pkg/utils/env.go
new file mode 100644
index 0000000..9630d00
--- /dev/null
+++ b/pkg/utils/env.go
@@ -0,0 +1,32 @@
+package utils
+
+import (
+ "os"
+ "strconv"
+)
+
+// GetEnv returns the value of the named OS environment variable, or
+// defaultVal when the variable is not set. An empty-but-set variable
+// returns the empty string, not the default.
+func GetEnv(key string, defaultVal string) string {
+	envVal, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultVal
+	}
+	return envVal
+}
+
+// GetEnvBool returns the boolean value of the named OS environment
+// variable; unset or unparseable values yield false.
+func GetEnvBool(key string) bool {
+	envVal, ok := os.LookupEnv(key)
+	if !ok {
+		return false
+	}
+	// ParseBool errors are deliberately swallowed: any value that is not
+	// a recognized boolean form is treated as false
+	parsed, _ := strconv.ParseBool(envVal)
+	return parsed
+}
+
+// GetEnvInt returns an integer based on the OS environment variable, and
+// returns a default value if the variable is not found or cannot be parsed.
+func GetEnvInt(key string, defaultVal int) int {
+	if envVal, ok := os.LookupEnv(key); ok {
+		// Base 0 accepts decimal, hex ("0x...") and octal ("0...") forms.
+		// The error result was previously misnamed `ok`, shadowing the
+		// boolean above — renamed to err for clarity.
+		if val, err := strconv.ParseInt(envVal, 0, 0); err == nil {
+			return int(val)
+		}
+	}
+	return defaultVal
+}
diff --git a/pkg/utils/env_test.go b/pkg/utils/env_test.go
new file mode 100644
index 0000000..97645fa
--- /dev/null
+++ b/pkg/utils/env_test.go
@@ -0,0 +1,51 @@
+package utils
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetEnv(t *testing.T) {
+	// A set variable wins over the supplied default
+	assert.Nil(t, os.Setenv("TEST_STRING", "my_value"))
+	assert.Equal(t, "my_value", GetEnv("TEST_STRING", "my_value"))
+
+	// An unset variable falls back to the default
+	assert.Equal(t, "unset_value", GetEnv("UNSET_TEST_STRING", "unset_value"))
+
+	// A stale default never masks the live value
+	assert.Nil(t, os.Setenv("TEST_STRING", "updated_value"))
+	assert.NotEqual(t, "old_value", GetEnv("TEST_STRING", "old_value"))
+}
+
+func TestGetEnvBool(t *testing.T) {
+	cases := []struct {
+		value string
+		want  bool
+	}{
+		// Recognized true forms
+		{"1", true}, {"t", true}, {"T", true}, {"true", true}, {"TRUE", true}, {"True", true},
+		// Recognized false forms
+		{"0", false}, {"f", false}, {"F", false}, {"false", false}, {"FALSE", false}, {"False", false},
+		// Non-boolean values must come back false
+		{"set", false}, {"foo", false}, {"bar", false}, {"2", false}, {"@", false},
+	}
+
+	for _, c := range cases {
+		assert.Nil(t, os.Setenv("TEST_BOOL", c.value))
+		assert.Equal(t, c.want, GetEnvBool("TEST_BOOL"))
+	}
+}
diff --git a/pkg/utils/flags.go b/pkg/utils/flags.go
new file mode 100644
index 0000000..b604142
--- /dev/null
+++ b/pkg/utils/flags.go
@@ -0,0 +1,7 @@
+package utils
+
+import "flag"
+
+var (
+	// flagDebug enables verbose timing output; defaults from the NEMESIS_DEBUG env var.
+	flagDebug = flag.Bool("debug", GetEnvBool("NEMESIS_DEBUG"), "Enable verbose output for debugging")
+)
diff --git a/pkg/utils/timing.go b/pkg/utils/timing.go
new file mode 100644
index 0000000..93809d4
--- /dev/null
+++ b/pkg/utils/timing.go
@@ -0,0 +1,17 @@
+package utils
+
+import (
+ "fmt"
+ "time"
+)
+
+// Elapsed returns a function that, when invoked (typically via defer),
+// prints how long the surrounding code segment took. When debugging is
+// disabled the returned function is a no-op.
+// NOTE(review): no goroutine is involved, contrary to the original comment.
+func Elapsed(what string) func() {
+	if *flagDebug {
+		start := time.Now()
+		return func() {
+			fmt.Printf("%s took %v\n", what, time.Since(start))
+		}
+	}
+	return func() {}
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 0000000..cdb4092
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,86 @@
+package version
+
+import (
+ "bytes"
+ "fmt"
+)
+
+var (
+	// GitCommit is the git commit that was compiled. This will be filled in by the compiler.
+	GitCommit string
+	// GitDescribe is the `git describe` output at build time, also injected by the compiler.
+	GitDescribe string
+
+	// Version is the main version number that is being run at the moment.
+	Version = "0.0.1"
+
+	// VersionPrerelease is a pre-release marker for the version. If this is "" (empty string)
+	// then it means that it is a final release. Otherwise, this is a pre-release
+	// such as "dev" (in development), "beta", "rc1", etc.
+	VersionPrerelease = "dev"
+
+	// VersionMetadata is metadata further describing the build type.
+	VersionMetadata = ""
+)
+
+// Info contains info about the binary's version
+type Info struct {
+	// Revision is the git commit the binary was built from
+	Revision string
+	// Version is the semantic version number (or git describe output)
+	Version string
+	// VersionPrerelease is the pre-release marker ("dev", "beta", ...); empty for final releases
+	VersionPrerelease string
+	// VersionMetadata is extra build metadata rendered after "+"
+	VersionMetadata string
+}
+
+// GetVersion is a utility for retrieving the current version
+func GetVersion() *Info {
+	ver := Version
+	if GitDescribe != "" {
+		// A git describe string supersedes the hard-coded version number
+		ver = GitDescribe
+	}
+
+	// NOTE: a branch that defaulted the pre-release marker to "dev" when
+	// `GitDescribe == "" && rel == "" && VersionPrerelease != ""` has been
+	// removed: it could never fire, since rel was initialized directly from
+	// VersionPrerelease and the two sides of the conjunction contradict.
+	return &Info{
+		Revision:          GitCommit,
+		Version:           ver,
+		VersionPrerelease: VersionPrerelease,
+		VersionMetadata:   VersionMetadata,
+	}
+}
+
+// VersionNumber builds a short string describing the nemesis version,
+// e.g. "0.0.1-dev" or "0.0.1-dev+ent".
+func (c *Info) VersionNumber() string {
+	// fmt.Sprintf("%s", s) on a string is a no-op (staticcheck S1025);
+	// plain concatenation is simpler and avoids the boxing/reflection cost.
+	version := c.Version
+
+	if c.VersionPrerelease != "" {
+		version += "-" + c.VersionPrerelease
+	}
+
+	if c.VersionMetadata != "" {
+		version += "+" + c.VersionMetadata
+	}
+
+	return version
+}
+
+// FullVersionNumber builds the long, human-readable version string,
+// optionally appending the git revision.
+func (c *Info) FullVersionNumber(rev bool) string {
+	var b bytes.Buffer
+
+	fmt.Fprintf(&b, "nemesis v%s", c.Version)
+	if c.VersionPrerelease != "" {
+		fmt.Fprintf(&b, "-%s", c.VersionPrerelease)
+	}
+	if c.VersionMetadata != "" {
+		fmt.Fprintf(&b, "+%s", c.VersionMetadata)
+	}
+	if rev && c.Revision != "" {
+		fmt.Fprintf(&b, " (%s)", c.Revision)
+	}
+
+	return b.String()
+}
diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go
new file mode 100644
index 0000000..f37d99d
--- /dev/null
+++ b/pkg/version/version_test.go
@@ -0,0 +1 @@
+package version