From 414b3cd1ca7a5098db6e551149e34474574a0788 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Sun, 20 Jan 2019 19:07:51 +0200 Subject: [PATCH 01/16] Validator - initial --- validator/Chart.yaml | 19 +++++++++++++ validator/README.md | 2 ++ validator/templates/_helpers.tpl | 16 +++++++++++ validator/templates/storageclasses/pvcs.yaml | 29 ++++++++++++++++++++ 4 files changed, 66 insertions(+) create mode 100644 validator/Chart.yaml create mode 100644 validator/README.md create mode 100755 validator/templates/_helpers.tpl create mode 100644 validator/templates/storageclasses/pvcs.yaml diff --git a/validator/Chart.yaml b/validator/Chart.yaml new file mode 100644 index 0000000..8af5aa5 --- /dev/null +++ b/validator/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: "v1" +name: validator +version: 1.0.0 +kubeVersion: "1.9.0 - 2.0.0" +description: Validates possibility of Codefresh onprem installation +keywords: + - codefresh + - onoprem + - validator +home: https://codefresh.io/ +sources: + - https://github.com/codefresh-io/onprem +maintainers: + - name: Codefresh Authors + email: dev@codefresh.io +engine: gotpl +icon: https://codefresh.io/docs/assets/brand/codefresh-social-logo.png +appVersion: v2.0.10 +tillerVersion: ">2.9.0" \ No newline at end of file diff --git a/validator/README.md b/validator/README.md new file mode 100644 index 0000000..fe98033 --- /dev/null +++ b/validator/README.md @@ -0,0 +1,2 @@ +helm install -f $(realpath ./values.yaml) -ntst1 --wait --timeout 60 validator/ + diff --git a/validator/templates/_helpers.tpl b/validator/templates/_helpers.tpl new file mode 100755 index 0000000..f0d83d2 --- /dev/null +++ b/validator/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml new file mode 100644 index 0000000..41711c2 --- /dev/null +++ b/validator/templates/storageclasses/pvcs.yaml @@ -0,0 +1,29 @@ +{{ $root := . 
}} +{{- $scDict := dict }} +{{- range $key, $value := .Values }} +{{- if and (not (empty $value)) (eq (kindOf $value) "map") }} + {{- with index $value "persistence" }} + {{- if .storageClass }} + {{- $_ := set $scDict .storageClass "exists" }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + + +{{- range $storageClass := keys $scDict }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "fullname" $ }}-storageclass-{{ $storageClass }} + labels: + app: {{ $.Chart.Name }} +spec: + storageClassName: {{ $storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +{{- end }} From 3ae65819490210c77ec828dc4a63c3a942447c8f Mon Sep 17 00:00:00 2001 From: kosta709 Date: Mon, 21 Jan 2019 07:41:55 +0200 Subject: [PATCH 02/16] added values-validator.yaml --- validator/README.md | 3 ++- validator/templates/storageclasses/pvcs.yaml | 1 - validator/templates/values-validator.yaml | 9 +++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 validator/templates/values-validator.yaml diff --git a/validator/README.md b/validator/README.md index fe98033..cb21788 100644 --- a/validator/README.md +++ b/validator/README.md @@ -1,2 +1,3 @@ -helm install -f $(realpath ./values.yaml) -ntst1 --wait --timeout 60 validator/ + +helm install -f $(realpath ./values.yaml) -ntst1 --wait --timeout 60 validator/ \ No newline at end of file diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml index 41711c2..f050486 100644 --- a/validator/templates/storageclasses/pvcs.yaml +++ b/validator/templates/storageclasses/pvcs.yaml @@ -1,4 +1,3 @@ -{{ $root := . }} {{- $scDict := dict }} {{- range $key, $value := .Values }} {{- if and (not (empty $value)) (eq (kindOf $value) "map") }} diff --git a/validator/templates/values-validator.yaml b/validator/templates/values-validator.yaml new file mode 100644 index 0000000..10deb62 --- /dev/null +++ b/validator/templates/values-validator.yaml @@ -0,0 +1,9 @@ +{{- $errors := "" }} +{{- if or (empty .Values.global.appUrl) (eq .Values.global.appUrl "your-domain.com") }} +{{- $errors = printf "%s\n%s" $errors "global.appUrl is empty" }} +{{- end }} + + +{{- if not ( empty $errors ) }} +{{- fail $errors }} +{{- end }} From edd56af61fac1bbd0a0ae7454d925e69ec550273 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Mon, 21 Jan 2019 17:47:29 +0200 Subject: [PATCH 03/16] added run-validator.sh --- run-validator.sh | 42 ++++++++++++++++++++ scripts/helpers.sh | 30 ++++++++++++++ validator/templates/storageclasses/pvcs.yaml | 2 +- 3 files changed, 73 insertions(+), 1 deletion(-) create mode 100755 run-validator.sh create mode 100755 scripts/helpers.sh diff --git a/run-validator.sh b/run-validator.sh new file mode 100755 index 0000000..393757f --- /dev/null +++ b/run-validator.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# + +DIR=$(dirname $0) +RELEASE=cf-validator +CHART=${DIR}/validator +NAMESPACE=${NAMESPACE:-codefresh} +HELM_TIMEOUT=60 + +RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') +if [[ -n "${RELEASE_STATUS}" ]]; then + echo "There is a previous run of $RELEASE with status $RELEASE_STATUS , deleting it" + helm delete $RELEASE --purge +fi + +VALUES_FILE=${DIR}/values.yaml + +HELM=${HELM:-helm} + +HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART -f ${VALUES_FILE} --timeout $HELM_TIMEOUT --wait $@" + +echo "Running ${RELEASE} helm release +$HELM_COMMAND +" + +eval $HELM_COMMAND & +HELM_PID=$! 
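+# A minimal sketch of the background-and-wait pattern used here (plain shell
+# behavior is assumed, nothing helm-specific):
+#   (sleep 2; false) & PID=$!
+#   wait $PID; echo $?        # prints 1 - wait returns the job's exit status
+# Running helm in the background lets the script print progress hints below
+# while still reacting to the real helm exit code afterwards.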
+ +echo "Waiting ${HELM_TIMEOUT}s for validator release to complete ..." +wait $HELM_PID +HELM_EXIT_STATUS=$? + +if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then + echo "Validation Complete Successfully. Cleaning validator release" + helm delete $RELEASE --purge +else + kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} + echo "Validation Failed. Use kubectl desribe pod|pvc|pv $RELEASE to see the cause" + exit 1 +fi + + diff --git a/scripts/helpers.sh b/scripts/helpers.sh new file mode 100755 index 0000000..3236ffb --- /dev/null +++ b/scripts/helpers.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +function parse_yaml { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i Date: Mon, 21 Jan 2019 19:07:21 +0200 Subject: [PATCH 04/16] after debug run-validator.sh --- cf-onprem | 1 + run-validator.sh | 31 ++++++++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/cf-onprem b/cf-onprem index 0f4eccc..dfe1374 100755 --- a/cf-onprem +++ b/cf-onprem @@ -178,6 +178,7 @@ WEBTLSCERT_CFUI=$(cat ${WebTlsCert} | sed 's/^/ /') cat <<-EOF >${WEBTLS_VALUES_FILE} --- webTLS: + secretName: star.codefresh.io key: | ${WEBTLSKEY} cert: | diff --git a/run-validator.sh b/run-validator.sh index 393757f..28a3374 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -7,6 +7,22 @@ CHART=${DIR}/validator NAMESPACE=${NAMESPACE:-codefresh} HELM_TIMEOUT=60 +approveContext() { + echo "Your kubectl is configured with the following context: " + kubectl config current-context + read -r -p "Are you sure you want to continue? [y/N] " response + + if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] + then + echo "" + else + echo "Exiting..." + exit 0 + fi +} + +approveContext + RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') if [[ -n "${RELEASE_STATUS}" ]]; then echo "There is a previous run of $RELEASE with status $RELEASE_STATUS , deleting it" @@ -26,16 +42,25 @@ $HELM_COMMAND eval $HELM_COMMAND & HELM_PID=$! -echo "Waiting ${HELM_TIMEOUT}s for validator release to complete ..." +echo "Waiting ${HELM_TIMEOUT}s for validator release to complete ... +You can view a progress by running the command below in separate shell + +kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} \" + +" wait $HELM_PID HELM_EXIT_STATUS=$? if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then - echo "Validation Complete Successfully. Cleaning validator release" + echo "Cleaning validator release" helm delete $RELEASE --purge + echo "Validation Complete Successfully" else kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} - echo "Validation Failed. Use kubectl desribe pod|pvc|pv $RELEASE to see the cause" + echo "Validation FAILED. 
There are failed or pending resources + +Use kubectl desribe $RELEASE to see the cause + " exit 1 fi From cbc3e8268466d75c182572b8a9189e01b5a46ad8 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Mon, 28 Jan 2019 17:54:37 +0200 Subject: [PATCH 05/16] changing values.yaml.tpl for pv-refactoring --- .gitignore | 1 + cf-onprem | 10 ++- values.yaml.tpl | 213 ++++++++++++++++++------------------------------ 3 files changed, 88 insertions(+), 136 deletions(-) diff --git a/.gitignore b/.gitignore index 6b8974d..86ad640 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ assets .kube +tmp/ # values.yaml values.yaml diff --git a/cf-onprem b/cf-onprem index dfe1374..03ad3cc 100755 --- a/cf-onprem +++ b/cf-onprem @@ -177,6 +177,8 @@ WEBTLSCERT_CFUI=$(cat ${WebTlsCert} | sed 's/^/ /') cat <<-EOF >${WEBTLS_VALUES_FILE} --- +ingress: + webTlsSecretName: "star.codefresh.io" webTLS: secretName: star.codefresh.io key: | @@ -199,7 +201,7 @@ EOF # run_as_root -while [[ $1 =~ ^(-(y)|--(yes|web-tls-key|web-tls-cert|set)) ]] +while [[ $1 =~ ^(-(y)|--(yes|web-tls-key|web-tls-cert|set|debug)) ]] do key=$1 value=$2 @@ -219,6 +221,9 @@ do SET_VALUES="$SET_VALUES --set $value" shift ;; + --debug) + SET_DEBUG="--debug" + ;; esac shift # past argument or value done @@ -331,5 +336,6 @@ helm upgrade cf codefresh-onprem-${CHANNEL}/codefresh \ --set cfapi.redeploy=true \ ${SEEDJOBS} \ ${CERTJOBS} \ - ${SET_VALUES} + ${SET_VALUES} \ + ${SET_DEBUG} # ${MTU_VALUE} diff --git a/values.yaml.tpl b/values.yaml.tpl index f17f76c..a3a9547 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -4,21 +4,13 @@ global: #seedJobs: true #certsJobs: true -#### Depending on git provider use matching values -### for Gitlab git provider -# gitlabClientID: -# gitlabClientSecret: -### for Bitbucket git provider -# bitbucketClientID: -# bitbucketClientSecret: -### for Github git provider -# githubClientID: -# githubClientSecret: -# githubInternalToken: appProtocol: https ### Codefresh App domain name appUrl: your-domain.com +# Storage class for all persistent services +# storageClass: {} + ### MTU Value for dockerd in builder and runner # mtu: 1400 @@ -28,139 +20,92 @@ global: # http_proxy: "http://myproxy.domain.com:8080" # HTTPS_PROXY: "http://myproxy.domain.com:8080" # https_proxy: "http://myproxy.domain.com:8080" -# NO_PROXY: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" -# no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" - +# NO_PROXY: 
"127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store,cf-tasker-kubernetes" +# no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store,cf-tasker-kubernetes" ### Firebase secret firebaseSecret: -### Uncomment if kubernetes cluster is RBAC enabled -rbacEnable: true - ## Custom annotations for Codefresh ingress resource that override defaults #annotations: - #kubernetes.io/ingress.class: nginx-codefresh - -ingress: -### Codefresh App domain name - domain: your-domain.com -### Uncomment if kubernetes cluster is RBAC enabled - rbacEnable: true -### The name of kebernetes secret with customer certificate and private key - webTlsSecretName: "star.codefresh.io" - -### For github provider (the apiHost and loginHost are different) -cfapi: - rbacEnable: true - github: - apiHost: api.github.com - loginHost: github.com - protocol: https - -### For gitlab provider (the apiHost and loginHost are the same) -#cfapi: -# gitlab: -# apiHost: gitlab-internal.codefresh.io -# loginHost: gitlab-internal.codefresh.io -# protocol: https - -### Define kubernetes secret name for customer certificate and private key -webTLS: - secretName: star.codefresh.io - +# kubernetes.io/ingress.class: nginx-codefresh + +## Persistent services (mongodb, consul, postgress, redit, rabbit) configuration +# you can configure storageClass for dynamic volume provisoning or precreated existingPvc name +# existingPvc should exist before launching the intallation and takes precedence over storageClass +# +# Specify node selector if +# Example 1, mongodb with storageClass for dynamic volume provisoning: +# mongodb: +# storageClass: ceph-pool-1 +# storageSize: 8Gi +# +# Example 2, rabbitmq on precreated pvc for local volume on cpecific volume +# +# postgresql: +# existingPvc: cf-postgress-lv +# nodeSelector: +# kubernetes.io/hostname: storage-host-01 -consul: -### If needed to use storage class that different from default - StorageClass: {} -### Use existing volume claim name - #pvcName: cf-consul -### Use NodeSelector to assing pod to a node +mongodb: + storageSize: 8Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# services: consul-postgresql postgresql: - persistence: - #existingClaim: cf-postgresql - storageClass: {} + storageSize: 8Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# services: consul-postgresql - -mongodb: -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -## IMPORTANT ! 
-## It is not possible the combination when pvcName is defined and persistence:enabled = true -## Only one of two: -## pvcName is defined AND persistence:enabled = false -## OR -## pvcName is not defined (commented out) AND persistence:enabled = true -## -## Use existing volume claim name - #pvcName: cf-mongodb -## Provision new volume claim - persistence: - enabled: true - ## If defined, volume.beta.kubernetes.io/storage-class: - ## Default: volume.alpha.kubernetes.io/storage-class: default - ## - storageClass: {} - accessMode: ReadWriteOnce - size: 8Gi +consul: + storageSize: 1Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# provisioner: local-volume redis: - persistence: -## Use existing volume claim name - #existingClaim: cf-redis - storageClass: {} + storageSize: 8Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# provisioner: local-volume rabbitmq: - persistence: -## Use existing volume claim name - #existingClaim: cf-rabbitmq - storageClass: {} + storageSize: 8Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# services: rabbitmq-registry registry: + storageSize: 100Gi storageClass: {} -## Override default (4Gi) initial registry PV size - #storageSize: {} - ## Use existing volume claim name - #pvcName: cf-registry + existingPvc: {} nodeSelector: {} -# services: rabbitmq-registry -## Uncomment if needed to apply custom configuration to registry - #registryConfig: -## Insert custom registry configuration (https://docs.docker.com/registry/configuration/) - #version: 0.1 - #log: - #level: debug - #fields: - #service: registry - #storage: - #cache: - #blobdescriptor: inmemory - #s3: - #region: YOUR_REGION - #bucket: YOUR_BUCKET_NAME - #accesskey: AWS_ACCESS_KEY - #secretkey: AWS_SECRET_KEY - #http: - #addr: :5000 - #headers: - #X-Content-Type-Options: [nosniff] - #health: - #storagedriver: - #enabled: true - #interval: 10s - #threshold: 3 +# Insert custom registry configuration (https://docs.docker.com/registry/configuration/) +# registryConfig: +# version: 0.1 +# log: +# level: debug +# fields: +# service: registry +# storage: +# cache: +# blobdescriptor: inmemory +# s3: +# region: YOUR_REGION +# bucket: YOUR_BUCKET_NAME +# accesskey: AWS_ACCESS_KEY +# secretkey: AWS_SECRET_KEY +# http: +# addr: :5000 +# headers: +# X-Content-Type-Options: [nosniff] +# health: +# storagedriver: +# enabled: true +# interval: 10s +# threshold: 3 hermes: nodeSelector: {} @@ -183,31 +128,31 @@ cronus: # services: rabbitmq-registry builder: -## Use existing volume claim name - #pvcName: cf-builder + nodeSelector: {} ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override builder PV initial size varLibDockerVolume: - storageClass: {} storageSize: 100Gi + existingPvc: {} + storageClass: {} runner: -## Use existing volume claim name - #pvcName: cf-runner + nodeSelector: {} ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override runner PV initial size varLibDockerVolume: - storageClass: {} storageSize: 100Gi + existingPvc: {} + storageClass: {} -helm-repo-manager: - RepoUrlPrefix: "cm://" +# helm-repo-manager: +# RepoUrlPrefix: "cm://" -backups: - #enabled: true - awsAccessKey: - awsSecretAccessKey: - s3Url: s3:// +# backups: +# #enabled: true +# awsAccessKey: +# awsSecretAccessKey: +# s3Url: s3:// From 83e9149f78e8364821b6cd2351fb77fafd3d7216 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Tue, 29 Jan 2019 19:43:30 +0200 Subject: [PATCH 06/16] 29-jan --- README.md | 24 ++++++------------------ env-vars | 2 +- values.yaml.tpl | 8 +++----- 3 files 
changed, 10 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index e61ec37..b9dc8f3 100644 --- a/README.md +++ b/README.md @@ -12,11 +12,8 @@ Before running `cf-onprem` script it is needed to: * make configuration changes specific for each customer There are three files that customize `codefresh` chart deployment: -* `sa-dec.json` contains GCP service account that enables a customer to pull codefresh images -* `values.yaml` contains different parameters for chart customization -* `values-dec.yaml` contains secrets such as `githubClientSecret`, etc. +* `values.yaml.tpl` contains template of values.yaml for different parameters for chart customization -Also to be able to encrypt `*-dec.*` files and decrypt `*-enc.*` files `aws cli` should be configured with permissions to use AWS KMS service and [sops](https://github.com/mozilla/sops/releases) binary installed on your system. ### How to run 1. Clone [onprem](https://github.com/codefresh-io/onprem) repository @@ -24,17 +21,8 @@ Also to be able to encrypt `*-dec.*` files and decrypt `*-enc.*` files `aws cli` git clone git@github.com:codefresh-io/onprem.git cd onprem ``` -2. Decrypt `sa-enc.json` and `values-enc.yaml` files -``` -./sops.sh -d -``` -3. Make configuration changes in `sa-dec.json`, `values.yaml`, `values-dec.yaml` files and customize variables in `env-vars` file -4. Run `cf-onprem` script -5. If it is needed to upload new configuration into remote repository then encrypt `sa-dec.json`, `values-dec.yaml` files -``` -./sops.sh -e -``` -6. Commit and push changes -``` -git push origin master -``` \ No newline at end of file +2. cp `values.yaml.tpl` `values.yaml` + +3. Edit values.yaml + +4. run `./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ]` \ No newline at end of file diff --git a/env-vars b/env-vars index a93498f..db90cb7 100644 --- a/env-vars +++ b/env-vars @@ -1,2 +1,2 @@ -export CF_HELM_CHANNEL= +export CF_HELM_CHANNEL=test export CF_HELM_VERSION= \ No newline at end of file diff --git a/values.yaml.tpl b/values.yaml.tpl index a3a9547..3556809 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -113,12 +113,10 @@ hermes: redis: ## Set hermes store password. It is mandatory redisPassword: verysecurepassword + storageSize: 8Gi + storageClass: {} + existingPvc: {} nodeSelector: {} -# services: rabbitmq-registry - persistence: -## Use existing volume claim name - #existingClaim: cf-store - storageClass: {} cronus: storageClass: {} From 6fcf6ac5e9b7a825b0db7b68bdb74c2f561bd118 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Fri, 1 Feb 2019 19:28:14 +0200 Subject: [PATCH 07/16] sc validator --- cf-onprem | 5 +- run-validator.sh | 18 ++++- validator/templates/storageclasses/pvcs.yaml | 78 +++++++++++++++++--- values.yaml.tpl | 5 +- 4 files changed, 87 insertions(+), 19 deletions(-) diff --git a/cf-onprem b/cf-onprem index 03ad3cc..607a66b 100755 --- a/cf-onprem +++ b/cf-onprem @@ -321,12 +321,13 @@ EOF [ -n "${WebTlsKey}" ] && [ -f "${WebTlsKey}" ] && [ -n "${WebTlsCert}" ] && [ -f "${WebTlsCert}" ] && [ -f "${WEBTLS_VALUES_FILE}" ] && WEBTLS_VALUES="--values ${WEBTLS_VALUES_FILE}" -cf_status=$(helm ls -q cf) +RELEASE=cf +cf_status=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') [ -z "${cf_status}" ] && SEEDJOBS="--set global.seedJobs=true" && CERTJOBS="--set global.certsJobs=true" msg "Installing/Updating Codefresh..." 
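+# `helm upgrade --install` keeps this command idempotent: the same invocation
+# performs the first install and any later upgrade. The seed/cert jobs are only
+# enabled when no release exists yet, detected above with (helm 2.x status
+# output format assumed):
+#   helm status cf 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}'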
-helm upgrade cf codefresh-onprem-${CHANNEL}/codefresh \ +helm upgrade ${RELEASE} codefresh-onprem-${CHANNEL}/codefresh \ --install \ --namespace codefresh \ --values "${VALUES_FILE}" \ diff --git a/run-validator.sh b/run-validator.sh index 28a3374..4a28548 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -23,6 +23,16 @@ approveContext() { approveContext +## Get default storage class +SC_DEFAULT_QUERY='{{ range .items }}' +SC_DEFAULT_QUERY+='{{if .metadata.annotations }}{{if (index .metadata.annotations "storageclass.beta.kubernetes.io/is-default-class") }}' +SC_DEFAULT_QUERY+='{{ .metadata.name }}{{"\n"}}' +SC_DEFAULT_QUERY+='{{end}}{{end}}{{end}}' +DEFAULT_STORAGE_CLASS=$(kubectl -ogo-template="$SC_DEFAULT" get sc) +if [[ -n "${DEFAULT_STORAGE_CLASS}" ]]; then + DEFAULT_STORAGE_CLASS_PARAM="--set defaultStorageClass=${DEFAULT_STORAGE_CLASS}" +fi + RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') if [[ -n "${RELEASE_STATUS}" ]]; then echo "There is a previous run of $RELEASE with status $RELEASE_STATUS , deleting it" @@ -33,7 +43,7 @@ VALUES_FILE=${DIR}/values.yaml HELM=${HELM:-helm} -HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART -f ${VALUES_FILE} --timeout $HELM_TIMEOUT --wait $@" +HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART -f ${VALUES_FILE} ${DEFAULT_STORAGE_CLASS_PARAM} --timeout $HELM_TIMEOUT --wait $@" echo "Running ${RELEASE} helm release $HELM_COMMAND @@ -45,7 +55,7 @@ HELM_PID=$! echo "Waiting ${HELM_TIMEOUT}s for validator release to complete ... You can view a progress by running the command below in separate shell -kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} \" +kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} " wait $HELM_PID @@ -56,10 +66,10 @@ if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then helm delete $RELEASE --purge echo "Validation Complete Successfully" else - kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} + # kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} echo "Validation FAILED. There are failed or pending resources -Use kubectl desribe $RELEASE to see the cause +Use kubectl desribe ${RELEASE}-* to see the cause " exit 1 fi diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml index 889b1f7..d520e60 100644 --- a/validator/templates/storageclasses/pvcs.yaml +++ b/validator/templates/storageclasses/pvcs.yaml @@ -1,14 +1,40 @@ -{{- $scDict := dict }} -{{- range $key, $value := .Values }} -{{- if and (not (empty $value)) (eq (kindOf $value) "map") }} - {{- with index $value "persistence" }} - {{- if .storageClass }} - {{- $_ := set $scDict .storageClass "exists" }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} +{{- $scDict := dict -}} +{{- $existingPvcsDict := dict -}} +{{- $checkSc := false -}} +{{- $checkDefaultSc := false -}} + +{{/* Fill dict persistent services */}} +{{- $persistentServices := dict -}} +{{- $persistentServiceNames := list "mongodb" "postgresql" "consul" "redis" "rabbitmq" "registry" "cronus" -}} +{{- range $persistentServiceNames -}} +{{- if (index $.Values . ) -}} +{{- $_ := set $persistentServices . (index $.Values .) 
-}} +{{- end -}} +{{- end -}} + +{{- if .Values.hermes -}} {{- if .Values.hermes.redis -}} +{{- $_ := set $persistentServices "store" .Values.hermes.redis -}} +{{- end -}}{{- end -}} +{{- range $k, $v := $persistentServices -}} + {{- if .existingPvc }} + {{- $_ := set $existingPvcsDict $v.existingPvc "exists" -}} + {{- else if $v.storageClass -}} + {{- $_ := set $scDict $v.storageClass "exists" -}} + {{- $checkSc = true -}} + {{- else -}} + {{- $checkDefaultSc = true -}} + {{- if and (empty $.Values.global.storageClass) (empty $.Values.defaultStorageClass ) -}} + {{- fail (printf "No global.storageClass or kubernetes default storage class defined, persistent service %s will not be able to start" $k) -}} + {{- end -}} + + {{- $_ := set $scDict (coalesce $.Values.global.storageClass $.Values.defaultStorageClass ) "exists" -}} + {{- end -}} +{{- end -}} + +{{- if .Values.global.storageClass -}} +{{- $_ := set $scDict .Values.global.storageClass "exists" -}} +{{- end -}} {{- range $storageClass := keys $scDict }} kind: PersistentVolumeClaim @@ -25,4 +51,36 @@ spec: requests: storage: 1Gi --- + {{- end }} + + +{{- range $pvc := keys $existingPvcDict }} +apiVersion: v1 +kind: Pod +metadata: + name: test-pvc-{{- -}} + labels: + app: {{ $.Release.Name }} +spec: + containers: + - image: alpine:3.7 + name: test-pvc-{{- -}} + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /data + name: data + readinessProbe: + exec: + command: + - touch + - /data/tst1 + initialDelaySeconds: 1 + periodSeconds: 3 + volumes: + - name: data + persistentVolumeClaim: + claimName: {{ }} +--- + +{{- end -}} \ No newline at end of file diff --git a/values.yaml.tpl b/values.yaml.tpl index 3556809..6f1fee5 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -119,11 +119,10 @@ hermes: nodeSelector: {} cronus: + storageSize: 1Gi storageClass: {} -## Use existing volume claim name - #pvcName: cf-cronus + existingPvc: {} nodeSelector: {} -# services: rabbitmq-registry builder: nodeSelector: {} From 3d2397543c6a4fab5fefa950f4a455a91eff0c73 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Sat, 2 Feb 2019 17:06:20 +0200 Subject: [PATCH 08/16] local-volumes -1 --- README.md | 11 ++-- local-volumes/Chart.yaml | 19 +++++++ local-volumes/templates/_helpers.tpl | 16 ++++++ local-volumes/templates/mkdir-pod.yaml | 56 +++++++++++++++++++ .../templates/persistent-volumes.yaml | 47 ++++++++++++++++ local-volumes/templates/pvcs.yaml | 21 +++++++ local-volumes/templates/storageclass.yaml | 10 ++++ scripts/helpers.sh | 14 +++++ scripts/local-volumes.sh | 49 ++++++++++++++++ validator/templates/storageclasses/pvcs.yaml | 18 +++--- values.yaml.tpl | 3 + 11 files changed, 253 insertions(+), 11 deletions(-) create mode 100644 local-volumes/Chart.yaml create mode 100755 local-volumes/templates/_helpers.tpl create mode 100644 local-volumes/templates/mkdir-pod.yaml create mode 100644 local-volumes/templates/persistent-volumes.yaml create mode 100644 local-volumes/templates/pvcs.yaml create mode 100644 local-volumes/templates/storageclass.yaml create mode 100755 scripts/local-volumes.sh diff --git a/README.md b/README.md index b9dc8f3..7f4f017 100644 --- a/README.md +++ b/README.md @@ -16,13 +16,16 @@ There are three files that customize `codefresh` chart deployment: ### How to run -1. Clone [onprem](https://github.com/codefresh-io/onprem) repository +* Clone [onprem](https://github.com/codefresh-io/onprem) repository ``` git clone git@github.com:codefresh-io/onprem.git cd onprem ``` -2. 
cp `values.yaml.tpl` `values.yaml` +* cp `values.yaml.tpl` `values.yaml` -3. Edit values.yaml +* Edit values.yaml -4. run `./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ]` \ No newline at end of file +* Validate values and cluster + `./run-validator.sh` + +* run `./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ]` \ No newline at end of file diff --git a/local-volumes/Chart.yaml b/local-volumes/Chart.yaml new file mode 100644 index 0000000..c93f9ce --- /dev/null +++ b/local-volumes/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: "v1" +name: local-volumes +version: 1.0.0 +kubeVersion: "1.10.0 - 2.0.0" +description: Creates local volumes for Codefresh onprem installation +keywords: + - codefresh + - onprem + - local-volumes +home: https://codefresh.io/ +sources: + - https://github.com/codefresh-io/onprem +maintainers: + - name: Codefresh Authors + email: dev@codefresh.io +engine: gotpl +icon: https://codefresh.io/docs/assets/brand/codefresh-social-logo.png +appVersion: v2.0.10 +tillerVersion: ">2.9.0" \ No newline at end of file diff --git a/local-volumes/templates/_helpers.tpl b/local-volumes/templates/_helpers.tpl new file mode 100755 index 0000000..f0d83d2 --- /dev/null +++ b/local-volumes/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/local-volumes/templates/mkdir-pod.yaml b/local-volumes/templates/mkdir-pod.yaml new file mode 100644 index 0000000..f9f2589 --- /dev/null +++ b/local-volumes/templates/mkdir-pod.yaml @@ -0,0 +1,56 @@ +{{- if .Values.mkdirPods }} +{{- $pathes := list }} +{{- range $k, $v := .Values.volumes }} + + {{- $path := "" }} + {{- if $v.localPath }} + {{- $path = isAbs $v.localPath | ternary $v.localPath (printf "%s/%s" $.Values.basePath $v.localPath) }} + {{- else }} + {{- $path = printf "%s/%s%s" $.Values.basePath $.Values.namePrefix $k }} + {{- end }} + {{- if or (empty $path) (eq $path "/") }} + {{- fail "Cannot calculate path for local volumes. Specify values for .Values.basePath or volumes.name.path " }} + {{- end }} + {{- $pathes = append $pathes $path }} +{{- end }} + +{{- range .Values.mkdirPods.nodes }} +{{- $nodeNameSplit := splitn "." 2 . }} +{{- $podName := $nodeNameSplit._0 }} +--- +apiVersion: v1 +kind: Pod +metadata: + name: {{ printf "mkdir-%s%s-%s" $.Values.namePrefix $podName (randAlphaNum 5 | lower)}} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +spec: + restartPolicy: Never + nodeSelector: + kubernetes.io/hostname: {{ . }} + containers: + - image: alpine:3.7 + name: mkdir + command: + - /bin/sh + - "-ec" + - | + {{- range $pathes }} + mkdir -pv {{ printf "/hostroot%s" . 
}} + {{- end }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /hostroot + readOnly: false + name: hostroot + volumes: + - name: hostroot + hostPath: + path: "/" + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/persistent-volumes.yaml b/local-volumes/templates/persistent-volumes.yaml new file mode 100644 index 0000000..3cbc57c --- /dev/null +++ b/local-volumes/templates/persistent-volumes.yaml @@ -0,0 +1,47 @@ +{{- range $k, $v := .Values.volumes }} + +{{- $nodeSelector := default $.Values.defaultNodeSelector $v.nodeSelector -}} +{{- if empty $nodeSelector -}} + {{- fail "Cannot find nodeSelector for local volumes. Specify values for defaultNodeSelector or volume specific nodeSelector" -}} +{{- end }} + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ printf "%s%s" $.Values.namePrefix $k }} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +spec: + capacity: + storage: {{ default "40Gi" $v.storageSize}} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{$.Values.namePrefix}}local-storage + local: + {{- $path := "" }} + {{- if $v.path }} + {{- $path = isAbs $v.path | ternary $v.path (printf "%s/%s" $.Values.basePath $v.path) }} + {{- else }} + {{- $path = printf "%s/%s%s" $.Values.basePath $.Values.namePrefix $k }} + {{- end }} + {{- if or (empty $path) (eq $path "/") }} + {{- fail "Cannot calculate path for local volumes. Specify values for .Values.basePath or volumes..path " }} + {{- end }} + path: {{ $path }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + {{- range $s, $d := $nodeSelector }} + {{- if empty $d }}{{- fail (printf "Empty Node Selector Value for %s or all" $k) }}{{- end }} + - key: {{ $s }} + operator: In + values: + - {{ $d }} + {{- end }} +--- +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/pvcs.yaml b/local-volumes/templates/pvcs.yaml new file mode 100644 index 0000000..9579636 --- /dev/null +++ b/local-volumes/templates/pvcs.yaml @@ -0,0 +1,21 @@ +{{- range $k, $v := .Values.volumes }} + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ printf "%s%s" $.Values.namePrefix $k }} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "40Gi" $v.storageSize}} + volumeName: {{ printf "%s%s" $.Values.namePrefix $k }} + storageClassName: {{$.Values.namePrefix}}local-storage +--- +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/storageclass.yaml b/local-volumes/templates/storageclass.yaml new file mode 100644 index 0000000..6ed7d6e --- /dev/null +++ b/local-volumes/templates/storageclass.yaml @@ -0,0 +1,10 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{.Values.namePrefix}}local-storage + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +provisioner: kubernetes.io/no-provisioner \ No newline at end of file diff --git a/scripts/helpers.sh b/scripts/helpers.sh index 3236ffb..7e46f99 100755 --- a/scripts/helpers.sh +++ b/scripts/helpers.sh @@ -27,4 +27,18 @@ function regexMatch() { else err "$key contains invalid value: $value" fi 
+} + +approveContext() { + echo "Your kubectl is configured with the following context: " + kubectl config current-context + read -r -p "Are you sure you want to continue? [y/N] " response + + if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] + then + echo "" + else + echo "Exiting..." + exit 0 + fi } \ No newline at end of file diff --git a/scripts/local-volumes.sh b/scripts/local-volumes.sh new file mode 100755 index 0000000..1c8c06f --- /dev/null +++ b/scripts/local-volumes.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# + +DIR=$(dirname $0) +RELEASE=cf-local-volumes +CHART=$(realpath ${DIR}/../local-volumes) +NAMESPACE=${NAMESPACE:-codefresh} +HELM_TIMEOUT=60 + +source ${DIR}/helpers.sh + +approveContext + +RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') +if [[ -n "${RELEASE_STATUS}" ]]; then + echo "There is a previous run of $RELEASE with status $RELEASE_STATUS +Run: helm status cf-local-volumes; to check the status of the release +Or run: helm del --purge cf-local-volumes; to delete it + + " + exit 1 +fi + +VALUES_FILE=${DIR}/values.yaml + +HELM=${HELM:-helm} + +HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART $@" + +echo "Running ${RELEASE} helm release +$HELM_COMMAND +" + +eval $HELM_COMMAND & +HELM_PID=$! + +wait $HELM_PID +HELM_EXIT_STATUS=$? + +if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then + echo "Local Volumes chart has been submitted. Run the command below to insect the status + kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} + " +else + echo " + Local Volumes chart submission FAILED." +fi + +exit $HELM_EXIT_STATUS \ No newline at end of file diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml index d520e60..67b39b9 100644 --- a/validator/templates/storageclasses/pvcs.yaml +++ b/validator/templates/storageclasses/pvcs.yaml @@ -17,7 +17,7 @@ {{- end -}}{{- end -}} {{- range $k, $v := $persistentServices -}} - {{- if .existingPvc }} + {{- if $v.existingPvc }} {{- $_ := set $existingPvcsDict $v.existingPvc "exists" -}} {{- else if $v.storageClass -}} {{- $_ := set $scDict $v.storageClass "exists" -}} @@ -51,21 +51,21 @@ spec: requests: storage: 1Gi --- - {{- end }} -{{- range $pvc := keys $existingPvcDict }} +{{- range $k, $v := $persistentServices -}} +{{- if $v.existingPvc }} apiVersion: v1 kind: Pod metadata: - name: test-pvc-{{- -}} + name: test-pvc-{{ $v.existingPvc }} labels: app: {{ $.Release.Name }} spec: containers: - image: alpine:3.7 - name: test-pvc-{{- -}} + name: test-pvc-{{ $v.existingPvc }} imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /data @@ -80,7 +80,11 @@ spec: volumes: - name: data persistentVolumeClaim: - claimName: {{ }} + claimName: {{ $v.existingPvc }} + {{- if $v.nodeSelector }} + nodeSelector: +{{ toYaml $v.nodeSelector | indent 4 }} + {{- end }} --- - +{{- end -}} {{- end -}} \ No newline at end of file diff --git a/values.yaml.tpl b/values.yaml.tpl index 6f1fee5..90c2ea2 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -10,6 +10,9 @@ global: # Storage class for all persistent services # storageClass: {} + localStorage: true + localStorageNodeSelector: + kubernetes.io/hostname: node-01 ### MTU Value for dockerd in builder and runner # mtu: 1400 From f6ab3f19ec47fd953b5ca23f679837f63400c1a7 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Sun, 3 Feb 2019 18:36:50 +0200 Subject: [PATCH 09/16] before testing -1 --- README.md | 21 +++- .../create-local-pvcs.sh | 2 +- local-volumes/templates/mkdir-pod.yaml | 3 + 
run-validator.sh | 31 +++--- scripts/helpers.sh | 3 +- validator/templates/values-validator.yaml | 3 + values.yaml.tpl | 97 ++++++++++--------- 7 files changed, 93 insertions(+), 67 deletions(-) rename scripts/local-volumes.sh => local-volumes/create-local-pvcs.sh (96%) diff --git a/README.md b/README.md index 7f4f017..887cb76 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,25 @@ cd onprem * cp `values.yaml.tpl` `values.yaml` * Edit values.yaml +Mandatory to set `global.appUrl` and `firebaseToken` + +* Running on local volumes +Codefresh can run on local volumes - https://kubernetes.io/docs/concepts/storage/volumes/#local + +To create local volumes edit `local-volumes/values.yaml`, set: + - defaultNodeSelector + - mkdirPods.nodes + +then run `local-volumes/create-local-pvcs.sh` + * Validate values and cluster `./run-validator.sh` - -* run `./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ]` \ No newline at end of file + It will validate: + - values.yaml + - ability to create launch persistent services + +* run Intaller: + ``` + ./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ] + ``` \ No newline at end of file diff --git a/scripts/local-volumes.sh b/local-volumes/create-local-pvcs.sh similarity index 96% rename from scripts/local-volumes.sh rename to local-volumes/create-local-pvcs.sh index 1c8c06f..6d78523 100755 --- a/scripts/local-volumes.sh +++ b/local-volumes/create-local-pvcs.sh @@ -7,7 +7,7 @@ CHART=$(realpath ${DIR}/../local-volumes) NAMESPACE=${NAMESPACE:-codefresh} HELM_TIMEOUT=60 -source ${DIR}/helpers.sh +source ${DIR}/../scripts/helpers.sh approveContext diff --git a/local-volumes/templates/mkdir-pod.yaml b/local-volumes/templates/mkdir-pod.yaml index f9f2589..d736b2f 100644 --- a/local-volumes/templates/mkdir-pod.yaml +++ b/local-volumes/templates/mkdir-pod.yaml @@ -1,4 +1,7 @@ {{- if .Values.mkdirPods }} +{{- if empty .Values.mkdirPods.nodes }} +{{- fail "mkdirPod cannot run - No nodes in .Values.mkdirPods.nodes" }} +{{- end }} {{- $pathes := list }} {{- range $k, $v := .Values.volumes }} diff --git a/run-validator.sh b/run-validator.sh index 4a28548..8702f94 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -7,19 +7,13 @@ CHART=${DIR}/validator NAMESPACE=${NAMESPACE:-codefresh} HELM_TIMEOUT=60 -approveContext() { - echo "Your kubectl is configured with the following context: " - kubectl config current-context - read -r -p "Are you sure you want to continue? [y/N] " response - - if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] - then - echo "" - else - echo "Exiting..." - exit 0 - fi -} +VALUES_FILE=${DIR}/values.yaml +if [[ ! 
-f "${VALUES_FILE}" ]]; then + echo "Error: values file ${VALUES_FILE} does not exist" + exit 1 +fi + +source ${DIR}/scripts/helpers.sh approveContext @@ -28,7 +22,7 @@ SC_DEFAULT_QUERY='{{ range .items }}' SC_DEFAULT_QUERY+='{{if .metadata.annotations }}{{if (index .metadata.annotations "storageclass.beta.kubernetes.io/is-default-class") }}' SC_DEFAULT_QUERY+='{{ .metadata.name }}{{"\n"}}' SC_DEFAULT_QUERY+='{{end}}{{end}}{{end}}' -DEFAULT_STORAGE_CLASS=$(kubectl -ogo-template="$SC_DEFAULT" get sc) +DEFAULT_STORAGE_CLASS=$(kubectl -ogo-template="$SC_DEFAULT_QUERY" get sc) if [[ -n "${DEFAULT_STORAGE_CLASS}" ]]; then DEFAULT_STORAGE_CLASS_PARAM="--set defaultStorageClass=${DEFAULT_STORAGE_CLASS}" fi @@ -39,8 +33,6 @@ if [[ -n "${RELEASE_STATUS}" ]]; then helm delete $RELEASE --purge fi -VALUES_FILE=${DIR}/values.yaml - HELM=${HELM:-helm} HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART -f ${VALUES_FILE} ${DEFAULT_STORAGE_CLASS_PARAM} --timeout $HELM_TIMEOUT --wait $@" @@ -67,9 +59,10 @@ if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then echo "Validation Complete Successfully" else # kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE} - echo "Validation FAILED. There are failed or pending resources - -Use kubectl desribe ${RELEASE}-* to see the cause + echo " +Validation FAILED. See the messages above +Check failed or pending resources by: +kubectl desribe ${RELEASE}-* to see the cause " exit 1 fi diff --git a/scripts/helpers.sh b/scripts/helpers.sh index 7e46f99..f808e17 100755 --- a/scripts/helpers.sh +++ b/scripts/helpers.sh @@ -31,7 +31,8 @@ function regexMatch() { approveContext() { echo "Your kubectl is configured with the following context: " - kubectl config current-context + CURRENT_CONTEXT=$(kubectl config current-context) + kubectl config get-contexts ${CURRENT_CONTEXT} read -r -p "Are you sure you want to continue? [y/N] " response if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] diff --git a/validator/templates/values-validator.yaml b/validator/templates/values-validator.yaml index 10deb62..ab3d960 100644 --- a/validator/templates/values-validator.yaml +++ b/validator/templates/values-validator.yaml @@ -3,6 +3,9 @@ {{- $errors = printf "%s\n%s" $errors "global.appUrl is empty" }} {{- end }} +{{- if empty .Values.firebaseSecret }} +{{- $errors = printf "%s\n%s" $errors "firebaseSecret is empty" }} +{{- end }} {{- if not ( empty $errors ) }} {{- fail $errors }} diff --git a/values.yaml.tpl b/values.yaml.tpl index 90c2ea2..92254da 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -9,10 +9,11 @@ global: appUrl: your-domain.com # Storage class for all persistent services -# storageClass: {} - localStorage: true - localStorageNodeSelector: - kubernetes.io/hostname: node-01 +# storageClass: my-storage-class + +# Default nodeSelector for storage pods. 
Useful in case of local volumes +# storagePodNodeSelector: +# kubernetes.io/hostname: storage-host-01 ### MTU Value for dockerd in builder and runner # mtu: 1400 @@ -43,7 +44,7 @@ firebaseSecret: # storageClass: ceph-pool-1 # storageSize: 8Gi # -# Example 2, rabbitmq on precreated pvc for local volume on cpecific volume +# Example 2, postgresql on precreated pvc for local volume on cpecific volume # # postgresql: # existingPvc: cf-postgress-lv @@ -52,39 +53,62 @@ firebaseSecret: mongodb: storageSize: 8Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-mongodb + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 postgresql: storageSize: 8Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-postgesql + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 consul: storageSize: 1Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-consul + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 redis: storageSize: 8Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-redis + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 rabbitmq: storageSize: 8Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-rabbitmq + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 + +cronus: + storageSize: 1Gi + storageClass: + #existingPvc: cf-cronus + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 + +hermes: + redis: +## Set hermes store password. It is mandatory + redisPassword: verysecurepassword + storageSize: 8Gi + storageClass: + #existingPvc: cf-store + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 registry: storageSize: 100Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} + storageClass: + #existingPvc: cf-registry + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 # Insert custom registry configuration (https://docs.docker.com/registry/configuration/) # registryConfig: # version: 0.1 @@ -110,23 +134,6 @@ registry: # interval: 10s # threshold: 3 -hermes: - nodeSelector: {} -# services: rabbitmq-registry - redis: -## Set hermes store password. It is mandatory - redisPassword: verysecurepassword - storageSize: 8Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} - -cronus: - storageSize: 1Gi - storageClass: {} - existingPvc: {} - nodeSelector: {} - builder: nodeSelector: {} ## Set time to run docker cleaner @@ -134,8 +141,8 @@ builder: ## Override builder PV initial size varLibDockerVolume: storageSize: 100Gi - existingPvc: {} - storageClass: {} + storageClass: + #existingPvc: cf-builder-0 runner: nodeSelector: {} @@ -144,8 +151,10 @@ runner: ## Override runner PV initial size varLibDockerVolume: storageSize: 100Gi - existingPvc: {} - storageClass: {} + storageClass: + #existingPvc: cf-runner-0 + + # helm-repo-manager: # RepoUrlPrefix: "cm://" From 7a4dddbf3fc658c03da0f0412d8103120d917f38 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Sun, 3 Feb 2019 19:12:52 +0200 Subject: [PATCH 10/16] before testing -1 --- README.md | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 887cb76..2bdfe7e 100644 --- a/README.md +++ b/README.md @@ -16,31 +16,33 @@ There are three files that customize `codefresh` chart deployment: ### How to run -* Clone [onprem](https://github.com/codefresh-io/onprem) repository +1. 
Clone [onprem](https://github.com/codefresh-io/onprem) repository ``` git clone git@github.com:codefresh-io/onprem.git cd onprem ``` -* cp `values.yaml.tpl` `values.yaml` +2. cp `values.yaml.tpl` `values.yaml` -* Edit values.yaml +3. Edit values.yaml Mandatory to set `global.appUrl` and `firebaseToken` -* Running on local volumes -Codefresh can run on local volumes - https://kubernetes.io/docs/concepts/storage/volumes/#local + ##### Running on local volumes + Codefresh can run on local volumes - https://kubernetes.io/docs/concepts/storage/volumes/#local -To create local volumes edit `local-volumes/values.yaml`, set: - - defaultNodeSelector - - mkdirPods.nodes + To create local volumes edit `local-volumes/values.yaml`, set: + - defaultNodeSelector + - mkdirPods.nodes -then run `local-volumes/create-local-pvcs.sh` + then run `local-volumes/create-local-pvcs.sh` + edit values.yaml and set the values for `existingPvc`s - -* Validate values and cluster +4 Validate values and cluster `./run-validator.sh` It will validate: - values.yaml - - ability to create launch persistent services + - ability to launch persistent services on specified storage classes + - ability to launch persistent services on specified existing pvcs + - To do: validating networks, dns, loadbalances, ingress * run Intaller: ``` From 4b0e794cee760188340d47a75f875ba15e693fdd Mon Sep 17 00:00:00 2001 From: kosta709 Date: Sun, 3 Feb 2019 19:13:51 +0200 Subject: [PATCH 11/16] before testing -1 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2bdfe7e..eec5e8a 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Mandatory to set `global.appUrl` and `firebaseToken` then run `local-volumes/create-local-pvcs.sh` edit values.yaml and set the values for `existingPvc`s -4 Validate values and cluster +4. Validate values and cluster `./run-validator.sh` It will validate: - values.yaml @@ -44,7 +44,7 @@ Mandatory to set `global.appUrl` and `firebaseToken` - ability to launch persistent services on specified existing pvcs - To do: validating networks, dns, loadbalances, ingress -* run Intaller: +5. 
run Intaller: ``` ./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ] ``` \ No newline at end of file From 1ff1b0f33d64d573302c9724353320ef23c0d54b Mon Sep 17 00:00:00 2001 From: kosta709 Date: Tue, 5 Feb 2019 17:23:00 +0200 Subject: [PATCH 12/16] fixed nomios ingress --- cf-onprem | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cf-onprem b/cf-onprem index 607a66b..ed7f082 100755 --- a/cf-onprem +++ b/cf-onprem @@ -15,7 +15,7 @@ readonly HELM_VERSION="${CF_HELM_VERSION:-2.10.0}" readonly CHANNEL="${CF_HELM_CHANNEL:-dev}" readonly CODEFRESH_REPOSITORY=http://charts.codefresh.io/${CHANNEL} -#export KUBECONFIG=./.kube/config +readonly RELEASE=cf readonly WORKING_DIR="$(dirname "$0")" readonly SERVICE_ACCOUNT="${WORKING_DIR}/sa.json" @@ -179,6 +179,9 @@ cat <<-EOF >${WEBTLS_VALUES_FILE} --- ingress: webTlsSecretName: "star.codefresh.io" +nomios: + ingress: + webTlsSecretName: "star.codefresh.io" webTLS: secretName: star.codefresh.io key: | @@ -321,7 +324,6 @@ EOF [ -n "${WebTlsKey}" ] && [ -f "${WebTlsKey}" ] && [ -n "${WebTlsCert}" ] && [ -f "${WebTlsCert}" ] && [ -f "${WEBTLS_VALUES_FILE}" ] && WEBTLS_VALUES="--values ${WEBTLS_VALUES_FILE}" -RELEASE=cf cf_status=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') [ -z "${cf_status}" ] && SEEDJOBS="--set global.seedJobs=true" && CERTJOBS="--set global.certsJobs=true" From 76d561e7484c0e6943405e99aae6bd03d319e380 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Thu, 7 Feb 2019 10:45:52 +0200 Subject: [PATCH 13/16] refactoring functions --- cf-onprem | 145 +-------------------------------------------- run-validator.sh | 9 +++ scripts/helpers.sh | 122 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 143 deletions(-) diff --git a/cf-onprem b/cf-onprem index ed7f082..56bedb5 100755 --- a/cf-onprem +++ b/cf-onprem @@ -2,16 +2,13 @@ # #set -x - -msg() { echo -e "\e[32mINFO [$(date +%F\ %T)] ---> $1\e[0m"; } -warning() { echo -e "\e[33mWARNING [$(date +%F\ %T)] ---> $1\e[0m"; } -err() { echo -e "\e[31mERR [$(date +%F\ %T)] ---> $1\e[0m" ; exit 1; } +DIR=$(dirname $0) +source ${DIR}/scripts/helpers.sh if [ -f "./env-vars" ]; then . ./env-vars fi -readonly HELM_VERSION="${CF_HELM_VERSION:-2.10.0}" readonly CHANNEL="${CF_HELM_CHANNEL:-dev}" readonly CODEFRESH_REPOSITORY=http://charts.codefresh.io/${CHANNEL} @@ -29,144 +26,6 @@ usage() { exit 0 } -check() { command -v $1 >/dev/null 2>&1 || err "$1 binary is required!"; } - -ver() { printf "%03d%03d%03d%03d" $(echo "$1" | tr '.' ' '); } - -exists() { - if command -v $1 >/dev/null 2>&1; then - msg "$1 binary installed" - else - warning "Please install $1 to proceed" - exit 1 - fi -} - -run_as_root() { - if [[ $EUID > 0 ]]; then - err "Please run as root/sudo" - exit 1 - fi -} - -approveContext() { - msg "Your kubectl is configured with the following context: " - kubectl config current-context - read -r -p "Are you sure you want to continue? [y/N] " response - - if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] - then - msg "No problem, continuing with the script..." - else - msg "Exiting..." - exit 0 - fi -} - -checkHelmInstalled() { - if command -v $1 >/dev/null 2>&1; then - helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') - msg "helm is already installed and has version v$helm_version" - [ $(ver $helm_version) -lt $(ver $HELM_VERSION) ] && \ - err "You have older helm version than required. Please upgrade to v$HELM_VERSION or newer !" 
- else - warning "helm is not installed" - if [[ ! "$YES" == 'true' ]]; then - read -p "Do you want to install helm ? [y/n] " yn - case ${yn} in - y|Y) - helmInstall - ;; - *) - err "Need helm to deploy Codefresh app ! Exiting..." - #exit 1 - ;; - esac - else - helmInstall - fi - fi -} - -helmInstall() { - msg "Downloading and installing helm..." -<< //// - case "$(uname -s)" in - Linux) - os=linux - ;; - Darwin) - os=darwin - ;; - *) - ;; - esac -//// - wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -P /tmp/ - tar xvf /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -C /tmp/ - chmod +x /tmp/${os}-amd64/helm - sudo mv /tmp/${os}-amd64/helm /usr/local/bin/ - rm -rf /tmp/helm-v${HELM_VERSION}-${os}-amd64 /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -} - -checkTillerInstalled() { - status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') - if [ "$status" == "Running" ]; then - msg "Tiller is installed and running" - helm init -c - helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') - tiller_version=$(helm version --server --short | sed 's/.*\: v//' | sed 's/+.*//') - if [[ ! "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then - warning "You're running helm v$helm_version but tiller has v$tiller_version." - read -p " Do you want to upgrade tiller to v$helm_version ? [y/n] " yn - case ${yn} in - y|Y) - kubectl create -f ./tiller-rbac-config.yaml > /dev/null 2>&1 - helm init --upgrade --service-account tiller --wait - ;; - *) - err "You need to upgrade tiller ! Exiting..." - ;; - esac - fi - if [[ "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then - err "You're running helm v$helm_version but tiller has v$tiller_version . You need to upgrade tiller ! Exiting..." - fi - else - warning "Unable to determine tiller at its default location." - if [[ ! "$YES" == 'true' ]]; then - read -p " Do you want to deploy tiller ? [y/n] " yn - case ${yn} in - y|Y) - kubectl create -f ./tiller-rbac-config.yaml - helm init --service-account tiller --wait - ;; - *) - err "Need to deploy tiller ! Exiting..." - exit 1 - ;; - esac - else - kubectl create -f ./tiller-rbac-config.yaml - helm init --service-account tiller --wait - fi - fi - -} - -checkTillerStatus() { - while true; do - status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') - - msg "Tiller status = $status" - [ "$status" == "Running" ] && break - - msg "Sleeping 5 seconds ..." - sleep 5 - - done -} - generateWebTlsValuesFile() { WEBTLSKEY=$(cat ${WebTlsKey} | sed 's/^/ /') diff --git a/run-validator.sh b/run-validator.sh index 8702f94..6347da2 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -17,6 +17,15 @@ source ${DIR}/scripts/helpers.sh approveContext +msg "Checking helm binary on your system" +checkHelmInstalled "helm" + +msg "Checking if tiller is installed on kubernetes cluster" +checkTillerInstalled + +msg "Checking tiller status..." 
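+# checkTillerStatus (now defined in scripts/helpers.sh) simply polls the tiller
+# pod phase until it reports Running, roughly:
+#   kubectl -nkube-system get pod -l app=helm -l name=tiller \
+#     -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}'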
+checkTillerStatus + ## Get default storage class SC_DEFAULT_QUERY='{{ range .items }}' SC_DEFAULT_QUERY+='{{if .metadata.annotations }}{{if (index .metadata.annotations "storageclass.beta.kubernetes.io/is-default-class") }}' diff --git a/scripts/helpers.sh b/scripts/helpers.sh index f808e17..dc855bf 100755 --- a/scripts/helpers.sh +++ b/scripts/helpers.sh @@ -1,5 +1,22 @@ #!/bin/bash +msg() { echo -e "\e[32mINFO [$(date +%F\ %T)] ---> $1\e[0m"; } +warning() { echo -e "\e[33mWARNING [$(date +%F\ %T)] ---> $1\e[0m"; } +err() { echo -e "\e[31mERR [$(date +%F\ %T)] ---> $1\e[0m" ; exit 1; } + +check() { command -v $1 >/dev/null 2>&1 || err "$1 binary is required!"; } + +ver() { printf "%03d%03d%03d%03d" $(echo "$1" | tr '.' ' '); } + +exists() { + if command -v $1 >/dev/null 2>&1; then + msg "$1 binary installed" + else + warning "Please install $1 to proceed" + exit 1 + fi +} + function parse_yaml { local prefix=$2 local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') @@ -42,4 +59,109 @@ approveContext() { echo "Exiting..." exit 0 fi +} + +readonly HELM_VERSION="${CF_HELM_VERSION:-2.10.0}" +checkHelmInstalled() { + if command -v $1 >/dev/null 2>&1; then + helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') + msg "helm is already installed and has version v$helm_version" + [ $(ver $helm_version) -lt $(ver $HELM_VERSION) ] && \ + err "You have older helm version than required. Please upgrade to v$HELM_VERSION or newer !" + else + warning "helm is not installed" + if [[ ! "$YES" == 'true' ]]; then + read -p "Do you want to install helm ? [y/n] " yn + case ${yn} in + y|Y) + helmInstall + ;; + *) + err "Need helm to deploy Codefresh app ! Exiting..." + #exit 1 + ;; + esac + else + helmInstall + fi + fi +} + +helmInstall() { + msg "Downloading and installing helm..." +<< //// + case "$(uname -s)" in + Linux) + os=linux + ;; + Darwin) + os=darwin + ;; + *) + ;; + esac +//// + wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -P /tmp/ + tar xvf /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -C /tmp/ + chmod +x /tmp/${os}-amd64/helm + sudo mv /tmp/${os}-amd64/helm /usr/local/bin/ + rm -rf /tmp/helm-v${HELM_VERSION}-${os}-amd64 /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz +} + +checkTillerInstalled() { + status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') + if [ "$status" == "Running" ]; then + msg "Tiller is installed and running" + helm init -c + helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') + tiller_version=$(helm version --server --short | sed 's/.*\: v//' | sed 's/+.*//') + if [[ ! "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then + warning "You're running helm v$helm_version but tiller has v$tiller_version." + read -p " Do you want to upgrade tiller to v$helm_version ? [y/n] " yn + case ${yn} in + y|Y) + kubectl create -f ./tiller-rbac-config.yaml > /dev/null 2>&1 + helm init --upgrade --service-account tiller --wait + ;; + *) + err "You need to upgrade tiller ! Exiting..." + ;; + esac + fi + if [[ "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then + err "You're running helm v$helm_version but tiller has v$tiller_version . You need to upgrade tiller ! Exiting..." + fi + else + warning "Unable to determine tiller at its default location." + if [[ ! 
"$YES" == 'true' ]]; then + read -p " Do you want to deploy tiller ? [y/n] " yn + case ${yn} in + y|Y) + kubectl create -f ./tiller-rbac-config.yaml + helm init --service-account tiller --wait + ;; + *) + err "Need to deploy tiller ! Exiting..." + exit 1 + ;; + esac + else + kubectl create -f ./tiller-rbac-config.yaml + helm init --service-account tiller --wait + fi + fi + +} + +checkTillerStatus() { + while true; do + status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') + + msg "Tiller status = $status" + [ "$status" == "Running" ] && break + + msg "Sleeping 5 seconds ..." + sleep 5 + + done } \ No newline at end of file From f7effaa943e5dbb5edb2832cf9b228d820b7c995 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Mon, 11 Feb 2019 11:02:14 +0200 Subject: [PATCH 14/16] Local volumes values and readme --- .gitignore | 2 +- local-volumes/README.md | 11 +++++++ local-volumes/values.yaml.tmpl | 52 ++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 local-volumes/README.md create mode 100644 local-volumes/values.yaml.tmpl diff --git a/.gitignore b/.gitignore index 86ad640..b580927 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,7 @@ assets tmp/ # values.yaml -values.yaml +/values.yaml # decrypted files **/*-dec.* diff --git a/local-volumes/README.md b/local-volumes/README.md new file mode 100644 index 0000000..76931e4 --- /dev/null +++ b/local-volumes/README.md @@ -0,0 +1,11 @@ +### Local Volumes helm chart +Creates Loval volumes and pvcs, makes directories on the nodes + +Copy from template and edit values.yaml +Set +``` +cp values.yaml.tmpl values.yaml +vi values.yaml + +./create-local-pvcs.sh +``` diff --git a/local-volumes/values.yaml.tmpl b/local-volumes/values.yaml.tmpl new file mode 100644 index 0000000..bf59145 --- /dev/null +++ b/local-volumes/values.yaml.tmpl @@ -0,0 +1,52 @@ +namePrefix: cf- +basePath: /var/lib/codefresh + +# Enter default nodeSelector for volumes nodeAffinity - see https://kubernetes.io/docs/concepts/storage/volumes/#local +defaultNodeSelector: +# kubernetes.io/hostname: storage-node-01 + +# Enter nodes where to run mkdirs for all the volumes +mkdirPods: + nodes: +# - storage-node-01 + +volumes: + mongodb: + storageSize: 8Gi + nodeSelector: {} + + postgresql: + storageSize: 8Gi + nodeSelector: {} + + consul-0: + storageSize: 1Gi + nodeSelector: {} + + redis: + storageSize: 8Gi + nodeSelector: {} + + rabbitmq: + storageSize: 8Gi + nodeSelector: {} + + registry: + storageSize: 100Gi + nodeSelector: {} + + cronus: + storageSize: 1Gi + nodeSelector: {} + + store: + storageSize: 8Gi + nodeSelector: {} + + builder-0: + storageSize: 100Gi + nodeSelector: {} + + runner-0: + storageSize: 100Gi + nodeSelector: {} \ No newline at end of file From b1d896f9b6f46372cf0f12e4b6b91e6ee7f9b457 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Mon, 11 Feb 2019 17:21:52 +0200 Subject: [PATCH 15/16] fixed validator for loval volumes --- local-volumes/templates/mkdir-pod.yaml | 6 ++++ run-validator.sh | 1 + validator/templates/storageclasses/pvcs.yaml | 31 ++++++++++++++++++-- values.yaml.tpl | 4 +-- 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/local-volumes/templates/mkdir-pod.yaml b/local-volumes/templates/mkdir-pod.yaml index d736b2f..57e9881 100644 --- a/local-volumes/templates/mkdir-pod.yaml +++ b/local-volumes/templates/mkdir-pod.yaml @@ -30,6 +30,12 @@ metadata: chart: 
"{{ $.Chart.Name }}-{{ $.Chart.Version }}" release: "{{ $.Release.Name }}" heritage: "{{ $.Release.Service }}" + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded spec: restartPolicy: Never nodeSelector: diff --git a/run-validator.sh b/run-validator.sh index 6347da2..eb6f121 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -40,6 +40,7 @@ RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" { if [[ -n "${RELEASE_STATUS}" ]]; then echo "There is a previous run of $RELEASE with status $RELEASE_STATUS , deleting it" helm delete $RELEASE --purge + sleep 10 fi HELM=${HELM:-helm} diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml index 67b39b9..1c7205d 100644 --- a/validator/templates/storageclasses/pvcs.yaml +++ b/validator/templates/storageclasses/pvcs.yaml @@ -62,19 +62,35 @@ metadata: name: test-pvc-{{ $v.existingPvc }} labels: app: {{ $.Release.Name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded spec: + restartPolicy: Never + terminationGracePeriodSeconds: 10 containers: - image: alpine:3.7 name: test-pvc-{{ $v.existingPvc }} imagePullPolicy: IfNotPresent volumeMounts: - - mountPath: /data + - mountPath: /test-pvc name: data + command: + - sh + - -c + - | + mount + ls -l /test-pvc + mkdir -pv /test-pvc/test-pvc + sleep 300 readinessProbe: exec: command: - - touch - - /data/tst1 + - touch + - /test-pvc/test-pvc/ready initialDelaySeconds: 1 periodSeconds: 3 volumes: @@ -87,4 +103,13 @@ spec: {{- end }} --- {{- end -}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-pvc-dummy-cm + labels: + app: {{ $.Release.Name }} +data: + dd: "11" {{- end -}} \ No newline at end of file diff --git a/values.yaml.tpl b/values.yaml.tpl index 92254da..70b2c3a 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -61,14 +61,14 @@ mongodb: postgresql: storageSize: 8Gi storageClass: - #existingPvc: cf-postgesql + #existingPvc: cf-postgresql #nodeSelector: # kubernetes.io/hostname: storage-host-01 consul: storageSize: 1Gi storageClass: - #existingPvc: cf-consul + #existingPvc: cf-consul-0 #nodeSelector: # kubernetes.io/hostname: storage-host-01 From 27fa07b407383c9eecb68ed110df633dc26ac461 Mon Sep 17 00:00:00 2001 From: kosta709 Date: Tue, 12 Feb 2019 08:08:37 +0200 Subject: [PATCH 16/16] fixed validator --- cf-onprem | 11 ++++++++++- run-validator.sh | 18 ++++++++++-------- validator/templates/storageclasses/pvcs.yaml | 8 +++++--- values.yaml.tpl | 10 ++++------ 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/cf-onprem b/cf-onprem index 56bedb5..d88e3a4 100755 --- a/cf-onprem +++ b/cf-onprem @@ -185,7 +185,16 @@ EOF cf_status=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') -[ -z "${cf_status}" ] && SEEDJOBS="--set global.seedJobs=true" && CERTJOBS="--set global.certsJobs=true" +if [[ -z "${cf_status}" ]]; then + SEEDJOBS="--set global.seedJobs=true" + CERTJOBS="--set global.certsJobs=true" + IN_INSTALLER="true" ${DIR}/run-validator.sh + if [[ $? != 0 ]]; then + echo "Validation failed" + exit 1 + fi +fi + msg "Installing/Updating Codefresh..." 
helm upgrade ${RELEASE} codefresh-onprem-${CHANNEL}/codefresh \ diff --git a/run-validator.sh b/run-validator.sh index eb6f121..204ea69 100755 --- a/run-validator.sh +++ b/run-validator.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # - +echo "Starting validator" DIR=$(dirname $0) RELEASE=cf-validator CHART=${DIR}/validator @@ -15,16 +15,18 @@ fi source ${DIR}/scripts/helpers.sh -approveContext +if [[ -z "${IN_INSTALLER}" ]]; then + approveContext -msg "Checking helm binary on your system" -checkHelmInstalled "helm" + msg "Checking helm binary on your system" + checkHelmInstalled "helm" -msg "Checking if tiller is installed on kubernetes cluster" -checkTillerInstalled + msg "Checking if tiller is installed on kubernetes cluster" + checkTillerInstalled -msg "Checking tiller status..." -checkTillerStatus + msg "Checking tiller status..." + checkTillerStatus +fi ## Get default storage class SC_DEFAULT_QUERY='{{ range .items }}' diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml index 1c7205d..d45b611 100644 --- a/validator/templates/storageclasses/pvcs.yaml +++ b/validator/templates/storageclasses/pvcs.yaml @@ -5,7 +5,7 @@ {{/* Fill dict persistent services */}} {{- $persistentServices := dict -}} -{{- $persistentServiceNames := list "mongodb" "postgresql" "consul" "redis" "rabbitmq" "registry" "cronus" -}} +{{- $persistentServiceNames := list "mongodb" "postgresql" "consul" "redis" "rabbitmq" "registry" "cronus" "runner" "builder" -}} {{- range $persistentServiceNames -}} {{- if (index $.Values . ) -}} {{- $_ := set $persistentServices . (index $.Values .) -}} @@ -104,6 +104,9 @@ spec: --- {{- end -}} +{{- end -}} + +--- apiVersion: v1 kind: ConfigMap metadata: @@ -111,5 +114,4 @@ metadata: labels: app: {{ $.Release.Name }} data: - dd: "11" -{{- end -}} \ No newline at end of file + dd: "11" \ No newline at end of file diff --git a/values.yaml.tpl b/values.yaml.tpl index 70b2c3a..64e23ea 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -139,9 +139,8 @@ builder: ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override builder PV initial size - varLibDockerVolume: - storageSize: 100Gi - storageClass: + storageSize: 100Gi + storageClass: #existingPvc: cf-builder-0 runner: @@ -149,9 +148,8 @@ runner: ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override runner PV initial size - varLibDockerVolume: - storageSize: 100Gi - storageClass: + storageSize: 100Gi + storageClass: #existingPvc: cf-runner-0
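
Taken together, the patches above give the installer a pre-flight path: `cf-onprem` now sources `scripts/helpers.sh`, and on a first install (no existing `cf` release) it runs `run-validator.sh` with `IN_INSTALLER=true`, which skips the interactive context/helm/tiller checks and installs the `validator` chart to probe storage classes and any `existingPvc` volumes, aborting with "Validation failed" if that release does not come up. Below is a minimal sketch of running the pieces by hand; it assumes the root `values.yaml` is prepared from `values.yaml.tpl` (the `/values.yaml` entry in `.gitignore` suggests it is kept local) and that the local-volumes chart is only needed when `existingPvc` entries are uncommented.

```bash
#!/usr/bin/env bash
# Sketch of the pre-install flow wired up by these patches.
# Assumptions: the root values.yaml is copied from values.yaml.tpl, and the
# certs/ paths are only example locations for the web TLS key and certificate.
set -e

# 1. Prepare installer values from the template.
cp values.yaml.tpl values.yaml
vi values.yaml                      # set global.appUrl, storage classes / existingPvc entries

# 2. (Optional) Create local PVs/PVCs when existingPvc values such as
#    cf-builder-0 or cf-runner-0 are used (see local-volumes/README.md).
(
  cd local-volumes
  cp values.yaml.tmpl values.yaml
  vi values.yaml                    # defaultNodeSelector and mkdirPods.nodes
  ./create-local-pvcs.sh
)

# 3. Run the validator standalone: it confirms the kubectl context, helm and
#    tiller, then installs the cf-validator release to test storage and PVCs.
./run-validator.sh

# 4. Run the installer; on a first install it re-runs the validator itself
#    (IN_INSTALLER=true) before the helm upgrade of the cf release.
./cf-onprem --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem
```

The standalone run is the interactive path (`approveContext`, `checkHelmInstalled`, `checkTillerInstalled`, `checkTillerStatus` from `scripts/helpers.sh`); the `IN_INSTALLER` branch exists so the installer-driven run does not prompt again and simply fails the install when validation does not pass.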