diff --git a/.gitignore b/.gitignore
index 6b8974d..b580927 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,8 +3,9 @@
 assets
 .kube
+tmp/
 
 # values.yaml
-values.yaml
+/values.yaml
 
 # decrypted files
 **/*-dec.*
diff --git a/README.md b/README.md
index e61ec37..eec5e8a 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,8 @@ Before running `cf-onprem` script it is needed to:
 * make configuration changes specific for each customer
 
 There are three files that customize `codefresh` chart deployment:
-* `sa-dec.json` contains GCP service account that enables a customer to pull codefresh images
-* `values.yaml` contains different parameters for chart customization
-* `values-dec.yaml` contains secrets such as `githubClientSecret`, etc.
+* `values.yaml.tpl` contains a template of `values.yaml` with the parameters for chart customization
 
-Also to be able to encrypt `*-dec.*` files and decrypt `*-enc.*` files `aws cli` should be configured with permissions to use AWS KMS service and [sops](https://github.com/mozilla/sops/releases) binary installed on your system.
 
 ### How to run
 1. Clone [onprem](https://github.com/codefresh-io/onprem) repository
@@ -24,17 +21,30 @@ Also to be able to encrypt `*-dec.*` files and decrypt `*-enc.*` files `aws cli`
 git clone git@github.com:codefresh-io/onprem.git
 cd onprem
 ```
-2. Decrypt `sa-enc.json` and `values-enc.yaml` files
-```
-./sops.sh -d
-```
-3. Make configuration changes in `sa-dec.json`, `values.yaml`, `values-dec.yaml` files and customize variables in `env-vars` file
-4. Run `cf-onprem` script
-5. If it is needed to upload new configuration into remote repository then encrypt `sa-dec.json`, `values-dec.yaml` files
-```
-./sops.sh -e
-```
-6. Commit and push changes
-```
-git push origin master
-```
\ No newline at end of file
+2. Copy the values template: `cp values.yaml.tpl values.yaml`
+
+3. Edit `values.yaml`.
+Setting `global.appUrl` and `firebaseSecret` is mandatory.
+
+    ##### Running on local volumes
+    Codefresh can run on local volumes - https://kubernetes.io/docs/concepts/storage/volumes/#local
+
+    To create local volumes edit `local-volumes/values.yaml` and set:
+    - defaultNodeSelector
+    - mkdirPods.nodes
+
+    then run `local-volumes/create-local-pvcs.sh`
+    and set the corresponding `existingPvc` values in `values.yaml`
+
+4. Validate the values and the cluster:
+   `./run-validator.sh`
+   It will validate:
+   - values.yaml
+   - ability to launch persistent services on the specified storage classes
+   - ability to launch persistent services on the specified existing pvcs
+   - To do: validation of networks, DNS, load balancers, ingress
+
+5. Run the installer:
+   ```
+   ./cf-onprem [ --web-tls-key certs/key.pem --web-tls-cert certs/cert.pem ]
+   ```
\ No newline at end of file
diff --git a/cf-onprem b/cf-onprem
index dfe1374..d88e3a4 100755
--- a/cf-onprem
+++ b/cf-onprem
@@ -2,20 +2,17 @@
 #
 #set -x
-
-msg() { echo -e "\e[32mINFO [$(date +%F\ %T)] ---> $1\e[0m"; }
-warning() { echo -e "\e[33mWARNING [$(date +%F\ %T)] ---> $1\e[0m"; }
-err() { echo -e "\e[31mERR [$(date +%F\ %T)] ---> $1\e[0m" ; exit 1; }
+DIR=$(dirname $0)
+source ${DIR}/scripts/helpers.sh
 
 if [ -f "./env-vars" ]; then
   . 
./env-vars fi -readonly HELM_VERSION="${CF_HELM_VERSION:-2.10.0}" readonly CHANNEL="${CF_HELM_CHANNEL:-dev}" readonly CODEFRESH_REPOSITORY=http://charts.codefresh.io/${CHANNEL} -#export KUBECONFIG=./.kube/config +readonly RELEASE=cf readonly WORKING_DIR="$(dirname "$0")" readonly SERVICE_ACCOUNT="${WORKING_DIR}/sa.json" @@ -29,144 +26,6 @@ usage() { exit 0 } -check() { command -v $1 >/dev/null 2>&1 || err "$1 binary is required!"; } - -ver() { printf "%03d%03d%03d%03d" $(echo "$1" | tr '.' ' '); } - -exists() { - if command -v $1 >/dev/null 2>&1; then - msg "$1 binary installed" - else - warning "Please install $1 to proceed" - exit 1 - fi -} - -run_as_root() { - if [[ $EUID > 0 ]]; then - err "Please run as root/sudo" - exit 1 - fi -} - -approveContext() { - msg "Your kubectl is configured with the following context: " - kubectl config current-context - read -r -p "Are you sure you want to continue? [y/N] " response - - if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]] - then - msg "No problem, continuing with the script..." - else - msg "Exiting..." - exit 0 - fi -} - -checkHelmInstalled() { - if command -v $1 >/dev/null 2>&1; then - helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') - msg "helm is already installed and has version v$helm_version" - [ $(ver $helm_version) -lt $(ver $HELM_VERSION) ] && \ - err "You have older helm version than required. Please upgrade to v$HELM_VERSION or newer !" - else - warning "helm is not installed" - if [[ ! "$YES" == 'true' ]]; then - read -p "Do you want to install helm ? [y/n] " yn - case ${yn} in - y|Y) - helmInstall - ;; - *) - err "Need helm to deploy Codefresh app ! Exiting..." - #exit 1 - ;; - esac - else - helmInstall - fi - fi -} - -helmInstall() { - msg "Downloading and installing helm..." -<< //// - case "$(uname -s)" in - Linux) - os=linux - ;; - Darwin) - os=darwin - ;; - *) - ;; - esac -//// - wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -P /tmp/ - tar xvf /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -C /tmp/ - chmod +x /tmp/${os}-amd64/helm - sudo mv /tmp/${os}-amd64/helm /usr/local/bin/ - rm -rf /tmp/helm-v${HELM_VERSION}-${os}-amd64 /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -} - -checkTillerInstalled() { - status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') - if [ "$status" == "Running" ]; then - msg "Tiller is installed and running" - helm init -c - helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') - tiller_version=$(helm version --server --short | sed 's/.*\: v//' | sed 's/+.*//') - if [[ ! "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then - warning "You're running helm v$helm_version but tiller has v$tiller_version." - read -p " Do you want to upgrade tiller to v$helm_version ? [y/n] " yn - case ${yn} in - y|Y) - kubectl create -f ./tiller-rbac-config.yaml > /dev/null 2>&1 - helm init --upgrade --service-account tiller --wait - ;; - *) - err "You need to upgrade tiller ! Exiting..." - ;; - esac - fi - if [[ "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then - err "You're running helm v$helm_version but tiller has v$tiller_version . You need to upgrade tiller ! Exiting..." - fi - else - warning "Unable to determine tiller at its default location." - if [[ ! 
"$YES" == 'true' ]]; then - read -p " Do you want to deploy tiller ? [y/n] " yn - case ${yn} in - y|Y) - kubectl create -f ./tiller-rbac-config.yaml - helm init --service-account tiller --wait - ;; - *) - err "Need to deploy tiller ! Exiting..." - exit 1 - ;; - esac - else - kubectl create -f ./tiller-rbac-config.yaml - helm init --service-account tiller --wait - fi - fi - -} - -checkTillerStatus() { - while true; do - status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') - - msg "Tiller status = $status" - [ "$status" == "Running" ] && break - - msg "Sleeping 5 seconds ..." - sleep 5 - - done -} - generateWebTlsValuesFile() { WEBTLSKEY=$(cat ${WebTlsKey} | sed 's/^/ /') @@ -177,6 +36,11 @@ WEBTLSCERT_CFUI=$(cat ${WebTlsCert} | sed 's/^/ /') cat <<-EOF >${WEBTLS_VALUES_FILE} --- +ingress: + webTlsSecretName: "star.codefresh.io" +nomios: + ingress: + webTlsSecretName: "star.codefresh.io" webTLS: secretName: star.codefresh.io key: | @@ -199,7 +63,7 @@ EOF # run_as_root -while [[ $1 =~ ^(-(y)|--(yes|web-tls-key|web-tls-cert|set)) ]] +while [[ $1 =~ ^(-(y)|--(yes|web-tls-key|web-tls-cert|set|debug)) ]] do key=$1 value=$2 @@ -219,6 +83,9 @@ do SET_VALUES="$SET_VALUES --set $value" shift ;; + --debug) + SET_DEBUG="--debug" + ;; esac shift # past argument or value done @@ -316,12 +183,21 @@ EOF [ -n "${WebTlsKey}" ] && [ -f "${WebTlsKey}" ] && [ -n "${WebTlsCert}" ] && [ -f "${WebTlsCert}" ] && [ -f "${WEBTLS_VALUES_FILE}" ] && WEBTLS_VALUES="--values ${WEBTLS_VALUES_FILE}" -cf_status=$(helm ls -q cf) +cf_status=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}') + +if [[ -z "${cf_status}" ]]; then + SEEDJOBS="--set global.seedJobs=true" + CERTJOBS="--set global.certsJobs=true" + IN_INSTALLER="true" ${DIR}/run-validator.sh + if [[ $? != 0 ]]; then + echo "Validation failed" + exit 1 + fi +fi -[ -z "${cf_status}" ] && SEEDJOBS="--set global.seedJobs=true" && CERTJOBS="--set global.certsJobs=true" msg "Installing/Updating Codefresh..." 
-helm upgrade cf codefresh-onprem-${CHANNEL}/codefresh \
+helm upgrade ${RELEASE} codefresh-onprem-${CHANNEL}/codefresh \
   --install \
   --namespace codefresh \
   --values "${VALUES_FILE}" \
@@ -331,5 +207,6 @@ helm upgrade cf codefresh-onprem-${CHANNEL}/codefresh \
   --set cfapi.redeploy=true \
   ${SEEDJOBS} \
   ${CERTJOBS} \
-  ${SET_VALUES}
+  ${SET_VALUES} \
+  ${SET_DEBUG}
 # ${MTU_VALUE}
diff --git a/env-vars b/env-vars
index a93498f..db90cb7 100644
--- a/env-vars
+++ b/env-vars
@@ -1,2 +1,2 @@
-export CF_HELM_CHANNEL=
+export CF_HELM_CHANNEL=test
 export CF_HELM_VERSION=
\ No newline at end of file
diff --git a/local-volumes/Chart.yaml b/local-volumes/Chart.yaml
new file mode 100644
index 0000000..c93f9ce
--- /dev/null
+++ b/local-volumes/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: "v1"
+name: local-volumes
+version: 1.0.0
+kubeVersion: "1.10.0 - 2.0.0"
+description: Creates local volumes for Codefresh onprem installation
+keywords:
+  - codefresh
+  - onprem
+  - local-volumes
+home: https://codefresh.io/
+sources:
+  - https://github.com/codefresh-io/onprem
+maintainers:
+  - name: Codefresh Authors
+    email: dev@codefresh.io
+engine: gotpl
+icon: https://codefresh.io/docs/assets/brand/codefresh-social-logo.png
+appVersion: v2.0.10
+tillerVersion: ">2.9.0"
\ No newline at end of file
diff --git a/local-volumes/README.md b/local-volumes/README.md
new file mode 100644
index 0000000..76931e4
--- /dev/null
+++ b/local-volumes/README.md
@@ -0,0 +1,11 @@
+### Local Volumes helm chart
+Creates local volumes and PVCs, and makes the directories on the nodes.
+
+Copy `values.yaml` from the template, edit it,
+then run the creation script:
+```
+cp values.yaml.tmpl values.yaml
+vi values.yaml
+
+./create-local-pvcs.sh
+```
diff --git a/local-volumes/create-local-pvcs.sh b/local-volumes/create-local-pvcs.sh
new file mode 100755
index 0000000..6d78523
--- /dev/null
+++ b/local-volumes/create-local-pvcs.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+#
+
+DIR=$(dirname $0)
+RELEASE=cf-local-volumes
+CHART=$(realpath ${DIR}/../local-volumes)
+NAMESPACE=${NAMESPACE:-codefresh}
+HELM_TIMEOUT=60
+
+source ${DIR}/../scripts/helpers.sh
+
+approveContext
+
+RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}')
+if [[ -n "${RELEASE_STATUS}" ]]; then
+  echo "There is a previous run of $RELEASE with status $RELEASE_STATUS
+Run: helm status cf-local-volumes; to check the status of the release
+Or run: helm del --purge cf-local-volumes; to delete it
+
+  "
+  exit 1
+fi
+
+VALUES_FILE=${DIR}/values.yaml
+
+HELM=${HELM:-helm}
+
+HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART $@"
+
+echo "Running ${RELEASE} helm release
+$HELM_COMMAND
+"
+
+eval $HELM_COMMAND &
+HELM_PID=$!
+
+wait $HELM_PID
+HELM_EXIT_STATUS=$?
+
+if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then
+  echo "Local Volumes chart has been submitted. Run the command below to inspect the status
+  kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE}
+  "
+else
+  echo "
+  Local Volumes chart submission FAILED."
+fi
+
+exit $HELM_EXIT_STATUS
\ No newline at end of file
diff --git a/local-volumes/templates/_helpers.tpl b/local-volumes/templates/_helpers.tpl
new file mode 100755
index 0000000..f0d83d2
--- /dev/null
+++ b/local-volumes/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/local-volumes/templates/mkdir-pod.yaml b/local-volumes/templates/mkdir-pod.yaml new file mode 100644 index 0000000..57e9881 --- /dev/null +++ b/local-volumes/templates/mkdir-pod.yaml @@ -0,0 +1,65 @@ +{{- if .Values.mkdirPods }} +{{- if empty .Values.mkdirPods.nodes }} +{{- fail "mkdirPod cannot run - No nodes in .Values.mkdirPods.nodes" }} +{{- end }} +{{- $pathes := list }} +{{- range $k, $v := .Values.volumes }} + + {{- $path := "" }} + {{- if $v.localPath }} + {{- $path = isAbs $v.localPath | ternary $v.localPath (printf "%s/%s" $.Values.basePath $v.localPath) }} + {{- else }} + {{- $path = printf "%s/%s%s" $.Values.basePath $.Values.namePrefix $k }} + {{- end }} + {{- if or (empty $path) (eq $path "/") }} + {{- fail "Cannot calculate path for local volumes. Specify values for .Values.basePath or volumes.name.path " }} + {{- end }} + {{- $pathes = append $pathes $path }} +{{- end }} + +{{- range .Values.mkdirPods.nodes }} +{{- $nodeNameSplit := splitn "." 2 . }} +{{- $podName := $nodeNameSplit._0 }} +--- +apiVersion: v1 +kind: Pod +metadata: + name: {{ printf "mkdir-%s%s-%s" $.Values.namePrefix $podName (randAlphaNum 5 | lower)}} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + nodeSelector: + kubernetes.io/hostname: {{ . }} + containers: + - image: alpine:3.7 + name: mkdir + command: + - /bin/sh + - "-ec" + - | + {{- range $pathes }} + mkdir -pv {{ printf "/hostroot%s" . }} + {{- end }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /hostroot + readOnly: false + name: hostroot + volumes: + - name: hostroot + hostPath: + path: "/" + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/persistent-volumes.yaml b/local-volumes/templates/persistent-volumes.yaml new file mode 100644 index 0000000..3cbc57c --- /dev/null +++ b/local-volumes/templates/persistent-volumes.yaml @@ -0,0 +1,47 @@ +{{- range $k, $v := .Values.volumes }} + +{{- $nodeSelector := default $.Values.defaultNodeSelector $v.nodeSelector -}} +{{- if empty $nodeSelector -}} + {{- fail "Cannot find nodeSelector for local volumes. 
Specify values for defaultNodeSelector or volume specific nodeSelector" -}} +{{- end }} + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ printf "%s%s" $.Values.namePrefix $k }} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +spec: + capacity: + storage: {{ default "40Gi" $v.storageSize}} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{$.Values.namePrefix}}local-storage + local: + {{- $path := "" }} + {{- if $v.path }} + {{- $path = isAbs $v.path | ternary $v.path (printf "%s/%s" $.Values.basePath $v.path) }} + {{- else }} + {{- $path = printf "%s/%s%s" $.Values.basePath $.Values.namePrefix $k }} + {{- end }} + {{- if or (empty $path) (eq $path "/") }} + {{- fail "Cannot calculate path for local volumes. Specify values for .Values.basePath or volumes..path " }} + {{- end }} + path: {{ $path }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + {{- range $s, $d := $nodeSelector }} + {{- if empty $d }}{{- fail (printf "Empty Node Selector Value for %s or all" $k) }}{{- end }} + - key: {{ $s }} + operator: In + values: + - {{ $d }} + {{- end }} +--- +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/pvcs.yaml b/local-volumes/templates/pvcs.yaml new file mode 100644 index 0000000..9579636 --- /dev/null +++ b/local-volumes/templates/pvcs.yaml @@ -0,0 +1,21 @@ +{{- range $k, $v := .Values.volumes }} + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ printf "%s%s" $.Values.namePrefix $k }} + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "40Gi" $v.storageSize}} + volumeName: {{ printf "%s%s" $.Values.namePrefix $k }} + storageClassName: {{$.Values.namePrefix}}local-storage +--- +{{- end }} \ No newline at end of file diff --git a/local-volumes/templates/storageclass.yaml b/local-volumes/templates/storageclass.yaml new file mode 100644 index 0000000..6ed7d6e --- /dev/null +++ b/local-volumes/templates/storageclass.yaml @@ -0,0 +1,10 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{.Values.namePrefix}}local-storage + labels: + app: {{ $.Release.Name }} + chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +provisioner: kubernetes.io/no-provisioner \ No newline at end of file diff --git a/local-volumes/values.yaml.tmpl b/local-volumes/values.yaml.tmpl new file mode 100644 index 0000000..bf59145 --- /dev/null +++ b/local-volumes/values.yaml.tmpl @@ -0,0 +1,52 @@ +namePrefix: cf- +basePath: /var/lib/codefresh + +# Enter default nodeSelector for volumes nodeAffinity - see https://kubernetes.io/docs/concepts/storage/volumes/#local +defaultNodeSelector: +# kubernetes.io/hostname: storage-node-01 + +# Enter nodes where to run mkdirs for all the volumes +mkdirPods: + nodes: +# - storage-node-01 + +volumes: + mongodb: + storageSize: 8Gi + nodeSelector: {} + + postgresql: + storageSize: 8Gi + nodeSelector: {} + + consul-0: + storageSize: 1Gi + nodeSelector: {} + + redis: + storageSize: 8Gi + nodeSelector: {} + + rabbitmq: + storageSize: 8Gi + nodeSelector: {} + + registry: + storageSize: 100Gi + nodeSelector: {} + + cronus: + storageSize: 1Gi + nodeSelector: {} + + store: + 
storageSize: 8Gi
+    nodeSelector: {}
+
+  builder-0:
+    storageSize: 100Gi
+    nodeSelector: {}
+
+  runner-0:
+    storageSize: 100Gi
+    nodeSelector: {}
\ No newline at end of file
diff --git a/run-validator.sh b/run-validator.sh
new file mode 100755
index 0000000..204ea69
--- /dev/null
+++ b/run-validator.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+echo "Starting validator"
+DIR=$(dirname $0)
+RELEASE=cf-validator
+CHART=${DIR}/validator
+NAMESPACE=${NAMESPACE:-codefresh}
+HELM_TIMEOUT=60
+
+VALUES_FILE=${DIR}/values.yaml
+if [[ ! -f "${VALUES_FILE}" ]]; then
+  echo "Error: values file ${VALUES_FILE} does not exist"
+  exit 1
+fi
+
+source ${DIR}/scripts/helpers.sh
+
+if [[ -z "${IN_INSTALLER}" ]]; then
+  approveContext
+
+  msg "Checking helm binary on your system"
+  checkHelmInstalled "helm"
+
+  msg "Checking if tiller is installed on kubernetes cluster"
+  checkTillerInstalled
+
+  msg "Checking tiller status..."
+  checkTillerStatus
+fi
+
+## Get default storage class
+SC_DEFAULT_QUERY='{{ range .items }}'
+SC_DEFAULT_QUERY+='{{if .metadata.annotations }}{{if (index .metadata.annotations "storageclass.beta.kubernetes.io/is-default-class") }}'
+SC_DEFAULT_QUERY+='{{ .metadata.name }}{{"\n"}}'
+SC_DEFAULT_QUERY+='{{end}}{{end}}{{end}}'
+DEFAULT_STORAGE_CLASS=$(kubectl -ogo-template="$SC_DEFAULT_QUERY" get sc)
+if [[ -n "${DEFAULT_STORAGE_CLASS}" ]]; then
+  DEFAULT_STORAGE_CLASS_PARAM="--set defaultStorageClass=${DEFAULT_STORAGE_CLASS}"
+fi
+
+RELEASE_STATUS=$(helm status $RELEASE 2>/dev/null | awk -F': ' '$1 == "STATUS" {print $2}')
+if [[ -n "${RELEASE_STATUS}" ]]; then
+  echo "There is a previous run of $RELEASE with status $RELEASE_STATUS , deleting it"
+  helm delete $RELEASE --purge
+  sleep 10
+fi
+
+HELM=${HELM:-helm}
+
+HELM_COMMAND="$HELM --namespace $NAMESPACE install -n $RELEASE $CHART -f ${VALUES_FILE} ${DEFAULT_STORAGE_CLASS_PARAM} --timeout $HELM_TIMEOUT --wait $@"
+
+echo "Running ${RELEASE} helm release
+$HELM_COMMAND
+"
+
+eval $HELM_COMMAND &
+HELM_PID=$!
+
+echo "Waiting ${HELM_TIMEOUT}s for the validator release to complete ...
+You can view progress by running the command below in a separate shell:
+
+kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE}
+
+"
+wait $HELM_PID
+HELM_EXIT_STATUS=$?
+
+if [[ "${HELM_EXIT_STATUS}" == 0 ]]; then
+  echo "Cleaning validator release"
+  helm delete $RELEASE --purge
+  echo "Validation completed successfully"
+else
+  # kubectl --namespace $NAMESPACE get pods,pvc,pv,svc -l app=${RELEASE}
+  echo "
+Validation FAILED. See the messages above.
+Check failed or pending resources with:
+kubectl describe ${RELEASE}-* to see the cause
+  "
+  exit 1
+fi
+
+
diff --git a/scripts/helpers.sh b/scripts/helpers.sh
new file mode 100755
index 0000000..dc855bf
--- /dev/null
+++ b/scripts/helpers.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+msg() { echo -e "\e[32mINFO [$(date +%F\ %T)] ---> $1\e[0m"; }
+warning() { echo -e "\e[33mWARNING [$(date +%F\ %T)] ---> $1\e[0m"; }
+err() { echo -e "\e[31mERR [$(date +%F\ %T)] ---> $1\e[0m" ; exit 1; }
+
+check() { command -v $1 >/dev/null 2>&1 || err "$1 binary is required!"; }
+
+ver() { printf "%03d%03d%03d%03d" $(echo "$1" | tr '.' 
' '); } + +exists() { + if command -v $1 >/dev/null 2>&1; then + msg "$1 binary installed" + else + warning "Please install $1 to proceed" + exit 1 + fi +} + +function parse_yaml { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i/dev/null 2>&1; then + helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') + msg "helm is already installed and has version v$helm_version" + [ $(ver $helm_version) -lt $(ver $HELM_VERSION) ] && \ + err "You have older helm version than required. Please upgrade to v$HELM_VERSION or newer !" + else + warning "helm is not installed" + if [[ ! "$YES" == 'true' ]]; then + read -p "Do you want to install helm ? [y/n] " yn + case ${yn} in + y|Y) + helmInstall + ;; + *) + err "Need helm to deploy Codefresh app ! Exiting..." + #exit 1 + ;; + esac + else + helmInstall + fi + fi +} + +helmInstall() { + msg "Downloading and installing helm..." +<< //// + case "$(uname -s)" in + Linux) + os=linux + ;; + Darwin) + os=darwin + ;; + *) + ;; + esac +//// + wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -P /tmp/ + tar xvf /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz -C /tmp/ + chmod +x /tmp/${os}-amd64/helm + sudo mv /tmp/${os}-amd64/helm /usr/local/bin/ + rm -rf /tmp/helm-v${HELM_VERSION}-${os}-amd64 /tmp/helm-v${HELM_VERSION}-${os}-amd64.tar.gz +} + +checkTillerInstalled() { + status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') + if [ "$status" == "Running" ]; then + msg "Tiller is installed and running" + helm init -c + helm_version=$(helm version --client --short | sed 's/.*\: v//' | sed 's/+.*//') + tiller_version=$(helm version --server --short | sed 's/.*\: v//' | sed 's/+.*//') + if [[ ! "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then + warning "You're running helm v$helm_version but tiller has v$tiller_version." + read -p " Do you want to upgrade tiller to v$helm_version ? [y/n] " yn + case ${yn} in + y|Y) + kubectl create -f ./tiller-rbac-config.yaml > /dev/null 2>&1 + helm init --upgrade --service-account tiller --wait + ;; + *) + err "You need to upgrade tiller ! Exiting..." + ;; + esac + fi + if [[ "$YES" == 'true' ]] && [ $(ver $tiller_version) -lt $(ver $helm_version) ]; then + err "You're running helm v$helm_version but tiller has v$tiller_version . You need to upgrade tiller ! Exiting..." + fi + else + warning "Unable to determine tiller at its default location." + if [[ ! "$YES" == 'true' ]]; then + read -p " Do you want to deploy tiller ? [y/n] " yn + case ${yn} in + y|Y) + kubectl create -f ./tiller-rbac-config.yaml + helm init --service-account tiller --wait + ;; + *) + err "Need to deploy tiller ! Exiting..." 
+ exit 1 + ;; + esac + else + kubectl create -f ./tiller-rbac-config.yaml + helm init --service-account tiller --wait + fi + fi + +} + +checkTillerStatus() { + while true; do + status=$(kubectl -nkube-system get pod -l app=helm -l name=tiller -o=go-template --template='{{ range $i, $v := .items }}{{ if eq $v.status.phase "Running" }}{{ $v.status.phase }}{{ end }}{{ end }}') + + msg "Tiller status = $status" + [ "$status" == "Running" ] && break + + msg "Sleeping 5 seconds ..." + sleep 5 + + done +} \ No newline at end of file diff --git a/validator/Chart.yaml b/validator/Chart.yaml new file mode 100644 index 0000000..8af5aa5 --- /dev/null +++ b/validator/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: "v1" +name: validator +version: 1.0.0 +kubeVersion: "1.9.0 - 2.0.0" +description: Validates possibility of Codefresh onprem installation +keywords: + - codefresh + - onoprem + - validator +home: https://codefresh.io/ +sources: + - https://github.com/codefresh-io/onprem +maintainers: + - name: Codefresh Authors + email: dev@codefresh.io +engine: gotpl +icon: https://codefresh.io/docs/assets/brand/codefresh-social-logo.png +appVersion: v2.0.10 +tillerVersion: ">2.9.0" \ No newline at end of file diff --git a/validator/README.md b/validator/README.md new file mode 100644 index 0000000..cb21788 --- /dev/null +++ b/validator/README.md @@ -0,0 +1,3 @@ + + +helm install -f $(realpath ./values.yaml) -ntst1 --wait --timeout 60 validator/ \ No newline at end of file diff --git a/validator/templates/_helpers.tpl b/validator/templates/_helpers.tpl new file mode 100755 index 0000000..f0d83d2 --- /dev/null +++ b/validator/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/validator/templates/storageclasses/pvcs.yaml b/validator/templates/storageclasses/pvcs.yaml new file mode 100644 index 0000000..d45b611 --- /dev/null +++ b/validator/templates/storageclasses/pvcs.yaml @@ -0,0 +1,117 @@ +{{- $scDict := dict -}} +{{- $existingPvcsDict := dict -}} +{{- $checkSc := false -}} +{{- $checkDefaultSc := false -}} + +{{/* Fill dict persistent services */}} +{{- $persistentServices := dict -}} +{{- $persistentServiceNames := list "mongodb" "postgresql" "consul" "redis" "rabbitmq" "registry" "cronus" "runner" "builder" -}} +{{- range $persistentServiceNames -}} +{{- if (index $.Values . ) -}} +{{- $_ := set $persistentServices . (index $.Values .) 
-}} +{{- end -}} +{{- end -}} + +{{- if .Values.hermes -}} {{- if .Values.hermes.redis -}} +{{- $_ := set $persistentServices "store" .Values.hermes.redis -}} +{{- end -}}{{- end -}} + +{{- range $k, $v := $persistentServices -}} + {{- if $v.existingPvc }} + {{- $_ := set $existingPvcsDict $v.existingPvc "exists" -}} + {{- else if $v.storageClass -}} + {{- $_ := set $scDict $v.storageClass "exists" -}} + {{- $checkSc = true -}} + {{- else -}} + {{- $checkDefaultSc = true -}} + {{- if and (empty $.Values.global.storageClass) (empty $.Values.defaultStorageClass ) -}} + {{- fail (printf "No global.storageClass or kubernetes default storage class defined, persistent service %s will not be able to start" $k) -}} + {{- end -}} + + {{- $_ := set $scDict (coalesce $.Values.global.storageClass $.Values.defaultStorageClass ) "exists" -}} + {{- end -}} +{{- end -}} + +{{- if .Values.global.storageClass -}} +{{- $_ := set $scDict .Values.global.storageClass "exists" -}} +{{- end -}} + +{{- range $storageClass := keys $scDict }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "fullname" $ }}-storageclass-{{ $storageClass }} + labels: + app: {{ $.Release.Name }} +spec: + storageClassName: {{ $storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +{{- end }} + + +{{- range $k, $v := $persistentServices -}} +{{- if $v.existingPvc }} +apiVersion: v1 +kind: Pod +metadata: + name: test-pvc-{{ $v.existingPvc }} + labels: + app: {{ $.Release.Name }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + terminationGracePeriodSeconds: 10 + containers: + - image: alpine:3.7 + name: test-pvc-{{ $v.existingPvc }} + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /test-pvc + name: data + command: + - sh + - -c + - | + mount + ls -l /test-pvc + mkdir -pv /test-pvc/test-pvc + sleep 300 + readinessProbe: + exec: + command: + - touch + - /test-pvc/test-pvc/ready + initialDelaySeconds: 1 + periodSeconds: 3 + volumes: + - name: data + persistentVolumeClaim: + claimName: {{ $v.existingPvc }} + {{- if $v.nodeSelector }} + nodeSelector: +{{ toYaml $v.nodeSelector | indent 4 }} + {{- end }} +--- +{{- end -}} + +{{- end -}} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-pvc-dummy-cm + labels: + app: {{ $.Release.Name }} +data: + dd: "11" \ No newline at end of file diff --git a/validator/templates/values-validator.yaml b/validator/templates/values-validator.yaml new file mode 100644 index 0000000..ab3d960 --- /dev/null +++ b/validator/templates/values-validator.yaml @@ -0,0 +1,12 @@ +{{- $errors := "" }} +{{- if or (empty .Values.global.appUrl) (eq .Values.global.appUrl "your-domain.com") }} +{{- $errors = printf "%s\n%s" $errors "global.appUrl is empty" }} +{{- end }} + +{{- if empty .Values.firebaseSecret }} +{{- $errors = printf "%s\n%s" $errors "firebaseSecret is empty" }} +{{- end }} + +{{- if not ( empty $errors ) }} +{{- fail $errors }} +{{- end }} diff --git a/values.yaml.tpl b/values.yaml.tpl index da0cb59..64e23ea 100644 --- a/values.yaml.tpl +++ b/values.yaml.tpl @@ -8,6 +8,13 @@ global: ### Codefresh App domain name appUrl: your-domain.com +# Storage class for all persistent services +# storageClass: my-storage-class + +# Default nodeSelector for storage pods. 
Useful in case of local volumes +# storagePodNodeSelector: +# kubernetes.io/hostname: storage-host-01 + ### MTU Value for dockerd in builder and runner # mtu: 1400 @@ -17,171 +24,142 @@ global: # http_proxy: "http://myproxy.domain.com:8080" # HTTPS_PROXY: "http://myproxy.domain.com:8080" # https_proxy: "http://myproxy.domain.com:8080" -# NO_PROXY: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" -# no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" - +# NO_PROXY: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store,cf-tasker-kubernetes" +# no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-controller,cf-ingress-http-backend,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store,cf-tasker-kubernetes" ### Firebase secret firebaseSecret: -### Uncomment if kubernetes cluster is RBAC enabled -rbacEnable: true - ## Custom annotations for Codefresh ingress resource that override defaults #annotations: - #kubernetes.io/ingress.class: nginx-codefresh +# kubernetes.io/ingress.class: nginx-codefresh + +## Persistent services (mongodb, consul, postgress, redit, rabbit) configuration +# you can configure storageClass for dynamic volume provisoning or precreated existingPvc name +# existingPvc should exist before launching the intallation and takes precedence over storageClass +# +# Specify node selector if +# Example 1, mongodb with storageClass for dynamic volume provisoning: +# mongodb: +# storageClass: ceph-pool-1 +# storageSize: 8Gi +# +# Example 2, postgresql on precreated pvc for local volume on cpecific volume +# +# postgresql: +# existingPvc: cf-postgress-lv +# nodeSelector: +# kubernetes.io/hostname: storage-host-01 -ingress: -### Codefresh App domain name - domain: your-domain.com -### Uncomment if kubernetes cluster is RBAC enabled - rbacEnable: true -### The name of kebernetes secret with customer certificate and private key - webTlsSecretName: "star.codefresh.io" - -### For github provider (the apiHost and loginHost are different) -cfapi: - 
rbacEnable: true - - -consul: -### If needed to use storage class that different from default - StorageClass: {} -### Use existing volume claim name - #pvcName: cf-consul -### Use NodeSelector to assing pod to a node - nodeSelector: {} -# services: consul-postgresql +mongodb: + storageSize: 8Gi + storageClass: + #existingPvc: cf-mongodb + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 postgresql: - persistence: - #existingClaim: cf-postgresql - storageClass: {} - nodeSelector: {} -# services: consul-postgresql + storageSize: 8Gi + storageClass: + #existingPvc: cf-postgresql + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 -mongodb: -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -## IMPORTANT ! -## It is not possible the combination when pvcName is defined and persistence:enabled = true -## Only one of two: -## pvcName is defined AND persistence:enabled = false -## OR -## pvcName is not defined (commented out) AND persistence:enabled = true -## -## Use existing volume claim name - #pvcName: cf-mongodb -## Provision new volume claim - persistence: - enabled: true - ## If defined, volume.beta.kubernetes.io/storage-class: - ## Default: volume.alpha.kubernetes.io/storage-class: default - ## - storageClass: {} - accessMode: ReadWriteOnce - size: 8Gi - - nodeSelector: {} -# provisioner: local-volume +consul: + storageSize: 1Gi + storageClass: + #existingPvc: cf-consul-0 + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 redis: - persistence: -## Use existing volume claim name - #existingClaim: cf-redis - storageClass: {} - nodeSelector: {} -# provisioner: local-volume + storageSize: 8Gi + storageClass: + #existingPvc: cf-redis + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 rabbitmq: - persistence: -## Use existing volume claim name - #existingClaim: cf-rabbitmq - storageClass: {} - nodeSelector: {} -# services: rabbitmq-registry + storageSize: 8Gi + storageClass: + #existingPvc: cf-rabbitmq + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 -registry: - storageClass: {} -## Override default (4Gi) initial registry PV size - #storageSize: {} - ## Use existing volume claim name - #pvcName: cf-registry - nodeSelector: {} -# services: rabbitmq-registry -## Uncomment if needed to apply custom configuration to registry - #registryConfig: -## Insert custom registry configuration (https://docs.docker.com/registry/configuration/) - #version: 0.1 - #log: - #level: debug - #fields: - #service: registry - #storage: - #cache: - #blobdescriptor: inmemory - #s3: - #region: YOUR_REGION - #bucket: YOUR_BUCKET_NAME - #accesskey: AWS_ACCESS_KEY - #secretkey: AWS_SECRET_KEY - #http: - #addr: :5000 - #headers: - #X-Content-Type-Options: [nosniff] - #health: - #storagedriver: - #enabled: true - #interval: 10s - #threshold: 3 +cronus: + storageSize: 1Gi + storageClass: + #existingPvc: cf-cronus + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 hermes: - nodeSelector: {} -# services: rabbitmq-registry redis: ## Set hermes store password. 
It is mandatory redisPassword: verysecurepassword - nodeSelector: {} -# services: rabbitmq-registry - persistence: -## Use existing volume claim name - #existingClaim: cf-store - storageClass: {} + storageSize: 8Gi + storageClass: + #existingPvc: cf-store + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 -cronus: - storageClass: {} -## Use existing volume claim name - #pvcName: cf-cronus - nodeSelector: {} -# services: rabbitmq-registry +registry: + storageSize: 100Gi + storageClass: + #existingPvc: cf-registry + #nodeSelector: + # kubernetes.io/hostname: storage-host-01 +# Insert custom registry configuration (https://docs.docker.com/registry/configuration/) +# registryConfig: +# version: 0.1 +# log: +# level: debug +# fields: +# service: registry +# storage: +# cache: +# blobdescriptor: inmemory +# s3: +# region: YOUR_REGION +# bucket: YOUR_BUCKET_NAME +# accesskey: AWS_ACCESS_KEY +# secretkey: AWS_SECRET_KEY +# http: +# addr: :5000 +# headers: +# X-Content-Type-Options: [nosniff] +# health: +# storagedriver: +# enabled: true +# interval: 10s +# threshold: 3 builder: -## Use existing volume claim name - #pvcName: cf-builder + nodeSelector: {} ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override builder PV initial size - varLibDockerVolume: - storageClass: {} - storageSize: 100Gi + storageSize: 100Gi + storageClass: + #existingPvc: cf-builder-0 runner: -## Use existing volume claim name - #pvcName: cf-runner + nodeSelector: {} ## Set time to run docker cleaner dockerCleanerCron: 0 0 * * * ## Override runner PV initial size - varLibDockerVolume: - storageClass: {} - storageSize: 100Gi - -helm-repo-manager: - RepoUrlPrefix: "cm://" - -backups: - #enabled: true - awsAccessKey: - awsSecretAccessKey: - s3Url: s3:// + storageSize: 100Gi + storageClass: + #existingPvc: cf-runner-0 + + + +# helm-repo-manager: +# RepoUrlPrefix: "cm://" + +# backups: +# #enabled: true +# awsAccessKey: +# awsSecretAccessKey: +# s3Url: s3://
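
For reference, here is a minimal sketch of a filled-in `values.yaml` based on the template above. The domain, the firebase secret placeholder, and the `ceph-pool-1` storage class name are illustrative assumptions (`cf-postgress-lv` and the hostname selector are the examples used in the template's own comments); `global.appUrl`, `firebaseSecret`, and the hermes `redisPassword` are the mandatory fields, everything else can stay at the chart defaults:
```
# Hypothetical minimal values.yaml (copied from values.yaml.tpl and edited)
global:
  appUrl: codefresh.example.com        # assumption: replace with your real domain
  # storageClass: ceph-pool-1          # optional: default class for all persistent services

firebaseSecret: "<your-firebase-secret>"   # placeholder, set your real secret

hermes:
  redis:
    redisPassword: verysecurepassword      # mandatory, per the template

# Example of mixing dynamic provisioning with a pre-created local-volume PVC
mongodb:
  storageSize: 8Gi
  storageClass: ceph-pool-1            # assumption: an existing StorageClass in the cluster

postgresql:
  existingPvc: cf-postgress-lv         # created beforehand, e.g. by local-volumes/create-local-pvcs.sh
  nodeSelector:
    kubernetes.io/hostname: storage-host-01
```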