-        <clickhouse>
-          <limited_role>
-            <grants>
-              <query>REVOKE ALL ON *.*</query>
-              <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db1.*</query>
-              <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db2.*</query>
-              <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db3.*</query>
-              <query>GRANT SELECT,SHOW,dictGet,REMOTE ON *.*</query>
-            </grants>
-          </limited_role>
-        </clickhouse>
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+  name: grants-example
+spec:
+  configuration:
+    users:
+      myuser/profile: readonly
+      myuser/grants/query:
+        - "GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet,REMOTE ON canarydb.*"
+      myuser/allow_databases/database:
+        - "canarydb"
+      myuser2/profile: default
+      myuser2/grants/query:
+        - "GRANT limited_role"
+    files:
+      users.d/limited_role.xml: |
+        <clickhouse>
+          <roles>
+            <limited_role>
+              <grants>
+                <query>REVOKE ALL ON *.*</query>
+                <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db1.*</query>
+                <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db2.*</query>
+                <query>GRANT SELECT,INSERT,ALTER,CREATE,DROP,TRUNCATE,OPTIMIZE,SHOW,dictGet ON db3.*</query>
+                <query>GRANT SELECT,SHOW,dictGet,REMOTE ON *.*</query>
+              </grants>
+            </limited_role>
+          </roles>
+        </clickhouse>
\ No newline at end of file
diff --git a/docs/chi-examples/23-udf-example.yaml b/docs/chi-examples/23-udf-example.yaml
index c6b10c23d..ff3c992df 100644
--- a/docs/chi-examples/23-udf-example.yaml
+++ b/docs/chi-examples/23-udf-example.yaml
@@ -65,6 +65,9 @@ spec:
          containers:
            - name: clickhouse
              image: clickhouse/clickhouse-server:latest
+              env:
+                - name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
+                  value: "true"
              volumeMounts:
                - name: initdb-volume
                  mountPath: /docker-entrypoint-initdb.d
diff --git a/docs/chi-examples/25-timeseries-engine-example.yaml b/docs/chi-examples/25-timeseries-engine-example.yaml
new file mode 100644
index 000000000..e5dd5e28c
--- /dev/null
+++ b/docs/chi-examples/25-timeseries-engine-example.yaml
@@ -0,0 +1,82 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clickhouse-initdb-schema
+data:
+  init_schema.sql: |
+    CREATE DATABASE IF NOT EXISTS timeseries_db;
+    CREATE TABLE IF NOT EXISTS timeseries_db.timeseires_table ENGINE=TimeSeries;
+---
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+  name: timeseries
+spec:
+  templates:
+    serviceTemplates:
+      - name: timeseries
+        spec:
+          type: ClusterIP
+          ports:
+            - name: http
+              port: 8123
+            - name: tcp
+              port: 9000
+            - name: prometheus
+              port: 9363
+    podTemplates:
+      - name: timeseries
+        spec:
+          volumes:
+            - name: initdb-volume
+              configMap:
+                name: clickhouse-initdb-schema
+                defaultMode: 0555
+          containers:
+            - name: clickhouse
+              image: clickhouse/clickhouse-server:latest
+              env:
+                - name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
+                  value: "true"
+              volumeMounts:
+                - name: initdb-volume
+                  mountPath: /docker-entrypoint-initdb.d
+                  readOnly: true
+  configuration:
+    profiles:
+      default/allow_experimental_time_series_table: 1
+    files:
+      config.d/prometheus_read_and_write.xml: |
+        <clickhouse>
+          <prometheus>
+            <port>9363</port>
+            <handlers>
+              <my_rule_1>
+                <url>/write</url>
+                <handler>
+                  <type>remote_write</type>
+                  <database>timeseries_db</database>
+                </handler>
+              </my_rule_1>
+              <my_rule_2>
+                <url>/read</url>
+                <handler>
+                  <type>remote_read</type>
+                  <database>timeseries_db</database>
+                </handler>
+              </my_rule_2>
+            </handlers>
+          </prometheus>
+        </clickhouse>
+
+    clusters:
+      - name: timeseries
+        layout:
+          replicasCount: 1
+          shardsCount: 1
+  defaults:
+    templates:
+      podTemplate: timeseries
+      serviceTemplate: timeseries
\ No newline at end of file
diff --git a/docs/chk-examples/01-chi-simple-with-keeper.yaml b/docs/chk-examples/01-chi-simple-with-keeper.yaml
new file mode 100644
index 000000000..ed4ec54e1
--- /dev/null
+++ b/docs/chk-examples/01-chi-simple-with-keeper.yaml
@@ -0,0 +1,14 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+ name: simple-with-keeper
+spec:
+ configuration:
+ zookeeper:
+ nodes:
+ - host: keeper-simple-1 # This is a service name of chk/simple-1
+ port: 2181
+ clusters:
+ - name: default
+ layout:
+ replicasCount: 2
diff --git a/docs/chk-examples/01-simple-1.yaml b/docs/chk-examples/01-simple-1.yaml
index 3d4195129..538be24ee 100644
--- a/docs/chk-examples/01-simple-1.yaml
+++ b/docs/chk-examples/01-simple-1.yaml
@@ -1,8 +1,9 @@
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
-  name: chk-simple-1
+  name: simple-1
spec:
  configuration:
    clusters:
-      - name: "simple-1"
\ No newline at end of file
+      - name: "cluster1"
+# see 01-chi-simple-with-keeper.yaml for a CHI example using this
\ No newline at end of file
diff --git a/docs/chk-examples/01-simple-3.yaml b/docs/chk-examples/01-simple-3.yaml
index 7f5de4ad0..4f3c87585 100644
--- a/docs/chk-examples/01-simple-3.yaml
+++ b/docs/chk-examples/01-simple-3.yaml
@@ -1,10 +1,10 @@
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
-  name: chk-simple-3
+  name: simple-3
spec:
  configuration:
    clusters:
-      - name: "simple-3"
+      - name: "cluster1"
        layout:
          replicasCount: 3
diff --git a/docs/chk-examples/02-extended-1-node.yaml b/docs/chk-examples/02-extended-1-node.yaml
index b3d85d5e8..28e481bb9 100644
--- a/docs/chk-examples/02-extended-1-node.yaml
+++ b/docs/chk-examples/02-extended-1-node.yaml
@@ -1,28 +1,31 @@
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
-  name: chk-1-node
+  name: extended-1
spec:
  configuration:
    clusters:
-      - name: "simple-1"
+      - name: cluster1
        layout:
          replicasCount: 1
    settings:
      logger/level: "trace"
      logger/console: "true"
      listen_host: "0.0.0.0"
-      keeper_server/storage_path: /var/lib/clickhouse-keeper
-      keeper_server/tcp_port: "2181"
      keeper_server/four_letter_word_white_list: "*"
      keeper_server/coordination_settings/raft_logs_level: "information"
-      keeper_server/raft_configuration/server/port: "9444"
      prometheus/endpoint: "/metrics"
      prometheus/port: "7000"
      prometheus/metrics: "true"
      prometheus/events: "true"
      prometheus/asynchronous_metrics: "true"
      prometheus/status_info: "false"
+
+  defaults:
+    templates:
+      # Templates are specified as default for all clusters
+      podTemplate: default
+
  templates:
    podTemplates:
      - name: default
@@ -30,7 +33,7 @@ spec:
containers:
- name: clickhouse-keeper
imagePullPolicy: IfNotPresent
- image: "clickhouse/clickhouse-keeper:head-alpine"
+ image: "clickhouse/clickhouse-keeper:24.3.5.46"
resources:
requests:
memory: "256M"
@@ -38,13 +41,14 @@ spec:
                limits:
                  memory: "4Gi"
                  cpu: "2"
+          securityContext:
+            fsGroup: 101
+
    volumeClaimTemplates:
      - name: default
-        metadata:
-          name: both-paths
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
-              storage: 25Gi
+              storage: 10Gi
diff --git a/docs/chk-examples/02-extended-3-nodes.yaml b/docs/chk-examples/02-extended-3-nodes.yaml
index ee404be1a..da3cd3f68 100644
--- a/docs/chk-examples/02-extended-3-nodes.yaml
+++ b/docs/chk-examples/02-extended-3-nodes.yaml
@@ -1,28 +1,31 @@
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
-  name: chk-3-nodes
+  name: extended-3
spec:
  configuration:
    clusters:
-      - name: "simple-3"
+      - name: "cluster1"
        layout:
          replicasCount: 3
    settings:
      logger/level: "trace"
      logger/console: "true"
      listen_host: "0.0.0.0"
-      keeper_server/storage_path: /var/lib/clickhouse-keeper
-      keeper_server/tcp_port: "2181"
      keeper_server/four_letter_word_white_list: "*"
      keeper_server/coordination_settings/raft_logs_level: "information"
-      keeper_server/raft_configuration/server/port: "9444"
      prometheus/endpoint: "/metrics"
      prometheus/port: "7000"
      prometheus/metrics: "true"
      prometheus/events: "true"
      prometheus/asynchronous_metrics: "true"
      prometheus/status_info: "false"
+
+  defaults:
+    templates:
+      # Templates are specified as default for all clusters
+      podTemplate: default
+
  templates:
    podTemplates:
      - name: default
@@ -41,7 +44,7 @@ spec:
          containers:
            - name: clickhouse-keeper
              imagePullPolicy: IfNotPresent
-              image: "clickhouse/clickhouse-keeper:head-alpine"
+              image: "clickhouse/clickhouse-keeper:24.3.5.46"
              resources:
                requests:
                  memory: "256M"
@@ -49,6 +52,9 @@ spec:
                limits:
                  memory: "4Gi"
                  cpu: "2"
+          securityContext:
+            fsGroup: 101
+
    volumeClaimTemplates:
      - name: default
        spec:
@@ -57,10 +63,3 @@ spec:
          resources:
            requests:
              storage: 10Gi
-      - name: snapshot-storage-path
-        spec:
-          accessModes:
-            - ReadWriteOnce
-          resources:
-            requests:
-              storage: 10Gi
diff --git a/docs/chk-examples/clickhouse-keeper-1-node-for-test-only.yaml b/docs/chk-examples/clickhouse-keeper-1-node-for-test-only.yaml
index 29ac52e6e..6c1672289 100644
--- a/docs/chk-examples/clickhouse-keeper-1-node-for-test-only.yaml
+++ b/docs/chk-examples/clickhouse-keeper-1-node-for-test-only.yaml
@@ -14,31 +14,38 @@ spec:
    - port: 7000
      name: prometheus
  selector:
-    app: clickhouse-keeper
-    what: node
+    clickhouse-keeper.altinity.com/chk: clickhouse-keeper
+    clickhouse-keeper.altinity.com/ready: "yes"
---
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
  name: clickhouse-keeper
+  labels:
+    app: clickhouse-keeper
spec:
+  defaults:
+    templates:
+      volumeClaimTemplate: data-volume
+      podTemplate: latest-with-volume-mounts
  configuration:
    clusters:
-      - name: "simple-1"
+      - name: "test-only"
        layout:
          replicasCount: 1
-    settings:
-      logger/level: "trace"
-      logger/console: "true"
-      listen_host: "0.0.0.0"
-      keeper_server/storage_path: /var/lib/clickhouse-keeper
-      keeper_server/tcp_port: "2181"
-      keeper_server/four_letter_word_white_list: "*"
-      keeper_server/coordination_settings/raft_logs_level: "information"
-      keeper_server/raft_configuration/server/port: "9444"
-      prometheus/endpoint: "/metrics"
-      prometheus/port: "7000"
-      prometheus/metrics: "true"
-      prometheus/events: "true"
-      prometheus/asynchronous_metrics: "true"
-      prometheus/status_info: "false"
\ No newline at end of file
+  templates:
+    podTemplates:
+      - name: latest-with-volume-mounts
+        spec:
+          containers:
+            - name: clickhouse-keeper
+              imagePullPolicy: Always
+              image: "clickhouse/clickhouse-keeper:latest-alpine"
+    volumeClaimTemplates:
+      - name: data-volume
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
diff --git a/docs/chk-examples/clickhouse-keeper-3-node-for-test-only-version-24.yaml b/docs/chk-examples/clickhouse-keeper-3-node-for-test-only-version-24.yaml
new file mode 100644
index 000000000..8c0d8c905
--- /dev/null
+++ b/docs/chk-examples/clickhouse-keeper-3-node-for-test-only-version-24.yaml
@@ -0,0 +1,60 @@
+---
+# Fake Service to use as a drop-in replacement in tests
+apiVersion: v1
+kind: Service
+metadata:
+  # DNS would be like zookeeper.namespace.svc
+  name: zookeeper
+  labels:
+    app: zookeeper
+spec:
+  ports:
+    - port: 2181
+      name: client
+    - port: 7000
+      name: prometheus
+  selector:
+    clickhouse-keeper.altinity.com/chk: clickhouse-keeper
+    clickhouse-keeper.altinity.com/ready: "yes"
+---
+apiVersion: "clickhouse-keeper.altinity.com/v1"
+kind: "ClickHouseKeeperInstallation"
+metadata:
+  name: clickhouse-keeper
+spec:
+  defaults:
+    templates:
+      podTemplate: default
+      volumeClaimTemplate: default
+  templates:
+    podTemplates:
+      - name: default
+        spec:
+          containers:
+            - name: clickhouse-keeper
+              imagePullPolicy: IfNotPresent
+              # IMPORTANT !!!
+              # The clickhouse-keeper:24.3.5.46 version IS CHECKED IN TESTS and can be changed with TESTS only!
+              # DO NOT CHANGE THE VERSION !
+              image: "clickhouse/clickhouse-keeper:24.3.5.46"
+    volumeClaimTemplates:
+      - name: default
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
+  configuration:
+    clusters:
+      - name: "test-only"
+        layout:
+          replicasCount: 3
+    settings:
+      logger/level: "trace"
+      prometheus/endpoint: "/metrics"
+      prometheus/port: "7000"
+      prometheus/metrics: "true"
+      prometheus/events: "true"
+      prometheus/asynchronous_metrics: "true"
+      prometheus/status_info: "false"
diff --git a/docs/chk-examples/clickhouse-keeper-3-node-for-test-only.yaml b/docs/chk-examples/clickhouse-keeper-3-node-for-test-only.yaml
index fa978472e..67beb494c 100644
--- a/docs/chk-examples/clickhouse-keeper-3-node-for-test-only.yaml
+++ b/docs/chk-examples/clickhouse-keeper-3-node-for-test-only.yaml
@@ -14,31 +14,38 @@ spec:
    - port: 7000
      name: prometheus
  selector:
-    app: clickhouse-keeper
-    what: node
+    clickhouse-keeper.altinity.com/chk: clickhouse-keeper
+    clickhouse-keeper.altinity.com/ready: "yes"
---
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
  name: clickhouse-keeper
+  labels:
+    app: clickhouse-keeper
spec:
+  defaults:
+    templates:
+      volumeClaimTemplate: data-volume
+      podTemplate: latest-with-volume-mounts
  configuration:
    clusters:
-      - name: "simple-3"
+      - name: "test-only"
        layout:
          replicasCount: 3
-    settings:
-      logger/level: "trace"
-      logger/console: "true"
-      listen_host: "0.0.0.0"
-      keeper_server/storage_path: /var/lib/clickhouse-keeper
-      keeper_server/tcp_port: "2181"
-      keeper_server/four_letter_word_white_list: "*"
-      keeper_server/coordination_settings/raft_logs_level: "information"
-      keeper_server/raft_configuration/server/port: "9444"
-      prometheus/endpoint: "/metrics"
-      prometheus/port: "7000"
-      prometheus/metrics: "true"
-      prometheus/events: "true"
-      prometheus/asynchronous_metrics: "true"
-      prometheus/status_info: "false"
\ No newline at end of file
+  templates:
+    podTemplates:
+      - name: latest-with-volume-mounts
+        spec:
+          containers:
+            - name: clickhouse-keeper
+              imagePullPolicy: Always
+              image: "clickhouse/clickhouse-keeper:latest-alpine"
+    volumeClaimTemplates:
+      - name: data-volume
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
diff --git a/docs/keeper_migration_from_23_to_24.md b/docs/keeper_migration_from_23_to_24.md
new file mode 100644
index 000000000..959804e7f
--- /dev/null
+++ b/docs/keeper_migration_from_23_to_24.md
@@ -0,0 +1,48 @@
+Altinity Operator 0.23.x used an experimental implementation of the Keeper resource that is not compatible with 0.24.0 and above. A direct upgrade will result in the loss of Keeper data, so the dependent ClickHouse cluster will turn read-only.
+
+Here are some of the differences for a CHK named 'test'.
+
+| | 0.23.x | 0.24+ |
+| --- | ------ | ----- |
+| Pod name | test-0 | chk-test-simple-0-0-0 |
+| Service name | test | keeper-test |
+| PVC name | both-paths-test-0 | default-chk-test-0-0-0 |
| Volume mounts | - mountPath: /var/lib/clickhouse\_keeper<br/>&nbsp;&nbsp;name: working-dir<br/>- mountPath: /var/lib/clickhouse\_keeper/coordination/logs<br/>&nbsp;&nbsp;name: both-paths<br/>&nbsp;&nbsp;subPath: logs<br/>- mountPath: /var/lib/clickhouse\_keeper/coordination/snapshots<br/>&nbsp;&nbsp;name: both-paths<br/>&nbsp;&nbsp;subPath: snapshots | - mountPath: /var/lib/clickhouse-keeper<br/>&nbsp;&nbsp;name: default |
+
+There are no backwards compatibility guarantees for experimental features. Migration is possible using a manual procedure if needed.
+
+The biggest problem is the volume. In order to remap the volume, the following steps need to be done (a consolidated script sketch follows the list):
+
+1. Find the Persistent Volume (PV) in the old CHK installation
+2. Patch it, setting persistentVolumeReclaimPolicy to 'Retain':
+
+`kubectl patch pv $PV -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'`
+
+3. Delete the old CHK installation
+4. Delete the old PVC, since it is not deleted automatically
+5. Patch the PV one more time, removing claimRef. That will make the volume available for remounting:
+
+`kubectl patch pv $PV -p '{"spec":{"claimRef": null}}'`
+
+6. Upgrade the operator to 0.24.x
+7. Deploy the new CHK with the following changes:
+    * Add 'volumeName' to the CHK volumeClaimTemplate, referencing the old volume:
+
+```
+  volumeClaimTemplates:
+    - name: default
+      spec:
+        ...
+        volumeName: $PV
+```
+
+    * Add settings to place Raft logs and snapshots in the folders matching the old operator's layout:
+
+```
+ keeper_server/log_storage_path: /var/lib/clickhouse-keeper/logs
+ keeper_server/snapshot_storage_path: /var/lib/clickhouse-keeper/snapshots
+```
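+
+For reference, steps 1-5 can be scripted roughly as follows. This is a sketch only: it assumes a CHK named 'test' in the current namespace, the `chk` short resource name registered by the operator, and the 0.23.x PVC naming from the table above; verify every name against your installation first.
+
+```bash
+# 1. Find the PV behind the old Keeper PVC
+PV=$(kubectl get pvc both-paths-test-0 -o jsonpath='{.spec.volumeName}')
+# 2. Keep the volume around after its PVC is gone
+kubectl patch pv $PV -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+# 3-4. Delete the old CHK and its leftover PVC
+kubectl delete chk test
+kubectl delete pvc both-paths-test-0
+# 5. Make the volume available for re-binding by the new CHK
+kubectl patch pv $PV -p '{"spec":{"claimRef": null}}'
+```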
+
+Optionally, a serviceTemplate can also be added matching the old service name in order to avoid changes in the CHI.
+
+Please refer to [this example](https://github.com/Altinity/clickhouse-operator/blob/0.24.0/tests/e2e/manifests/chk/test-051-chk-chop-upgrade-3.yaml) and a tested [sequence of steps](https://github.com/Altinity/clickhouse-operator/blob/9d0fc9c9bb3532e0313b0405b02d147c958d3dff/tests/e2e/test_operator.py#L4868).
diff --git a/docs/operator_installation_details.md b/docs/operator_installation_details.md
index 1c63275de..423f0302d 100644
--- a/docs/operator_installation_details.md
+++ b/docs/operator_installation_details.md
@@ -1,183 +1,183 @@
-# Install ClickHouse Operator
-
-# Prerequisites
-
-1. Kubernetes instance with the following version considerations:
- 1. `clickhouse-operator` versions **before** `0.16.0` is compatible with [Kubenetes after `1.16` and prior `1.22`](https://kubernetes.io/releases/).
- 1. `clickhouse-operator` versions `0.16.0` **and after** is compatible [Kubernetes version `1.16` and after](https://kubernetes.io/releases/).
-1. Properly configured `kubectl`
-1. `curl`
-
-Verify the Docker manifest is available based on the version table, replacing `{OPERATOR_VERSION}` with the specific version. For example, for version `0.16.0`, the URL would be `https://github.com/Altinity/clickhouse-operator/raw/0.16.0/deploy/operator/clickhouse-operator-install-bundle.yaml`.
-
-| `clickhouse-operator` version | Kubernetes version | Kubernetes manifest URL |
-|---|---|---|
-| Current | Kubernetes 1.16+ | https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml |
-| Current | Kubernetes before 1.16 | **(Beta)** https://github.com/Altinity/clickhouse-operator/raw/master/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml |
-| `0.16.0` and greater | Kubernetes 1.16+ | https://github.com/Altinity/clickhouse-operator/raw/{OPERATOR_VERSION}/deploy/operator/clickhouse-operator-install-bundle.yaml |
-| Before `0.16.0` | Kubernetes after 1.16 and before 1.22 | kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/{OPERATOR_VERSION}/deploy/operator/clickhouse-operator-install.yaml |
-
-[clickhouse-operator-install-bundle.yaml][clickhouse-operator-install-bundle.yaml] file availability.
-In is located in `deploy/operator` folder inside `clickhouse-operator` sources.
-
-## Install via kubectl
-
-Operator installation process is quite straightforward and consists of one main step - deploy **ClickHouse operator**.
-We'll apply operator manifest directly from github repo
-```bash
-kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml
-```
-
-The following results are expected:
-```text
-customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created
-serviceaccount/clickhouse-operator created
-clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator created
-deployment.apps/clickhouse-operator configured
-```
-
-## Verify operator is up and running
-
-Operator is deployed in **kube-system** namespace.
-
-```bash
-kubectl get pods --namespace kube-system
-```
-
-Expected results:
-```text
-NAME READY STATUS RESTARTS AGE
-...
-clickhouse-operator-5c46dfc7bd-7cz5l 1/1 Running 0 43m
-...
-```
-
-
-## Install via helm
-
-since 0.20.1 version official clickhouse-operator helm chart, also available
-
-installation
-```bash
-helm repo add clickhouse-operator https://docs.altinity.com/clickhouse-operator/
-helm install clickhouse-operator clickhouse-operator/altinity-clickhouse-operator
-```
-upgrade
-```bash
-helm repo upgrade clickhouse-operator
-helm upgrade clickhouse-operator clickhouse-operator/altinity-clickhouse-operator
-```
-
-Look https://github.com/Altinity/clickhouse-operator/tree/master/deploy/helm/clickhouse-operator/ for details
-
-## Resources Description
-
-Let's walk over all resources created along with ClickHouse operator, which are:
-1. Custom Resource Definition
-1. Service account
-1. Cluster Role Binding
-1. Deployment
-
-
-### Custom Resource Definition
-```text
-customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created
-```
-New [Custom Resource Definition][customresourcedefinitions] named **ClickHouseInstallation** is created.
-k8s API is extended with new kind `ClickHouseInstallation` and we'll be able to manage k8s resource of `kind: ClickHouseInstallation`
-
-### Service Account
-```text
-serviceaccount/clickhouse-operator created
-```
-New [Service Account][configure-service-account] named **clickhouse-operator** is created.
-A service account provides an identity used to contact the `apiserver` by the processes that run in a Pod.
-Processes in containers inside pods can contact the `apiserver`, and when they do, they are authenticated as a particular `Service Account` - `clickhouse-operator` in this case.
-
-### Cluster Role Binding
-```text
-clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator created
-```
-New [CluserRoleBinding][rolebinding-and-clusterrolebinding] named **clickhouse-operator** is created.
-A role binding grants the permissions defined in a role to a set of users.
-It holds a reference to the role being granted to the list of subjects (users, groups, or service accounts).
-In this case Role
-```yaml
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-```
-is being granted to
-```yaml
-subjects:
- - kind: ServiceAccount
- name: clickhouse-operator
- namespace: kube-system
-```
-`clickhouse-operator` Service Account created earlier.
-Permissions are granted cluster-wide with a `ClusterRoleBinding`.
-
-### Deployment
-```text
-deployment.apps/clickhouse-operator configured
-```
-New [Deployment][deployment] named **clickhouse-operator** is created.
-ClickHouse operator app would be run by this deployment in `kube-system` namespace.
-
-## Verify Resources
-
-Check Custom Resource Definition
-```bash
-kubectl get customresourcedefinitions
-```
-Expected result
-```text
-NAME CREATED AT
-...
-clickhouseinstallations.clickhouse.altinity.com 2019-01-25T10:17:57Z
-...
-```
-
-Check Service Account
-```bash
-kubectl get serviceaccounts -n kube-system
-```
-Expected result
-```text
-NAME SECRETS AGE
-...
-clickhouse-operator 1 27h
-...
-```
-
-Check Cluster Role Binding
-```bash
-kubectl get clusterrolebinding
-```
-Expected result
-```text
-NAME AGE
-...
-clickhouse-operator 31m
-...
-
-```
-Check deployment
-```bash
-kubectl get deployments --namespace kube-system
-```
-Expected result
-```text
-NAME READY UP-TO-DATE AVAILABLE AGE
-...
-clickhouse-operator 1/1 1 1 31m
-...
-
-```
-
-[clickhouse-operator-install-bundle.yaml]: ../deploy/operator/clickhouse-operator-install-bundle.yaml
-[customresourcedefinitions]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions
-[configure-service-account]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-[rolebinding-and-clusterrolebinding]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding
-[deployment]: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
+# Install ClickHouse Operator
+
+# Prerequisites
+
+1. Kubernetes instance with the following version considerations:
+    1. `clickhouse-operator` versions **before** `0.16.0` are compatible with [Kubernetes after `1.16` and prior to `1.22`](https://kubernetes.io/releases/).
+    1. `clickhouse-operator` versions `0.16.0` **and after** are compatible with [Kubernetes version `1.16` and after](https://kubernetes.io/releases/).
+1. Properly configured `kubectl`
+1. `curl`
+
+Verify the installation manifest is available based on the version table below, replacing `{OPERATOR_VERSION}` with the specific version. For example, for version `0.16.0`, the URL would be `https://github.com/Altinity/clickhouse-operator/raw/0.16.0/deploy/operator/clickhouse-operator-install-bundle.yaml`.
+
+| `clickhouse-operator` version | Kubernetes version | Kubernetes manifest URL |
+|---|---|---|
+| Current | Kubernetes 1.16+ | https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml |
+| Current | Kubernetes before 1.16 | **(Beta)** https://github.com/Altinity/clickhouse-operator/raw/master/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml |
+| `0.16.0` and greater | Kubernetes 1.16+ | https://github.com/Altinity/clickhouse-operator/raw/{OPERATOR_VERSION}/deploy/operator/clickhouse-operator-install-bundle.yaml |
+| Before `0.16.0` | Kubernetes after 1.16 and before 1.22 | kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/{OPERATOR_VERSION}/deploy/operator/clickhouse-operator-install.yaml |
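+
+For instance, availability of the manifest for a specific version can be checked with `curl` (a quick sketch; `0.16.0` is just an example version):
+
+```bash
+OPERATOR_VERSION=0.16.0
+curl -fsSL -o /dev/null "https://github.com/Altinity/clickhouse-operator/raw/${OPERATOR_VERSION}/deploy/operator/clickhouse-operator-install-bundle.yaml" \
+  && echo "manifest available" \
+  || echo "manifest missing"
+```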
+
+The [clickhouse-operator-install-bundle.yaml][clickhouse-operator-install-bundle.yaml] file is also available locally.
+It is located in the `deploy/operator` folder inside the `clickhouse-operator` sources.
+
+## Install via kubectl
+
+The operator installation process is quite straightforward and consists of one main step: deploying the **ClickHouse operator**.
+We'll apply the operator manifest directly from the GitHub repo:
+```bash
+kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml
+```
+
+The following results are expected:
+```text
+customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created
+serviceaccount/clickhouse-operator created
+clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator created
+deployment.apps/clickhouse-operator configured
+```
+
+## Verify operator is up and running
+
+The operator is deployed in the **kube-system** namespace.
+
+```bash
+kubectl get pods --namespace kube-system
+```
+
+Expected results:
+```text
+NAME                                   READY   STATUS    RESTARTS   AGE
+...
+clickhouse-operator-5c46dfc7bd-7cz5l   1/1     Running   0          43m
+...
+```
+
+
+## Install via helm
+
+Since version 0.20.1, an official clickhouse-operator Helm chart is also available.
+
+Installation:
+```bash
+helm repo add clickhouse-operator https://docs.altinity.com/clickhouse-operator/
+helm install clickhouse-operator clickhouse-operator/altinity-clickhouse-operator
+```
+Upgrade:
+```bash
+helm repo update clickhouse-operator
+helm upgrade clickhouse-operator clickhouse-operator/altinity-clickhouse-operator
+```
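+
+To install the chart into a dedicated namespace instead, the standard Helm flags apply (the namespace name below is only an example):
+
+```bash
+helm install clickhouse-operator clickhouse-operator/altinity-clickhouse-operator \
+  --namespace clickhouse-operator --create-namespace
+```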
+
+See https://github.com/Altinity/clickhouse-operator/tree/master/deploy/helm/clickhouse-operator/ for details.
+
+## Resources Description
+
+Let's walk through all the resources created along with the ClickHouse operator:
+1. Custom Resource Definition
+1. Service account
+1. Cluster Role Binding
+1. Deployment
+
+
+### Custom Resource Definition
+```text
+customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com created
+```
+A new [Custom Resource Definition][customresourcedefinitions] named **ClickHouseInstallation** is created.
+The k8s API is extended with the new kind `ClickHouseInstallation`, and we'll be able to manage k8s resources of `kind: ClickHouseInstallation`.
+
+### Service Account
+```text
+serviceaccount/clickhouse-operator created
+```
+A new [Service Account][configure-service-account] named **clickhouse-operator** is created.
+A service account provides an identity used to contact the `apiserver` by the processes that run in a Pod.
+Processes in containers inside pods can contact the `apiserver`, and when they do, they are authenticated as a particular `Service Account`, `clickhouse-operator` in this case.
+
+### Cluster Role Binding
+```text
+clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator created
+```
+A new [ClusterRoleBinding][rolebinding-and-clusterrolebinding] named **clickhouse-operator** is created.
+A role binding grants the permissions defined in a role to a set of users.
+It holds a reference to the role being granted to the list of subjects (users, groups, or service accounts).
+In this case, the role
+```yaml
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+```
+is being granted to
+```yaml
+subjects:
+  - kind: ServiceAccount
+    name: clickhouse-operator
+    namespace: kube-system
+```
+the `clickhouse-operator` Service Account created earlier.
+Permissions are granted cluster-wide with a `ClusterRoleBinding`.
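+
+The binding can be inspected with plain `kubectl`:
+
+```bash
+kubectl describe clusterrolebinding clickhouse-operator
+```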
+
+### Deployment
+```text
+deployment.apps/clickhouse-operator configured
+```
+A new [Deployment][deployment] named **clickhouse-operator** is created.
+The ClickHouse operator app is run by this deployment in the `kube-system` namespace.
+
+## Verify Resources
+
+Check Custom Resource Definition
+```bash
+kubectl get customresourcedefinitions
+```
+Expected result
+```text
+NAME                                              CREATED AT
+...
+clickhouseinstallations.clickhouse.altinity.com   2019-01-25T10:17:57Z
+...
+```
+
+Check Service Account
+```bash
+kubectl get serviceaccounts -n kube-system
+```
+Expected result
+```text
+NAME                  SECRETS   AGE
+...
+clickhouse-operator   1         27h
+...
+```
+
+Check Cluster Role Binding
+```bash
+kubectl get clusterrolebinding
+```
+Expected result
+```text
+NAME                  AGE
+...
+clickhouse-operator   31m
+...
+
+```
+Check deployment
+```bash
+kubectl get deployments --namespace kube-system
+```
+Expected result
+```text
+NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
+...
+clickhouse-operator   1/1     1            1           31m
+...
+
+```
+
+[clickhouse-operator-install-bundle.yaml]: ../deploy/operator/clickhouse-operator-install-bundle.yaml
+[customresourcedefinitions]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions
+[configure-service-account]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+[rolebinding-and-clusterrolebinding]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding
+[deployment]: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
diff --git a/docs/quick_start.md b/docs/quick_start.md
index 48bb176f4..6e650a9f1 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -1,389 +1,389 @@
-# Quick Start Guides
-
-# Table of Contents
-
-* [Prerequisites](#prerequisites)
-* [ClickHouse Operator Installation](#clickhouse-operator-installation)
-* [Building ClickHouse Operator from Sources](#building-clickhouse-operator-from-sources)
-* [Examples](#examples)
- * [Trivial Example](#trivial-example)
- * [Connect to ClickHouse Database](#connect-to-clickhouse-database)
- * [Simple Persistent Volume Example](#simple-persistent-volume-example)
- * [Custom Deployment with Pod and VolumeClaim Templates](#custom-deployment-with-pod-and-volumeclaim-templates)
- * [Custom Deployment with Specific ClickHouse Configuration](#custom-deployment-with-specific-clickhouse-configuration)
-
-# Prerequisites
-
-1. Kubernetes cluster that observes the following version considerations:
- 1. `clickhouse-operator` versions **before** `0.16.0` are compatible with [Kubenetes after `1.16` and prior `1.22`](https://kubernetes.io/releases/).
- 1. `clickhouse-operator` versions `0.16.0` **and after** are compatible [Kubernetes version `1.16` and after](https://kubernetes.io/releases/).
-1. Properly configured `kubectl`
-1. `curl`
-
-# ClickHouse Operator Installation
-
-Apply `clickhouse-operator` installation manifest. The simplest way - directly from `github`.
-
-## **In case you are OK to install operator into `kube-system` namespace**
-
-just run:
-```bash
-kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml
-```
-## **If you want to install operator on kubernetes version prior to `1.17` in `kube-system` namespace**
-
-just run:
-```bash
-kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
-```
-
-## **In case you would like to customize installation parameters**,
-
-such as namespace where to install operator or operator's image, use the special installer script.
-```bash
-curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator-web-installer/clickhouse-operator-install.sh | OPERATOR_NAMESPACE=test-clickhouse-operator bash
-```
-Take into account explicitly specified namespace
-```bash
-OPERATOR_NAMESPACE=test-clickhouse-operator
-```
-This namespace would be created and used to install `clickhouse-operator` into.
-Install script would download some `.yaml` and `.xml` files and install `clickhouse-operator` into specified namespace.
-After installation **clickhouse-operator** will watch custom resources like a `kind: ClickhouseInstallation` only in `test-clickhouse-operator` namespace.
-
-If no `OPERATOR_NAMESPACE` specified, as:
-```bash
-cd ~
-curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator-web-installer/clickhouse-operator-install.sh | bash
-```
-installer will install **clickhouse-operator** into `kube-system` namespace and will watch custom resources like a `kind: ClickhouseInstallation` in all available namespaces.
-
-
-## **In case you can not run scripts from the Internet in your protected environment**,
-
-you can download manually [this template file][clickhouse-operator-install-template.yaml]
-and edit it according to your choice. After that apply it with `kubectl`. Or you can use this snippet instead:
-```bash
-#!/bin/bash
-
-# Namespace to install operator into
-OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-test-clickhouse-operator}"
-# Namespace to install metrics-exporter into
-METRICS_EXPORTER_NAMESPACE="${OPERATOR_NAMESPACE}"
-# Operator's docker image
-OPERATOR_IMAGE="${OPERATOR_IMAGE:-altinity/clickhouse-operator:latest}"
-# Metrics exporter's docker image
-METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-altinity/metrics-exporter:latest}"
-
-# Setup clickhouse-operator into specified namespace
-kubectl apply --namespace="${OPERATOR_NAMESPACE}" -f <( \
- curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml | \
- OPERATOR_IMAGE="${OPERATOR_IMAGE}" \
- OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \
- METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE}" \
- METRICS_EXPORTER_NAMESPACE="${METRICS_EXPORTER_NAMESPACE}" \
- envsubst \
-)
-```
-
-## Operator installation process
-```text
-Setup ClickHouse Operator into test-clickhouse-operator namespace
-namespace/test-clickhouse-operator created
-customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com configured
-serviceaccount/clickhouse-operator created
-clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator configured
-service/clickhouse-operator-metrics created
-configmap/etc-clickhouse-operator-files created
-configmap/etc-clickhouse-operator-confd-files created
-configmap/etc-clickhouse-operator-configd-files created
-configmap/etc-clickhouse-operator-templatesd-files created
-configmap/etc-clickhouse-operator-usersd-files created
-deployment.apps/clickhouse-operator created
-```
-
-Check `clickhouse-operator` is running:
-```bash
-kubectl get pods -n test-clickhouse-operator
-```
-```text
-NAME READY STATUS RESTARTS AGE
-clickhouse-operator-5ddc6d858f-drppt 1/1 Running 0 1m
-```
-
-## Building ClickHouse Operator from Sources
-
-Complete instructions on how to build ClickHouse operator from sources as well as how to build a docker image and use it inside `kubernetes` described [here][build_from_sources].
-
-# Examples
-
-There are several ready-to-use [ClickHouseInstallation examples][chi-examples]. Below are a few to start with.
-
-## Create Custom Namespace
-It is a good practice to have all components run in dedicated namespaces. Let's run examples in `test` namespace
-```bash
-kubectl create namespace test-clickhouse-operator
-```
-```text
-namespace/test created
-```
-
-## Trivial example
-
-This is the trivial [1 shard 1 replica][01-simple-layout-01-1shard-1repl.yaml] example.
-
-**WARNING**: Do not use it for anything other than 'Hello, world!'. It does not have persistent storage!
-
-```bash
-kubectl apply -n test-clickhouse-operator -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml
-```
-```text
-clickhouseinstallation.clickhouse.altinity.com/simple-01 created
-```
-
-Installation specification is straightforward and defines 1-replica cluster:
-```yaml
-apiVersion: "clickhouse.altinity.com/v1"
-kind: "ClickHouseInstallation"
-metadata:
- name: "simple-01"
-spec:
- configuration:
- users:
- # printf 'test_password' | sha256sum
- test_user/password_sha256_hex: 10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01
- # to allow access outside from kubernetes
- test_user/networks/ip:
- - 0.0.0.0/0
- clusters:
- - name: "simple"
-```
-
-Once cluster is created, there are two checks to be made.
-
-```bash
-kubectl get pods -n test-clickhouse-operator
-```
-```text
-NAME READY STATUS RESTARTS AGE
-chi-b3d29f-a242-0-0-0 1/1 Running 0 10m
-```
-
-Ensure you see the 'Running' status. Also check services created by an operator:
-
-```bash
-kubectl get service -n test-clickhouse-operator
-```
-```text
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-chi-b3d29f-a242-0-0 ClusterIP None 8123/TCP,9000/TCP,9009/TCP 11m
-clickhouse-example-01 LoadBalancer 100.64.167.170 abc-123.us-east-1.elb.amazonaws.com 8123:30954/TCP,9000:32697/TCP 11m
-```
-
-ClickHouse is up and running!
-
-## Connect to ClickHouse Database
-
-There are several ways to connect to ClickHouse on Kubenetes.
-
-1. In case the previous command `kubectl get service -n test-clickhouse-operator` reported **EXTERNAL-IP** (abc-123.us-east-1.elb.amazonaws.com in our case) we can directly access ClickHouse with:
-```bash
-clickhouse-client -h abc-123.us-east-1.elb.amazonaws.com -u test_user --password test_password
-```
-```text
-ClickHouse client version 18.14.12.
-Connecting to abc-123.us-east-1.elb.amazonaws.com:9000.
-Connected to ClickHouse server version 19.4.3 revision 54416.
-```
-2. In case there is no **EXTERNAL-IP** available, we can access ClickHouse from inside Kubernetes cluster
-```bash
-kubectl -n test-clickhouse-operator exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client
-```
-```text
-ClickHouse client version 19.4.3.11.
-Connecting to localhost:9000 as user default.
-Connected to ClickHouse server version 19.4.3 revision 54416.
-```
-3. If we have a clickhouse client installed locally we can also use port forwarding
-```bash
-kubectl -n test-clickhouse-operator port-forward chi-b3d29f-a242-0-0-0 9000:9000 &
-clickhouse-client
-```
-```text
-ClickHouse client version 19.4.3.11.
-Connecting to localhost:9000 as user default.
-Connected to ClickHouse server version 19.4.3 revision 54416.
-```
-
-## Simple Persistent Volume Example
-
-In cases where Dynamic Volume Provisioning is available - ex.: running on AWS - we are able to use PersistentVolumeClaims
-Manifest is [available in examples][03-persistent-volume-01-default-volume.yaml]
-
-```yaml
-apiVersion: "clickhouse.altinity.com/v1"
-kind: "ClickHouseInstallation"
-metadata:
- name: "pv-simple"
-spec:
- defaults:
- templates:
- dataVolumeClaimTemplate: data-volume-template
- logVolumeClaimTemplate: log-volume-template
- configuration:
- clusters:
- - name: "simple"
- layout:
- shardsCount: 1
- replicasCount: 1
- templates:
- volumeClaimTemplates:
- - name: data-volume-template
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
- - name: log-volume-template
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 100Mi
-```
-
-## Custom Deployment with Pod and VolumeClaim Templates
-
-Let's install more complex example with:
-1. Deployment specified
-1. Pod template
-1. VolumeClaim template
-
-Manifest is [available in examples][03-persistent-volume-02-pod-template.yaml]
-
-```yaml
-apiVersion: "clickhouse.altinity.com/v1"
-kind: "ClickHouseInstallation"
-metadata:
- name: "pv-log"
-spec:
- configuration:
- clusters:
- - name: "deployment-pv"
- # Templates are specified for this cluster explicitly
- templates:
- podTemplate: pod-template-with-volumes
- layout:
- shardsCount: 2
- replicasCount: 2
-
- templates:
- podTemplates:
- - name: pod-template-with-volumes
- spec:
- containers:
- - name: clickhouse
- image: clickhouse/clickhouse-server:23.8
- volumeMounts:
- - name: data-storage-vc-template
- mountPath: /var/lib/clickhouse
- - name: log-storage-vc-template
- mountPath: /var/log/clickhouse-server
-
- volumeClaimTemplates:
- - name: data-storage-vc-template
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 3Gi
- - name: log-storage-vc-template
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 2Gi
-```
-
-## Custom Deployment with Specific ClickHouse Configuration
-
-You can tell the operator to configure your ClickHouse, as shown in the example below ([link to the manifest][05-settings-01-overview.yaml]):
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
- name: clickhouse-credentials
-type: Opaque
-stringData:
- testpwduser1: password
- testpwduser2: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
- testpwduser3: 8bd66e4932b4968ec111da24d7e42d399a05cb90bf96f587c3fa191c56c401f8
----
-apiVersion: "clickhouse.altinity.com/v1"
-kind: "ClickHouseInstallation"
-metadata:
- name: "settings-01"
-spec:
- configuration:
- users:
- # test user has 'password' specified, while admin user has 'password_sha256_hex' specified
- test/password: qwerty
- test/networks/ip:
- - "127.0.0.1/32"
- - "192.168.74.1/24"
- test/profile: test_profile
- test/quota: test_quota
- test/allow_databases/database:
- - "dbname1"
- - "dbname2"
- - "dbname3"
- # reference to namespace/name/field in the secret with plain password
- testpwduser1/k8s_secret_password: dev/clickhouse-credentials/testpwduser1
- # reference to the same namespace as operator is running in/name/field in the secret with sha256 password
- testpwduser2/k8s_secret_password_sha256_hex: clickhouse-credentials/testpwduser2
- testpwduser3/k8s_secret_password_double_sha1_hex: clickhouse-credentials/testpwduser3
- # admin use has 'password_sha256_hex' so actual password value is not published
- admin/password_sha256_hex: 8bd66e4932b4968ec111da24d7e42d399a05cb90bf96f587c3fa191c56c401f8
- admin/networks/ip: "127.0.0.1/32"
- admin/profile: default
- admin/quota: default
- # readonly user has 'password' field specified, not 'password_sha256_hex' as admin user above
- readonly/password: readonly_password
- readonly/profile: readonly
- readonly/quota: default
- profiles:
- test_profile/max_memory_usage: 1000000000
- test_profile/readonly: 1
- readonly/readonly: 1
- quotas:
- test_quota/interval/duration: 3600
- settings:
- compression/case/method: zstd
- disable_internal_dns_cache: 1
- files:
- dict1.xml: |
-
-
-
- source1.csv: |
- a1,b1,c1,d1
- a2,b2,c2,d2
- clusters:
- - name: "standard"
- layout:
- shardsCount: 1
- replicasCount: 1
-```
-
-[build_from_sources]: ./operator_build_from_sources.md
-[clickhouse-operator-install-template.yaml]: ../deploy/operator/clickhouse-operator-install-template.yaml
-[chi-examples]: ./chi-examples/
-[01-simple-layout-01-1shard-1repl.yaml]: ./chi-examples/01-simple-layout-01-1shard-1repl.yaml
-[03-persistent-volume-01-default-volume.yaml]: ./chi-examples/03-persistent-volume-01-default-volume.yaml
-[03-persistent-volume-02-pod-template.yaml]: ./chi-examples/03-persistent-volume-02-pod-template.yaml
-[05-settings-01-overview.yaml]: ./chi-examples/05-settings-01-overview.yaml
+# Quick Start Guides
+
+# Table of Contents
+
+* [Prerequisites](#prerequisites)
+* [ClickHouse Operator Installation](#clickhouse-operator-installation)
+* [Building ClickHouse Operator from Sources](#building-clickhouse-operator-from-sources)
+* [Examples](#examples)
+ * [Trivial Example](#trivial-example)
+ * [Connect to ClickHouse Database](#connect-to-clickhouse-database)
+ * [Simple Persistent Volume Example](#simple-persistent-volume-example)
+ * [Custom Deployment with Pod and VolumeClaim Templates](#custom-deployment-with-pod-and-volumeclaim-templates)
+ * [Custom Deployment with Specific ClickHouse Configuration](#custom-deployment-with-specific-clickhouse-configuration)
+
+# Prerequisites
+
+1. Kubernetes cluster that observes the following version considerations:
+    1. `clickhouse-operator` versions **before** `0.16.0` are compatible with [Kubernetes after `1.16` and prior to `1.22`](https://kubernetes.io/releases/).
+    1. `clickhouse-operator` versions `0.16.0` **and after** are compatible with [Kubernetes version `1.16` and after](https://kubernetes.io/releases/).
+1. Properly configured `kubectl`
+1. `curl`
+
+# ClickHouse Operator Installation
+
+Apply the `clickhouse-operator` installation manifest. The simplest way is directly from the GitHub repo.
+
+## **In case you are OK with installing the operator into the `kube-system` namespace**
+
+just run:
+```bash
+kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml
+```
+## **If you want to install the operator on a Kubernetes version prior to `1.17` in the `kube-system` namespace**
+
+just run:
+```bash
+kubectl apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
+```
+
+## **In case you would like to customize installation parameters**,
+
+such as the namespace to install the operator into or the operator's image, use the special installer script:
+```bash
+curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator-web-installer/clickhouse-operator-install.sh | OPERATOR_NAMESPACE=test-clickhouse-operator bash
+```
+Take into account the explicitly specified namespace:
+```bash
+OPERATOR_NAMESPACE=test-clickhouse-operator
+```
+This namespace will be created and used to install `clickhouse-operator` into.
+The install script downloads some `.yaml` and `.xml` files and installs `clickhouse-operator` into the specified namespace.
+After installation, **clickhouse-operator** will watch custom resources of `kind: ClickhouseInstallation` only in the `test-clickhouse-operator` namespace.
+
+If no `OPERATOR_NAMESPACE` is specified, as in:
+```bash
+cd ~
+curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator-web-installer/clickhouse-operator-install.sh | bash
+```
+the installer will install **clickhouse-operator** into the `kube-system` namespace and will watch custom resources of `kind: ClickhouseInstallation` in all available namespaces.
+
+
+## **In case you can not run scripts from the Internet in your protected environment**,
+
+you can manually download [this template file][clickhouse-operator-install-template.yaml]
+and edit it to your liking. After that, apply it with `kubectl`. Or you can use this snippet instead:
+```bash
+#!/bin/bash
+
+# Namespace to install operator into
+OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-test-clickhouse-operator}"
+# Namespace to install metrics-exporter into
+METRICS_EXPORTER_NAMESPACE="${OPERATOR_NAMESPACE}"
+# Operator's docker image
+OPERATOR_IMAGE="${OPERATOR_IMAGE:-altinity/clickhouse-operator:latest}"
+# Metrics exporter's docker image
+METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-altinity/metrics-exporter:latest}"
+
+# Setup clickhouse-operator into specified namespace
+kubectl apply --namespace="${OPERATOR_NAMESPACE}" -f <( \
+  curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml | \
+    OPERATOR_IMAGE="${OPERATOR_IMAGE}" \
+    OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \
+    METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE}" \
+    METRICS_EXPORTER_NAMESPACE="${METRICS_EXPORTER_NAMESPACE}" \
+    envsubst \
+)
+```
+
+## Operator installation process
+```text
+Setup ClickHouse Operator into test-clickhouse-operator namespace
+namespace/test-clickhouse-operator created
+customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com configured
+serviceaccount/clickhouse-operator created
+clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator configured
+service/clickhouse-operator-metrics created
+configmap/etc-clickhouse-operator-files created
+configmap/etc-clickhouse-operator-confd-files created
+configmap/etc-clickhouse-operator-configd-files created
+configmap/etc-clickhouse-operator-templatesd-files created
+configmap/etc-clickhouse-operator-usersd-files created
+deployment.apps/clickhouse-operator created
+```
+
+Check `clickhouse-operator` is running:
+```bash
+kubectl get pods -n test-clickhouse-operator
+```
+```text
+NAME                                   READY   STATUS    RESTARTS   AGE
+clickhouse-operator-5ddc6d858f-drppt   1/1     Running   0          1m
+```
+
+## Building ClickHouse Operator from Sources
+
+Complete instructions on how to build the ClickHouse operator from sources, as well as how to build a docker image and use it inside `kubernetes`, are described [here][build_from_sources].
+
+# Examples
+
+There are several ready-to-use [ClickHouseInstallation examples][chi-examples]. Below are a few to start with.
+
+## Create Custom Namespace
+It is a good practice to have all components run in dedicated namespaces. Let's run the examples in the `test-clickhouse-operator` namespace:
+```bash
+kubectl create namespace test-clickhouse-operator
+```
+```text
+namespace/test-clickhouse-operator created
+```
+
+## Trivial example
+
+This is the trivial [1 shard 1 replica][01-simple-layout-01-1shard-1repl.yaml] example.
+
+**WARNING**: Do not use it for anything other than 'Hello, world!'. It does not have persistent storage!
+
+```bash
+kubectl apply -n test-clickhouse-operator -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml
+```
+```text
+clickhouseinstallation.clickhouse.altinity.com/simple-01 created
+```
+
+The installation specification is straightforward and defines a 1-replica cluster:
+```yaml
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "simple-01"
+spec:
+  configuration:
+    users:
+      # printf 'test_password' | sha256sum
+      test_user/password_sha256_hex: 10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01
+      # to allow access outside from kubernetes
+      test_user/networks/ip:
+        - 0.0.0.0/0
+    clusters:
+      - name: "simple"
+```
+
+Once the cluster is created, there are two checks to be made.
+
+```bash
+kubectl get pods -n test-clickhouse-operator
+```
+```text
+NAME                    READY   STATUS    RESTARTS   AGE
+chi-b3d29f-a242-0-0-0   1/1     Running   0          10m
+```
+
+Ensure you see the 'Running' status. Also check the services created by the operator:
+
+```bash
+kubectl get service -n test-clickhouse-operator
+```
+```text
+NAME                    TYPE           CLUSTER-IP       EXTERNAL-IP                           PORT(S)                         AGE
+chi-b3d29f-a242-0-0     ClusterIP      None             <none>                                8123/TCP,9000/TCP,9009/TCP      11m
+clickhouse-example-01   LoadBalancer   100.64.167.170   abc-123.us-east-1.elb.amazonaws.com   8123:30954/TCP,9000:32697/TCP   11m
+```
+
+ClickHouse is up and running!
+
+## Connect to ClickHouse Database
+
+There are several ways to connect to ClickHouse on Kubernetes.
+
+1. In case the previous command `kubectl get service -n test-clickhouse-operator` reported an **EXTERNAL-IP** (abc-123.us-east-1.elb.amazonaws.com in our case), we can access ClickHouse directly with:
+```bash
+clickhouse-client -h abc-123.us-east-1.elb.amazonaws.com -u test_user --password test_password
+```
+```text
+ClickHouse client version 18.14.12.
+Connecting to abc-123.us-east-1.elb.amazonaws.com:9000.
+Connected to ClickHouse server version 19.4.3 revision 54416.
+```
+2. In case there is no **EXTERNAL-IP** available, we can access ClickHouse from inside the Kubernetes cluster:
+```bash
+kubectl -n test-clickhouse-operator exec -it chi-b3d29f-a242-0-0-0 -- clickhouse-client
+```
+```text
+ClickHouse client version 19.4.3.11.
+Connecting to localhost:9000 as user default.
+Connected to ClickHouse server version 19.4.3 revision 54416.
+```
+3. If we have a ClickHouse client installed locally, we can also use port forwarding:
+```bash
+kubectl -n test-clickhouse-operator port-forward chi-b3d29f-a242-0-0-0 9000:9000 &
+clickhouse-client
+```
+```text
+ClickHouse client version 19.4.3.11.
+Connecting to localhost:9000 as user default.
+Connected to ClickHouse server version 19.4.3 revision 54416.
+```
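+
+Whichever way you connect, a trivial query makes a good smoke test (host and credentials are the ones from the example above):
+
+```bash
+clickhouse-client -h abc-123.us-east-1.elb.amazonaws.com -u test_user --password test_password \
+  -q "SELECT hostName(), version()"
+```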
+
+## Simple Persistent Volume Example
+
+In cases where Dynamic Volume Provisioning is available, e.g. when running on AWS, we are able to use PersistentVolumeClaims.
+The manifest is [available in examples][03-persistent-volume-01-default-volume.yaml]:
+
+```yaml
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "pv-simple"
+spec:
+  defaults:
+    templates:
+      dataVolumeClaimTemplate: data-volume-template
+      logVolumeClaimTemplate: log-volume-template
+  configuration:
+    clusters:
+      - name: "simple"
+        layout:
+          shardsCount: 1
+          replicasCount: 1
+  templates:
+    volumeClaimTemplates:
+      - name: data-volume-template
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
+      - name: log-volume-template
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 100Mi
+```
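+
+After the manifest is applied, the operator creates one PVC per volume claim template per replica. A quick check (the exact PVC names depend on the operator's naming scheme and will differ):
+
+```bash
+kubectl get pvc -n test-clickhouse-operator
+# each PVC should reach the 'Bound' status once provisioning succeeds
+```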
+
+## Custom Deployment with Pod and VolumeClaim Templates
+
+Let's install a more complex example with:
+1. Deployment specified
+1. Pod template
+1. VolumeClaim template
+
+The manifest is [available in examples][03-persistent-volume-02-pod-template.yaml]:
+
+```yaml
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "pv-log"
+spec:
+  configuration:
+    clusters:
+      - name: "deployment-pv"
+        # Templates are specified for this cluster explicitly
+        templates:
+          podTemplate: pod-template-with-volumes
+        layout:
+          shardsCount: 2
+          replicasCount: 2
+
+  templates:
+    podTemplates:
+      - name: pod-template-with-volumes
+        spec:
+          containers:
+            - name: clickhouse
+              image: clickhouse/clickhouse-server:23.8
+              volumeMounts:
+                - name: data-storage-vc-template
+                  mountPath: /var/lib/clickhouse
+                - name: log-storage-vc-template
+                  mountPath: /var/log/clickhouse-server
+
+    volumeClaimTemplates:
+      - name: data-storage-vc-template
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 3Gi
+      - name: log-storage-vc-template
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 2Gi
+```
+
+## Custom Deployment with Specific ClickHouse Configuration
+
+You can tell the operator to configure your ClickHouse, as shown in the example below ([link to the manifest][05-settings-01-overview.yaml]):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: clickhouse-credentials
+type: Opaque
+stringData:
+  testpwduser1: password
+  testpwduser2: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
+  testpwduser3: 8bd66e4932b4968ec111da24d7e42d399a05cb90bf96f587c3fa191c56c401f8
+---
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "settings-01"
+spec:
+  configuration:
+    users:
+      # test user has 'password' specified, while admin user has 'password_sha256_hex' specified
+      test/password: qwerty
+      test/networks/ip:
+        - "127.0.0.1/32"
+        - "192.168.74.1/24"
+      test/profile: test_profile
+      test/quota: test_quota
+      test/allow_databases/database:
+        - "dbname1"
+        - "dbname2"
+        - "dbname3"
+      # reference to namespace/name/field in the secret with plain password
+      testpwduser1/k8s_secret_password: dev/clickhouse-credentials/testpwduser1
+      # reference to the same namespace as operator is running in/name/field in the secret with sha256 password
+      testpwduser2/k8s_secret_password_sha256_hex: clickhouse-credentials/testpwduser2
+      testpwduser3/k8s_secret_password_double_sha1_hex: clickhouse-credentials/testpwduser3
+      # admin user has 'password_sha256_hex' so the actual password value is not published
+      admin/password_sha256_hex: 8bd66e4932b4968ec111da24d7e42d399a05cb90bf96f587c3fa191c56c401f8
+      admin/networks/ip: "127.0.0.1/32"
+      admin/profile: default
+      admin/quota: default
+      # readonly user has 'password' field specified, not 'password_sha256_hex' as admin user above
+      readonly/password: readonly_password
+      readonly/profile: readonly
+      readonly/quota: default
+    profiles:
+      test_profile/max_memory_usage: 1000000000
+      test_profile/readonly: 1
+      readonly/readonly: 1
+    quotas:
+      test_quota/interval/duration: 3600
+    settings:
+      compression/case/method: zstd
+      disable_internal_dns_cache: 1
+    files:
+      dict1.xml: |
+        <yandex>
+            <!-- ... -->
+        </yandex>
+      source1.csv: |
+        a1,b1,c1,d1
+        a2,b2,c2,d2
+    clusters:
+      - name: "standard"
+        layout:
+          shardsCount: 1
+          replicasCount: 1
+```
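+
+Once applied, the configured users can be verified directly. A sketch, assuming the operator's usual `chi-<chi>-<cluster>-<shard>-<replica>-0` pod naming; the actual pod name may differ:
+
+```bash
+kubectl -n test-clickhouse-operator exec -it chi-settings-01-standard-0-0-0 -- \
+  clickhouse-client --user=test --password=qwerty -q "SELECT currentUser()"
+```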
+
+[build_from_sources]: ./operator_build_from_sources.md
+[clickhouse-operator-install-template.yaml]: ../deploy/operator/clickhouse-operator-install-template.yaml
+[chi-examples]: ./chi-examples/
+[01-simple-layout-01-1shard-1repl.yaml]: ./chi-examples/01-simple-layout-01-1shard-1repl.yaml
+[03-persistent-volume-01-default-volume.yaml]: ./chi-examples/03-persistent-volume-01-default-volume.yaml
+[03-persistent-volume-02-pod-template.yaml]: ./chi-examples/03-persistent-volume-02-pod-template.yaml
+[05-settings-01-overview.yaml]: ./chi-examples/05-settings-01-overview.yaml
diff --git a/go.mod b/go.mod
index 086a6a45e..5a3e18f3a 100644
--- a/go.mod
+++ b/go.mod
@@ -30,6 +30,7 @@ require (
github.com/MakeNowJust/heredoc v1.0.0
github.com/Masterminds/semver/v3 v3.2.0
github.com/go-logr/logr v1.4.1
+ github.com/go-zookeeper/zk v1.0.3
github.com/golang/glog v1.0.0
github.com/google/uuid v1.4.0
github.com/imdario/mergo v0.3.15
@@ -45,6 +46,7 @@ require (
go.opentelemetry.io/otel/metric v1.24.0
go.opentelemetry.io/otel/sdk v1.24.0
go.opentelemetry.io/otel/sdk/metric v1.24.0
+ golang.org/x/sync v0.3.0
gopkg.in/d4l3k/messagediff.v1 v1.2.1
gopkg.in/yaml.v3 v3.0.1
sigs.k8s.io/controller-runtime v0.15.1
@@ -92,11 +94,11 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.10.0 // indirect
- golang.org/x/net v0.17.0 // indirect
+ golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.12.0 // indirect
- golang.org/x/sys v0.17.0 // indirect
- golang.org/x/term v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
+ golang.org/x/sys v0.18.0 // indirect
+ golang.org/x/term v0.18.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.1 // indirect
gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
diff --git a/go.sum b/go.sum
index 0a268925f..d04b39587 100644
--- a/go.sum
+++ b/go.sum
@@ -146,6 +146,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
+github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -574,8 +576,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -645,20 +647,20 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/grafana-dashboard/ClickHouseKeeper_dashboard.json b/grafana-dashboard/ClickHouseKeeper_dashboard.json
index 292828adc..c02ab0e38 100644
--- a/grafana-dashboard/ClickHouseKeeper_dashboard.json
+++ b/grafana-dashboard/ClickHouseKeeper_dashboard.json
@@ -106,13 +106,13 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_avg_latency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperAvgLatency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "avg {{namespace}}.{{pod_name}}",
"refId": "A"
},
{
- "expr": "zk_max_latency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperMaxLatency{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "max {{namespace}}.{{pod_name}}",
"refId": "B"
@@ -206,7 +206,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_num_alive_connections{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseMetrics_KeeperAliveConnections{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"hide": false,
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
@@ -301,14 +301,14 @@
"steppedLine": false,
"targets": [
{
- "expr": "irate(zk_packets_sent{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
+ "expr": "irate(ClickHouseAsyncMetrics_KeeperPacketsSent{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
"hide": false,
"interval": "",
"legendFormat": "OUT {{namespace}}.{{pod_name}}",
"refId": "A"
},
{
- "expr": "-irate(zk_packets_received{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
+ "expr": "-irate(ClickHouseAsyncMetrics_KeeperPacketsReceived{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
"interval": "",
"legendFormat": "IN {{namespace}}.{{pod_name}}",
"refId": "B"
@@ -402,7 +402,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_znode_count{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperZnodeCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -496,7 +496,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_watch_count{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperWatchCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -590,7 +590,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_ephemerals_count{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperEphemeralsCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -684,7 +684,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_approximate_data_size{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperApproximateDataSize{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -784,7 +784,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "irate(zk_outstanding_requests{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
+ "expr": "irate(ClickHouseMetrics_KeeperOutstandingRequests{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}[1m])",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -878,7 +878,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "zk_open_file_descriptor_count{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
+ "expr": "ClickHouseAsyncMetrics_KeeperOpenFileDescriptorCount{namespace=~\"$namespace\", pod_name=~\"$pod_name\", container_name=\"clickhouse-keeper\"}",
"interval": "",
"legendFormat": "{{namespace}}.{{pod_name}}",
"refId": "A"
@@ -941,14 +941,14 @@
"allValue": ".+",
"current": {},
"datasource": "${DS_PROMETHEUS}",
- "definition": "label_values(zk_ruok, namespace)",
+      "definition": "label_values(up{container_name=\"clickhouse-keeper\"}, namespace)",
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "namespace",
"options": [],
- "query": "label_values(zk_ruok, namespace)",
+      "query": "label_values(up{container_name=\"clickhouse-keeper\"}, namespace)",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
@@ -963,14 +963,14 @@
"allValue": ".+",
"current": {},
"datasource": "${DS_PROMETHEUS}",
- "definition": "label_values(zk_ruok, pod_name)",
+      "definition": "label_values(up{container_name=\"clickhouse-keeper\"}, pod_name)",
"hide": 0,
"includeAll": true,
"label": null,
"multi": true,
"name": "pod_name",
"options": [],
- "query": "label_values(zk_ruok, pod_name)",
+      "query": "label_values(up{container_name=\"clickhouse-keeper\"}, pod_name)",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
@@ -1004,4 +1004,4 @@
"title": "ClickHouseKeeper Dashboard",
"uid": "clickhouse-keeper",
"version": 20220214
-}
\ No newline at end of file
+}
diff --git a/pkg/announcer/announcer.go b/pkg/announcer/announcer.go
index 8f36c4a9f..70b50c7bb 100644
--- a/pkg/announcer/announcer.go
+++ b/pkg/announcer/announcer.go
@@ -20,7 +20,7 @@ import (
log "github.com/golang/glog"
- v1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/util/runtime"
)
@@ -176,16 +176,16 @@ func (a Announcer) M(m ...interface{}) Announcer {
switch typed := m[0].(type) {
case string:
b.meta = typed
- case *v1.ClickHouseInstallation:
+ case *api.ClickHouseInstallation:
if typed == nil {
return a
}
b.meta = typed.Namespace + "/" + typed.Name
- if typed.Spec.HasTaskID() {
- b.meta += "/" + typed.Spec.GetTaskID()
+ if typed.GetSpecT().HasTaskID() {
+ b.meta += "/" + typed.GetSpecT().GetTaskID()
}
default:
- if meta, ok := a.findMeta(m[0]); ok {
+ if meta, ok := a.tryToFindNamespaceNameEverywhere(m[0]); ok {
b.meta = meta
} else {
return a
@@ -340,36 +340,33 @@ func (a Announcer) prependFormat(format string) string {
return format
}
-// findMeta
-func (a Announcer) findMeta(m interface{}) (string, bool) {
- if meta, ok := a.findInObjectMeta(m); ok {
+// tryToFindNamespaceNameEverywhere
+func (a Announcer) tryToFindNamespaceNameEverywhere(m interface{}) (string, bool) {
+ if meta, ok := a.findNamespaceName(m); ok {
return meta, ok
}
- if meta, ok := a.findInCHI(m); ok {
- return meta, ok
- }
- if meta, ok := a.findInAddress(m); ok {
+ if meta, ok := a.findCHI(m); ok {
return meta, ok
}
return "", false
}
-// findInObjectMeta
+// findNamespaceName
-func (a Announcer) findInObjectMeta(m interface{}) (string, bool) {
+func (a Announcer) findNamespaceName(m interface{}) (string, bool) {
if m == nil {
return "", false
}
- meta := reflect.ValueOf(m)
- if !meta.IsValid() || meta.IsZero() || ((meta.Kind() == reflect.Ptr) && meta.IsNil()) {
+ value := reflect.ValueOf(m)
+ if !value.IsValid() || value.IsZero() || ((value.Kind() == reflect.Ptr) && value.IsNil()) {
return "", false
}
var namespace, name reflect.Value
- if meta.Kind() == reflect.Ptr {
- namespace = meta.Elem().FieldByName("Namespace")
- name = meta.Elem().FieldByName("Name")
+ if value.Kind() == reflect.Ptr {
+ namespace = value.Elem().FieldByName("Namespace")
+ name = value.Elem().FieldByName("Name")
} else {
- namespace = meta.FieldByName("Namespace")
- name = meta.FieldByName("Name")
+ namespace = value.FieldByName("Namespace")
+ name = value.FieldByName("Name")
}
if !namespace.IsValid() {
return "", false
@@ -380,55 +377,34 @@ func (a Announcer) findInObjectMeta(m interface{}) (string, bool) {
return namespace.String() + "/" + name.String(), true
}
-// findInCHI
-func (a Announcer) findInCHI(m interface{}) (string, bool) {
+// findCHI
+func (a Announcer) findCHI(m interface{}) (string, bool) {
if m == nil {
return "", false
}
- object := reflect.ValueOf(m)
- if !object.IsValid() || object.IsZero() || ((object.Kind() == reflect.Ptr) && object.IsNil()) {
+ value := reflect.ValueOf(m)
+ if !value.IsValid() || value.IsZero() || ((value.Kind() == reflect.Ptr) && value.IsNil()) {
return "", false
}
- chiValue := object.Elem().FieldByName("CHI")
- if !chiValue.IsValid() ||
- chiValue.IsZero() ||
- ((chiValue.Kind() == reflect.Ptr) && chiValue.IsNil()) {
+ // Find CHI
+ var _chi reflect.Value
+ if value.Kind() == reflect.Ptr {
+ _chi = value.Elem().FieldByName("CHI")
+ } else {
+ _chi = value.FieldByName("CHI")
+ }
+ if !_chi.IsValid() || _chi.IsZero() || ((_chi.Kind() == reflect.Ptr) && _chi.IsNil()) {
return "", false
}
- chi, ok := chiValue.Interface().(v1.ClickHouseInstallation)
+ // Cast to CHI
+ chi, ok := _chi.Interface().(api.ClickHouseInstallation)
if !ok {
return "", false
}
res := chi.Namespace + "/" + chi.Name
- if chi.Spec.HasTaskID() {
- res += "/" + chi.Spec.GetTaskID()
+ if chi.GetSpecT().HasTaskID() {
+ res += "/" + chi.GetSpecT().GetTaskID()
}
return res, true
}
-
-// findInAddress
-func (a Announcer) findInAddress(m interface{}) (string, bool) {
- if m == nil {
- return "", false
- }
- address := reflect.ValueOf(m)
- if !address.IsValid() || address.IsZero() || ((address.Kind() == reflect.Ptr) && address.IsNil()) {
- return "", false
- }
- var namespace, name reflect.Value
- if address.Kind() == reflect.Ptr {
- namespace = address.Elem().FieldByName("Namespace")
- name = address.Elem().FieldByName("Name")
- } else {
- namespace = address.FieldByName("Namespace")
- name = address.FieldByName("Name")
- }
- if !namespace.IsValid() {
- return "", false
- }
- if !name.IsValid() {
- return "", false
- }
- return namespace.String() + "/" + name.String(), true
-}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
new file mode 100644
index 000000000..d52406022
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_chk.go
@@ -0,0 +1,655 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/imdario/mergo"
+ "gopkg.in/yaml.v3"
+
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (cr *ClickHouseKeeperInstallation) IsNonZero() bool {
+ return cr != nil
+}
+
+func (cr *ClickHouseKeeperInstallation) GetSpec() apiChi.ICRSpec {
+ return &cr.Spec
+}
+
+func (cr *ClickHouseKeeperInstallation) GetSpecT() *ChkSpec {
+ return &cr.Spec
+}
+
+func (cr *ClickHouseKeeperInstallation) GetSpecA() any {
+ return &cr.Spec
+}
+
+func (cr *ClickHouseKeeperInstallation) GetRuntime() apiChi.ICustomResourceRuntime {
+ return cr.ensureRuntime()
+}
+
+func (cr *ClickHouseKeeperInstallation) ensureRuntime() *ClickHouseKeeperInstallationRuntime {
+ if cr == nil {
+ return nil
+ }
+
+ // Assume that most of the time, we'll see a non-nil value.
+ if cr.runtime != nil {
+ return cr.runtime
+ }
+
+ // Otherwise, we need to acquire a lock to initialize the field.
+ cr.runtimeCreatorMutex.Lock()
+ defer cr.runtimeCreatorMutex.Unlock()
+ // Note that we have to check this property again to avoid a TOCTOU bug.
+ if cr.runtime == nil {
+ cr.runtime = newClickHouseKeeperInstallationRuntime()
+ }
+ return cr.runtime
+}
+
+func (cr *ClickHouseKeeperInstallation) IEnsureStatus() apiChi.IStatus {
+ return any(cr.EnsureStatus()).(apiChi.IStatus)
+}
+
+// EnsureStatus ensures status
+func (cr *ClickHouseKeeperInstallation) EnsureStatus() *Status {
+ if cr == nil {
+ return nil
+ }
+
+ // Assume that most of the time, we'll see a non-nil value.
+ if cr.Status != nil {
+ return cr.Status
+ }
+
+ // Otherwise, we need to acquire a lock to initialize the field.
+ cr.statusCreatorMutex.Lock()
+ defer cr.statusCreatorMutex.Unlock()
+ // Note that we have to check this property again to avoid a TOCTOU bug.
+ if cr.Status == nil {
+ cr.Status = &Status{}
+ }
+ return cr.Status
+}
+
+// GetStatus gets Status
+func (cr *ClickHouseKeeperInstallation) GetStatus() apiChi.IStatus {
+ if cr == nil {
+ return (*Status)(nil)
+ }
+ return cr.Status
+}
+
+// HasStatus checks whether CHK has Status
+func (cr *ClickHouseKeeperInstallation) HasStatus() bool {
+ if cr == nil {
+ return false
+ }
+ return cr.Status != nil
+}
+
+// HasAncestor checks whether CR has an ancestor
+func (cr *ClickHouseKeeperInstallation) HasAncestor() bool {
+ if !cr.HasStatus() {
+ return false
+ }
+ return cr.Status.HasNormalizedCRCompleted()
+}
+
+// GetAncestor gets ancestor of a CR
+func (cr *ClickHouseKeeperInstallation) GetAncestor() apiChi.ICustomResource {
+ if !cr.HasAncestor() {
+ return (*ClickHouseKeeperInstallation)(nil)
+ }
+ return cr.Status.GetNormalizedCRCompleted()
+}
+
+// GetAncestorT gets ancestor of a CR
+func (cr *ClickHouseKeeperInstallation) GetAncestorT() *ClickHouseKeeperInstallation {
+ if !cr.HasAncestor() {
+ return nil
+ }
+ return cr.Status.GetNormalizedCRCompleted()
+}
+
+// SetAncestor sets ancestor of a CR
+func (cr *ClickHouseKeeperInstallation) SetAncestor(a *ClickHouseKeeperInstallation) {
+ if cr == nil {
+ return
+ }
+ cr.EnsureStatus().NormalizedCRCompleted = a
+}
+
+// HasTarget checks whether CR has a target
+func (cr *ClickHouseKeeperInstallation) HasTarget() bool {
+ if !cr.HasStatus() {
+ return false
+ }
+ return cr.Status.HasNormalizedCR()
+}
+
+// GetTarget gets target of a CR
+func (cr *ClickHouseKeeperInstallation) GetTarget() *ClickHouseKeeperInstallation {
+ if !cr.HasTarget() {
+ return nil
+ }
+ return cr.Status.GetNormalizedCR()
+}
+
+// SetTarget sets target of a CR
+func (cr *ClickHouseKeeperInstallation) SetTarget(a *ClickHouseKeeperInstallation) {
+ if cr == nil {
+ return
+ }
+ cr.EnsureStatus().NormalizedCR = a
+}
+
+func (cr *ClickHouseKeeperInstallation) GetUsedTemplates() []*apiChi.TemplateRef {
+ return nil
+}
+
+// FillStatus fills .Status
+func (cr *ClickHouseKeeperInstallation) FillStatus(endpoint string, pods, fqdns []string, ip string) {
+ cr.EnsureStatus().Fill(&FillStatusParams{
+ CHOpIP: ip,
+ ClustersCount: cr.ClustersCount(),
+ ShardsCount: cr.ShardsCount(),
+ HostsCount: cr.HostsCount(),
+ TaskID: "",
+ HostsUpdatedCount: 0,
+ HostsAddedCount: 0,
+ HostsUnchangedCount: 0,
+ HostsCompletedCount: 0,
+ HostsDeleteCount: 0,
+ HostsDeletedCount: 0,
+ Pods: pods,
+ FQDNs: fqdns,
+ Endpoint: endpoint,
+ NormalizedCR: cr.Copy(types.CopyCROptions{
+ SkipStatus: true,
+ SkipManagedFields: true,
+ }),
+ })
+}
+
+func (cr *ClickHouseKeeperInstallation) Fill() {
+ apiChi.FillCR(cr)
+}
+
+// MergeFrom merges from another CHK
+func (cr *ClickHouseKeeperInstallation) MergeFrom(from *ClickHouseKeeperInstallation, _type apiChi.MergeType) {
+ if from == nil {
+ return
+ }
+
+ // Merge Meta
+ switch _type {
+ case apiChi.MergeTypeFillEmptyValues:
+ _ = mergo.Merge(&cr.TypeMeta, from.TypeMeta)
+ _ = mergo.Merge(&cr.ObjectMeta, from.ObjectMeta)
+ case apiChi.MergeTypeOverrideByNonEmptyValues:
+ _ = mergo.Merge(&cr.TypeMeta, from.TypeMeta, mergo.WithOverride)
+ _ = mergo.Merge(&cr.ObjectMeta, from.ObjectMeta, mergo.WithOverride)
+ }
+ // Exclude skipped annotations
+ cr.SetAnnotations(
+ util.CopyMapFilter(
+ cr.GetAnnotations(),
+ nil,
+ util.ListSkippedAnnotations(),
+ ),
+ )
+
+ // Do actual merge for Spec
+ cr.GetSpecT().MergeFrom(from.GetSpecT(), _type)
+
+ // Copy service attributes
+ //cr.ensureRuntime().attributes = from.ensureRuntime().attributes
+
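+	// Inherit only the status fields that are marked as inheritable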
+ cr.EnsureStatus().CopyFrom(from.Status, types.CopyStatusOptions{
+ InheritableFields: true,
+ })
+}
+
+// FindCluster finds cluster by name or index.
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (cr *ClickHouseKeeperInstallation) FindCluster(needle interface{}) apiChi.ICluster {
+ var resultCluster *Cluster
+ cr.WalkClustersFullPath(func(chk *ClickHouseKeeperInstallation, clusterIndex int, cluster *Cluster) error {
+ switch v := needle.(type) {
+ case string:
+ if cluster.Name == v {
+ resultCluster = cluster
+ }
+ case int:
+ if clusterIndex == v {
+ resultCluster = cluster
+ }
+ }
+ return nil
+ })
+ return resultCluster
+}
+
+// FindShard finds shard by name or index
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (cr *ClickHouseKeeperInstallation) FindShard(needleCluster interface{}, needleShard interface{}) apiChi.IShard {
+ return cr.FindCluster(needleCluster).FindShard(needleShard)
+}
+
+// FindHost finds shard by name or index
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (cr *ClickHouseKeeperInstallation) FindHost(needleCluster interface{}, needleShard interface{}, needleHost interface{}) *apiChi.Host {
+ return cr.FindCluster(needleCluster).FindHost(needleShard, needleHost)
+}
+
+// ClustersCount counts clusters
+func (cr *ClickHouseKeeperInstallation) ClustersCount() int {
+ count := 0
+ cr.WalkClusters(func(cluster apiChi.ICluster) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+// ShardsCount counts shards
+func (cr *ClickHouseKeeperInstallation) ShardsCount() int {
+ count := 0
+ cr.WalkShards(func(shard *ChkShard) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+// HostsCount counts hosts
+func (cr *ClickHouseKeeperInstallation) HostsCount() int {
+ count := 0
+ cr.WalkHosts(func(host *apiChi.Host) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+// HostsCountAttributes counts hosts by attributes
+func (cr *ClickHouseKeeperInstallation) HostsCountAttributes(a *apiChi.HostReconcileAttributes) int {
+ count := 0
+ cr.WalkHosts(func(host *apiChi.Host) error {
+ if host.GetReconcileAttributes().Any(a) {
+ count++
+ }
+ return nil
+ })
+ return count
+}
+
+// GetHostTemplate gets HostTemplate by name
+func (cr *ClickHouseKeeperInstallation) GetHostTemplate(name string) (*apiChi.HostTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetHostTemplatesIndex().Has(name) {
+ return nil, false
+ }
+ return cr.GetSpecT().GetTemplates().GetHostTemplatesIndex().Get(name), true
+}
+
+// GetPodTemplate gets PodTemplate by name
+func (cr *ClickHouseKeeperInstallation) GetPodTemplate(name string) (*apiChi.PodTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Has(name) {
+ return nil, false
+ }
+ return cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Get(name), true
+}
+
+// WalkPodTemplates walks over all PodTemplates
+func (cr *ClickHouseKeeperInstallation) WalkPodTemplates(f func(template *apiChi.PodTemplate)) {
+ cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Walk(f)
+}
+
+// GetVolumeClaimTemplate gets VolumeClaimTemplate by name
+func (cr *ClickHouseKeeperInstallation) GetVolumeClaimTemplate(name string) (*apiChi.VolumeClaimTemplate, bool) {
+ if cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Has(name) {
+ return cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Get(name), true
+ }
+ return nil, false
+}
+
+// WalkVolumeClaimTemplates walks over all VolumeClaimTemplates
+func (cr *ClickHouseKeeperInstallation) WalkVolumeClaimTemplates(f func(template *apiChi.VolumeClaimTemplate)) {
+ if cr == nil {
+ return
+ }
+ cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Walk(f)
+}
+
+// GetServiceTemplate gets ServiceTemplate by name
+func (cr *ClickHouseKeeperInstallation) GetServiceTemplate(name string) (*apiChi.ServiceTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetServiceTemplatesIndex().Has(name) {
+ return nil, false
+ }
+ return cr.GetSpecT().GetTemplates().GetServiceTemplatesIndex().Get(name), true
+}
+
+// GetRootServiceTemplate gets ServiceTemplate of a CHK
+func (cr *ClickHouseKeeperInstallation) GetRootServiceTemplate() (*apiChi.ServiceTemplate, bool) {
+ if !cr.GetSpecT().GetDefaults().Templates.HasServiceTemplate() {
+ return nil, false
+ }
+ name := cr.GetSpecT().GetDefaults().Templates.GetServiceTemplate()
+ return cr.GetServiceTemplate(name)
+}
+
+// MatchNamespace matches namespace
+func (cr *ClickHouseKeeperInstallation) MatchNamespace(namespace string) bool {
+ if cr == nil {
+ return false
+ }
+ return cr.Namespace == namespace
+}
+
+// MatchFullName matches full name
+func (cr *ClickHouseKeeperInstallation) MatchFullName(namespace, name string) bool {
+ if cr == nil {
+ return false
+ }
+ return (cr.Namespace == namespace) && (cr.Name == name)
+}
+
+// FoundIn checks whether CHK can be found in haystack
+func (cr *ClickHouseKeeperInstallation) FoundIn(haystack []*ClickHouseKeeperInstallation) bool {
+ if cr == nil {
+ return false
+ }
+
+ for _, candidate := range haystack {
+ if candidate.MatchFullName(cr.Namespace, cr.Name) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsAuto checks whether templating policy is auto
+func (cr *ClickHouseKeeperInstallation) IsAuto() bool {
+ return false
+}
+
+// IsStopped checks whether CHK is stopped
+func (cr *ClickHouseKeeperInstallation) IsStopped() bool {
+ return false
+}
+
+// IsRollingUpdate checks whether CHK should perform rolling update
+func (cr *ClickHouseKeeperInstallation) IsRollingUpdate() bool {
+ return false
+}
+
+// IsTroubleshoot checks whether CHK is in troubleshoot mode
+func (cr *ClickHouseKeeperInstallation) IsTroubleshoot() bool {
+ return false
+}
+
+// GetReconciling gets reconciling spec
+func (cr *ClickHouseKeeperInstallation) GetReconciling() *apiChi.Reconciling {
+ if cr == nil {
+ return nil
+ }
+ return cr.GetSpecT().Reconciling
+}
+
+// Copy makes copy of a CHK, filtering fields according to specified CopyCROptions
+func (cr *ClickHouseKeeperInstallation) Copy(opts types.CopyCROptions) *ClickHouseKeeperInstallation {
+ if cr == nil {
+ return nil
+ }
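+	// Deep copy is performed via a JSON marshal/unmarshal round-trip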
+ jsonBytes, err := json.Marshal(cr)
+ if err != nil {
+ return nil
+ }
+
+ var chi2 *ClickHouseKeeperInstallation
+ if err := json.Unmarshal(jsonBytes, &chi2); err != nil {
+ return nil
+ }
+
+ if opts.SkipStatus {
+ chi2.Status = nil
+ }
+
+ if opts.SkipManagedFields {
+ chi2.SetManagedFields(nil)
+ }
+
+ return chi2
+}
+
+// JSON returns JSON string
+func (cr *ClickHouseKeeperInstallation) JSON(opts types.CopyCROptions) string {
+ if cr == nil {
+ return ""
+ }
+
+ filtered := cr.Copy(opts)
+ jsonBytes, err := json.MarshalIndent(filtered, "", " ")
+ if err != nil {
+		return fmt.Sprintf("unable to marshal. err: %v", err)
+ }
+ return string(jsonBytes)
+}
+
+// YAML returns YAML string
+func (cr *ClickHouseKeeperInstallation) YAML(opts types.CopyCROptions) string {
+ if cr == nil {
+ return ""
+ }
+
+ filtered := cr.Copy(opts)
+ yamlBytes, err := yaml.Marshal(filtered)
+ if err != nil {
+		return fmt.Sprintf("unable to marshal. err: %v", err)
+ }
+ return string(yamlBytes)
+}
+
+// FirstHost returns first host of the CHK
+func (cr *ClickHouseKeeperInstallation) FirstHost() *apiChi.Host {
+ var result *apiChi.Host
+ cr.WalkHosts(func(host *apiChi.Host) error {
+ if result == nil {
+ result = host
+ }
+ return nil
+ })
+ return result
+}
+
+func (cr *ClickHouseKeeperInstallation) GetName() string {
+ if cr == nil {
+ return ""
+ }
+ return cr.Name
+}
+
+func (cr *ClickHouseKeeperInstallation) GetNamespace() string {
+ if cr == nil {
+ return ""
+ }
+ return cr.Namespace
+}
+
+func (cr *ClickHouseKeeperInstallation) GetLabels() map[string]string {
+ if cr == nil {
+ return nil
+ }
+ return cr.Labels
+}
+
+func (cr *ClickHouseKeeperInstallation) GetAnnotations() map[string]string {
+ if cr == nil {
+ return nil
+ }
+ return cr.Annotations
+}
+
+// WalkClustersFullPath walks clusters with full path
+func (cr *ClickHouseKeeperInstallation) WalkClustersFullPath(
+ f func(chi *ClickHouseKeeperInstallation, clusterIndex int, cluster *Cluster) error,
+) []error {
+ if cr == nil {
+ return nil
+ }
+ res := make([]error, 0)
+
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ res = append(res, f(cr, clusterIndex, cr.GetSpecT().Configuration.Clusters[clusterIndex]))
+ }
+
+ return res
+}
+
+// WalkClusters walks clusters
+func (cr *ClickHouseKeeperInstallation) WalkClusters(f func(i apiChi.ICluster) error) []error {
+ if cr == nil {
+ return nil
+ }
+ res := make([]error, 0)
+
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ res = append(res, f(cr.GetSpecT().Configuration.Clusters[clusterIndex]))
+ }
+
+ return res
+}
+
+// WalkShards walks shards
+func (cr *ClickHouseKeeperInstallation) WalkShards(
+ f func(
+ shard *ChkShard,
+ ) error,
+) []error {
+ if cr == nil {
+ return nil
+ }
+ res := make([]error, 0)
+
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ res = append(res, f(shard))
+ }
+ }
+
+ return res
+}
+
+// WalkHostsFullPathAndScope walks hosts with full path
+func (cr *ClickHouseKeeperInstallation) WalkHostsFullPathAndScope(
+ crScopeCycleSize int,
+ clusterScopeCycleSize int,
+ f apiChi.WalkHostsAddressFn,
+) (res []error) {
+ if cr == nil {
+ return nil
+ }
+ address := types.NewHostScopeAddress(crScopeCycleSize, clusterScopeCycleSize)
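+	// Walk all hosts, advancing the CR-scope and cluster-scope cycle counters on each visited host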
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ address.ClusterScopeAddress.Init()
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.GetShard(shardIndex)
+ for replicaIndex, host := range shard.Hosts {
+ replica := cluster.GetReplica(replicaIndex)
+ address.ClusterIndex = clusterIndex
+ address.ShardIndex = shardIndex
+ address.ReplicaIndex = replicaIndex
+ res = append(res, f(cr, cluster, shard, replica, host, address))
+ address.CRScopeAddress.Inc()
+ address.ClusterScopeAddress.Inc()
+ }
+ }
+ }
+ return res
+}
+
+// WalkHostsFullPath walks hosts with a function
+func (cr *ClickHouseKeeperInstallation) WalkHostsFullPath(f apiChi.WalkHostsAddressFn) []error {
+ return cr.WalkHostsFullPathAndScope(0, 0, f)
+}
+
+// WalkHosts walks hosts with a function
+func (cr *ClickHouseKeeperInstallation) WalkHosts(f func(host *apiChi.Host) error) []error {
+ if cr == nil {
+ return nil
+ }
+ res := make([]error, 0)
+
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ res = append(res, f(host))
+ }
+ }
+ }
+
+ return res
+}
+
+// WalkTillError walks hosts with a function until an error met
+func (cr *ClickHouseKeeperInstallation) WalkTillError(
+ ctx context.Context,
+ fCRPreliminary func(ctx context.Context, chi *ClickHouseKeeperInstallation) error,
+ fCluster func(ctx context.Context, cluster *Cluster) error,
+ fShards func(ctx context.Context, shards []*ChkShard) error,
+ fCRFinal func(ctx context.Context, chi *ClickHouseKeeperInstallation) error,
+) error {
+ if err := fCRPreliminary(ctx, cr); err != nil {
+ return err
+ }
+
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ if err := fCluster(ctx, cluster); err != nil {
+ return err
+ }
+
+ shards := make([]*ChkShard, 0, len(cluster.Layout.Shards))
+ for shardIndex := range cluster.Layout.Shards {
+ shards = append(shards, cluster.Layout.Shards[shardIndex])
+ }
+ if err := fShards(ctx, shards); err != nil {
+ return err
+ }
+ }
+
+ if err := fCRFinal(ctx, cr); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
new file mode 100644
index 000000000..45f25c1c6
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_cluster.go
@@ -0,0 +1,356 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// Cluster defines item of a clusters section of .configuration
+type Cluster struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
+
+ Runtime ChkClusterRuntime `json:"-" yaml:"-"`
+}
+
+type ChkClusterRuntime struct {
+ Address ChkClusterAddress `json:"-" yaml:"-"`
+ CHK *ClickHouseKeeperInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r *ChkClusterRuntime) GetAddress() apiChi.IClusterAddress {
+ return &r.Address
+}
+
+func (r ChkClusterRuntime) GetCR() apiChi.ICustomResource {
+ return r.CHK
+}
+
+func (r *ChkClusterRuntime) SetCR(cr apiChi.ICustomResource) {
+ r.CHK = cr.(*ClickHouseKeeperInstallation)
+}
+
+// ChkClusterAddress defines address of a cluster within ClickHouseKeeperInstallation
+type ChkClusterAddress struct {
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+}
+
+func (a *ChkClusterAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *ChkClusterAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *ChkClusterAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *ChkClusterAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChkClusterAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChkClusterAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChkClusterAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChkClusterAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (cluster *Cluster) GetName() string {
+ return cluster.Name
+}
+
+func (c *Cluster) GetZookeeper() *apiChi.ZookeeperConfig {
+ return nil
+}
+
+func (c *Cluster) GetSchemaPolicy() *apiChi.SchemaPolicy {
+ return nil
+}
+
+// GetInsecure is a getter
+func (cluster *Cluster) GetInsecure() *types.StringBool {
+ return nil
+}
+
+// GetSecure is a getter
+func (cluster *Cluster) GetSecure() *types.StringBool {
+ return nil
+}
+
+func (c *Cluster) GetSecret() *apiChi.ClusterSecret {
+ return nil
+}
+
+func (cluster *Cluster) GetRuntime() apiChi.IClusterRuntime {
+ return &cluster.Runtime
+}
+
+func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
+ return types.NewInt32(1)
+}
+
+// FillShardReplicaSpecified fills whether shard or replicas are explicitly specified
+func (cluster *Cluster) FillShardReplicaSpecified() {
+ if len(cluster.Layout.Shards) > 0 {
+ cluster.Layout.ShardsSpecified = true
+ }
+ if len(cluster.Layout.Replicas) > 0 {
+ cluster.Layout.ReplicasSpecified = true
+ }
+}
+
+// isShardSpecified checks whether shard is explicitly specified
+func (cluster *Cluster) isShardSpecified() bool {
+	return cluster.Layout.ShardsSpecified
+}
+
+// isReplicaSpecified checks whether replica is explicitly specified
+func (cluster *Cluster) isReplicaSpecified() bool {
+	return !cluster.Layout.ShardsSpecified && cluster.Layout.ReplicasSpecified
+}
+
+// IsShardSpecified checks whether shard is explicitly specified
+func (cluster *Cluster) IsShardSpecified() bool {
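+	// Neither shards nor replicas are explicitly specified - treat the layout as shard-specified by default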
+ if !cluster.isShardSpecified() && !cluster.isReplicaSpecified() {
+ return true
+ }
+
+ return cluster.isShardSpecified()
+}
+
+// InheritFilesFrom inherits files from CHK
+func (cluster *Cluster) InheritFilesFrom(chk *ClickHouseKeeperInstallation) {
+ if chk.GetSpecT().Configuration == nil {
+ return
+ }
+ if chk.GetSpecT().Configuration.Files == nil {
+ return
+ }
+
+ // Propagate host section only
+ cluster.Files = cluster.Files.MergeFromCB(chk.GetSpecT().Configuration.Files, func(path string, _ *apiChi.Setting) bool {
+ if section, err := apiChi.GetSectionFromPath(path); err == nil {
+ if section.Equal(apiChi.SectionHost) {
+ return true
+ }
+ }
+
+ return false
+ })
+}
+
+// InheritTemplatesFrom inherits templates from CHK
+func (cluster *Cluster) InheritTemplatesFrom(chk *ClickHouseKeeperInstallation) {
+ if chk.GetSpec().GetDefaults() == nil {
+ return
+ }
+ if chk.GetSpec().GetDefaults().Templates == nil {
+ return
+ }
+ cluster.Templates = cluster.Templates.MergeFrom(chk.GetSpec().GetDefaults().Templates, apiChi.MergeTypeFillEmptyValues)
+ cluster.Templates.HandleDeprecatedFields()
+}
+
+// GetServiceTemplate returns service template, if exists
+func (cluster *Cluster) GetServiceTemplate() (*apiChi.ServiceTemplate, bool) {
+ return nil, false
+}
+
+// GetShard gets shard with specified index
+func (cluster *Cluster) GetShard(shard int) *ChkShard {
+ return cluster.Layout.Shards[shard]
+}
+
+// GetOrCreateHost gets or creates host on specified coordinates
+func (cluster *Cluster) GetOrCreateHost(shard, replica int) *apiChi.Host {
+ return cluster.Layout.HostsField.GetOrCreate(shard, replica)
+}
+
+// GetReplica gets replica with specified index
+func (cluster *Cluster) GetReplica(replica int) *ChkReplica {
+ return cluster.Layout.Replicas[replica]
+}
+
+// FindShard finds shard by name or index.
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (cluster *Cluster) FindShard(needle interface{}) apiChi.IShard {
+ var resultShard *ChkShard
+ cluster.WalkShards(func(index int, shard apiChi.IShard) error {
+ switch v := needle.(type) {
+ case string:
+ if shard.GetName() == v {
+ resultShard = shard.(*ChkShard)
+ }
+ case int:
+ if index == v {
+ resultShard = shard.(*ChkShard)
+ }
+ }
+ return nil
+ })
+ return resultShard
+}
+
+// FindHost finds host by name or index.
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (cluster *Cluster) FindHost(needleShard interface{}, needleHost interface{}) *apiChi.Host {
+ return cluster.FindShard(needleShard).FindHost(needleHost)
+}
+
+// FirstHost finds first host in the cluster
+func (cluster *Cluster) FirstHost() *apiChi.Host {
+ var result *apiChi.Host
+ cluster.WalkHosts(func(host *apiChi.Host) error {
+ if result == nil {
+ result = host
+ }
+ return nil
+ })
+ return result
+}
+
+// WalkShards walks shards
+func (cluster *Cluster) WalkShards(f func(index int, shard apiChi.IShard) error) []error {
+ if cluster == nil {
+ return nil
+ }
+ res := make([]error, 0)
+
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ res = append(res, f(shardIndex, shard))
+ }
+
+ return res
+}
+
+// WalkReplicas walks replicas
+func (cluster *Cluster) WalkReplicas(f func(index int, replica *ChkReplica) error) []error {
+ res := make([]error, 0)
+
+ for replicaIndex := range cluster.Layout.Replicas {
+ replica := cluster.Layout.Replicas[replicaIndex]
+ res = append(res, f(replicaIndex, replica))
+ }
+
+ return res
+}
+
+// WalkHosts walks hosts
+func (cluster *Cluster) WalkHosts(f func(host *apiChi.Host) error) []error {
+ res := make([]error, 0)
+
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ res = append(res, f(host))
+ }
+ }
+
+ return res
+}
+
+// WalkHostsByShards walks hosts by shards
+func (cluster *Cluster) WalkHostsByShards(f func(shard, replica int, host *apiChi.Host) error) []error {
+	res := make([]error, 0)
+
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ res = append(res, f(shardIndex, replicaIndex, host))
+ }
+ }
+
+ return res
+}
+
+func (cluster *Cluster) GetLayout() *ChkClusterLayout {
+ return cluster.Layout
+}
+
+// WalkHostsByReplicas walks hosts by replicas
+func (cluster *Cluster) WalkHostsByReplicas(f func(shard, replica int, host *apiChi.Host) error) []error {
+	res := make([]error, 0)
+
+ for replicaIndex := range cluster.Layout.Replicas {
+ replica := cluster.Layout.Replicas[replicaIndex]
+ for shardIndex := range replica.Hosts {
+ host := replica.Hosts[shardIndex]
+ res = append(res, f(shardIndex, replicaIndex, host))
+ }
+ }
+
+ return res
+}
+
+// HostsCount counts hosts
+func (cluster *Cluster) HostsCount() int {
+ count := 0
+ cluster.WalkHosts(func(host *apiChi.Host) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+// ChkClusterLayout defines layout section of .spec.configuration.clusters
+type ChkClusterLayout struct {
+ ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
+ ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
+
+ // TODO refactor into map[string]ChiShard
+ Shards []*ChkShard `json:"shards,omitempty" yaml:"shards,omitempty"`
+ Replicas []*ChkReplica `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+
+ // Internal data
+ // Whether shards or replicas are explicitly specified as Shards []ChiShard or Replicas []ChiReplica
+ ShardsSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
+ ReplicasSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
+ HostsField *apiChi.HostsField `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+// NewChkClusterLayout creates new cluster layout
+func NewChkClusterLayout() *ChkClusterLayout {
+ return new(ChkClusterLayout)
+}
+
+func (l *ChkClusterLayout) GetReplicasCount() int {
+ return l.ReplicasCount
+}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_configuration.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_configuration.go
new file mode 100644
index 000000000..1636f2452
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_configuration.go
@@ -0,0 +1,90 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+// Configuration defines configuration section of .spec
+type Configuration struct {
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Clusters []*Cluster `json:"clusters,omitempty" yaml:"clusters,omitempty"`
+}
+
+// NewConfiguration creates new ChkConfiguration objects
+func NewConfiguration() *Configuration {
+ return new(Configuration)
+}
+
+func (c *Configuration) GetProfiles() *apiChi.Settings {
+ return nil
+}
+
+func (c *Configuration) GetQuotas() *apiChi.Settings {
+ return nil
+}
+
+func (c *Configuration) GetSettings() *apiChi.Settings {
+ if c == nil {
+ return nil
+ }
+
+ return c.Settings
+}
+
+func (c *Configuration) GetFiles() *apiChi.Settings {
+ return c.Files
+}
+
+func (c *Configuration) GetClusters() []*Cluster {
+ if c == nil {
+ return nil
+ }
+
+ return c.Clusters
+}
+
+func (c *Configuration) GetCluster(i int) *Cluster {
+ clusters := c.GetClusters()
+ if clusters == nil {
+ return nil
+ }
+ if i >= len(clusters) {
+ return nil
+ }
+ return clusters[i]
+}
+
+// MergeFrom merges from specified source
+func (c *Configuration) MergeFrom(from *Configuration, _type apiChi.MergeType) *Configuration {
+ if from == nil {
+ return c
+ }
+
+ if c == nil {
+ c = NewConfiguration()
+ }
+
+ c.Settings = c.Settings.MergeFrom(from.Settings)
+ c.Files = c.Files.MergeFrom(from.Files)
+
+ // TODO merge clusters
+ // Copy Clusters for now
+ c.Clusters = from.Clusters
+
+ return c
+}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_replica.go
new file mode 100644
index 000000000..07325edf4
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_replica.go
@@ -0,0 +1,202 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+
+// ChkReplica defines item of a replica section of .spec.configuration.clusters[n].replicas
+// TODO unify with ChkShard based on HostsSet
+type ChkReplica struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
+ // TODO refactor into map[string]Host
+ Hosts []*apiChi.Host `json:"shards,omitempty" yaml:"shards,omitempty"`
+
+ Runtime ChkReplicaRuntime `json:"-" yaml:"-"`
+}
+
+type ChkReplicaRuntime struct {
+ Address ChkReplicaAddress `json:"-" yaml:"-"`
+ CHK *ClickHouseKeeperInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r ChkReplicaRuntime) GetAddress() apiChi.IReplicaAddress {
+ return &r.Address
+}
+
+func (r *ChkReplicaRuntime) SetCR(cr apiChi.ICustomResource) {
+ r.CHK = cr.(*ClickHouseKeeperInstallation)
+}
+
+func (replica *ChkReplica) GetName() string {
+ return replica.Name
+}
+
+// InheritSettingsFrom inherits settings from specified cluster
+func (replica *ChkReplica) InheritSettingsFrom(cluster *Cluster) {
+ replica.Settings = replica.Settings.MergeFrom(cluster.Settings)
+}
+
+// InheritFilesFrom inherits files from specified cluster
+func (replica *ChkReplica) InheritFilesFrom(cluster *Cluster) {
+ replica.Files = replica.Files.MergeFrom(cluster.Files)
+}
+
+// InheritTemplatesFrom inherits templates from specified cluster
+func (replica *ChkReplica) InheritTemplatesFrom(cluster *Cluster) {
+ replica.Templates = replica.Templates.MergeFrom(cluster.Templates, apiChi.MergeTypeFillEmptyValues)
+ replica.Templates.HandleDeprecatedFields()
+}
+
+// GetServiceTemplate gets service template
+func (replica *ChkReplica) GetServiceTemplate() (*apiChi.ServiceTemplate, bool) {
+ if !replica.Templates.HasReplicaServiceTemplate() {
+ return nil, false
+ }
+ name := replica.Templates.GetReplicaServiceTemplate()
+ return replica.Runtime.CHK.GetServiceTemplate(name)
+}
+
+// HasShardsCount checks whether replica has shards count specified
+func (replica *ChkReplica) HasShardsCount() bool {
+ if replica == nil {
+ return false
+ }
+
+ return replica.ShardsCount > 0
+}
+
+// WalkHosts walks over hosts
+func (replica *ChkReplica) WalkHosts(f func(host *apiChi.Host) error) []error {
+ res := make([]error, 0)
+
+ for shardIndex := range replica.Hosts {
+ host := replica.Hosts[shardIndex]
+ res = append(res, f(host))
+ }
+
+ return res
+}
+
+// HostsCount returns number of hosts
+func (replica *ChkReplica) HostsCount() int {
+ count := 0
+ replica.WalkHosts(func(host *apiChi.Host) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+func (replica *ChkReplica) HasSettings() bool {
+ return replica.GetSettings() != nil
+}
+
+func (replica *ChkReplica) GetSettings() *apiChi.Settings {
+ if replica == nil {
+ return nil
+ }
+ return replica.Settings
+}
+
+func (replica *ChkReplica) HasFiles() bool {
+ return replica.GetFiles() != nil
+}
+
+func (replica *ChkReplica) GetFiles() *apiChi.Settings {
+ if replica == nil {
+ return nil
+ }
+ return replica.Files
+}
+
+func (replica *ChkReplica) HasTemplates() bool {
+ return replica.GetTemplates() != nil
+}
+
+func (replica *ChkReplica) GetTemplates() *apiChi.TemplatesList {
+ if replica == nil {
+ return nil
+ }
+ return replica.Templates
+}
+
+func (replica *ChkReplica) GetRuntime() apiChi.IReplicaRuntime {
+ if replica == nil {
+ return (*ChkReplicaRuntime)(nil)
+ }
+ return &replica.Runtime
+}
+
+// ChkReplicaAddress defines address of a replica within ClickHouseKeeperInstallation
+type ChkReplicaAddress struct {
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+ ReplicaName string `json:"replicaName,omitempty" yaml:"replicaName,omitempty"`
+ ReplicaIndex int `json:"replicaIndex,omitempty" yaml:"replicaIndex,omitempty"`
+}
+
+func (a *ChkReplicaAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *ChkReplicaAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *ChkReplicaAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *ChkReplicaAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChkReplicaAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChkReplicaAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChkReplicaAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChkReplicaAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (a *ChkReplicaAddress) GetReplicaName() string {
+ return a.ReplicaName
+}
+
+func (a *ChkReplicaAddress) SetReplicaName(name string) {
+ a.ReplicaName = name
+}
+
+func (a *ChkReplicaAddress) GetReplicaIndex() int {
+ return a.ReplicaIndex
+}
+
+func (a *ChkReplicaAddress) SetReplicaIndex(index int) {
+ a.ReplicaIndex = index
+}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_shard.go
new file mode 100644
index 000000000..89ed9e15f
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_shard.go
@@ -0,0 +1,298 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// ChkShard defines item of a shard section of .spec.configuration.clusters[n].shards
+// TODO unify with ChkReplica based on HostsSet
+type ChkShard struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Weight *int `json:"weight,omitempty" yaml:"weight,omitempty"`
+ InternalReplication *types.StringBool `json:"internalReplication,omitempty" yaml:"internalReplication,omitempty"`
+ Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *apiChi.Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *apiChi.TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
+ // TODO refactor into map[string]Host
+ Hosts []*apiChi.Host `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+
+ Runtime ChkShardRuntime `json:"-" yaml:"-"`
+
+ // DefinitionType is DEPRECATED - to be removed soon
+ DefinitionType string `json:"definitionType,omitempty" yaml:"definitionType,omitempty"`
+}
+
+type ChkShardRuntime struct {
+ Address ChkShardAddress `json:"-" yaml:"-"`
+ CHK *ClickHouseKeeperInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r ChkShardRuntime) GetAddress() apiChi.IShardAddress {
+ return &r.Address
+}
+
+func (r *ChkShardRuntime) GetCR() apiChi.ICustomResource {
+ return r.CHK
+}
+
+func (r *ChkShardRuntime) SetCR(cr apiChi.ICustomResource) {
+ r.CHK = cr.(*ClickHouseKeeperInstallation)
+}
+
+func (shard *ChkShard) GetName() string {
+ return shard.Name
+}
+
+func (shard *ChkShard) GetInternalReplication() *types.StringBool {
+ return shard.InternalReplication
+}
+
+// InheritSettingsFrom inherits settings from specified cluster
+func (shard *ChkShard) InheritSettingsFrom(cluster *Cluster) {
+ shard.Settings = shard.Settings.MergeFrom(cluster.Settings)
+}
+
+// InheritFilesFrom inherits files from specified cluster
+func (shard *ChkShard) InheritFilesFrom(cluster *Cluster) {
+ shard.Files = shard.Files.MergeFrom(cluster.Files)
+}
+
+// InheritTemplatesFrom inherits templates from specified cluster
+func (shard *ChkShard) InheritTemplatesFrom(cluster *Cluster) {
+ shard.Templates = shard.Templates.MergeFrom(cluster.Templates, apiChi.MergeTypeFillEmptyValues)
+ shard.Templates.HandleDeprecatedFields()
+}
+
+// GetServiceTemplate gets service template
+func (shard *ChkShard) GetServiceTemplate() (*apiChi.ServiceTemplate, bool) {
+ if !shard.Templates.HasShardServiceTemplate() {
+ return nil, false
+ }
+ name := shard.Templates.GetShardServiceTemplate()
+ return shard.Runtime.CHK.GetServiceTemplate(name)
+}
+
+// HasReplicasCount checks whether shard has replicas count specified
+func (shard *ChkShard) HasReplicasCount() bool {
+ if shard == nil {
+ return false
+ }
+
+ return shard.ReplicasCount > 0
+}
+
+// WalkHosts runs specified function on each host
+func (shard *ChkShard) WalkHosts(f func(host *apiChi.Host) error) []error {
+ if shard == nil {
+ return nil
+ }
+
+ res := make([]error, 0)
+
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ res = append(res, f(host))
+ }
+
+ return res
+}
+
+// WalkHostsAbortOnError runs the specified function on each host and aborts on the first error
+func (shard *ChkShard) WalkHostsAbortOnError(f func(host *apiChi.Host) error) error {
+ if shard == nil {
+ return nil
+ }
+
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ if err := f(host); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// FindHost finds host by name or index.
+// Expectations: name is expected to be a string, index is expected to be an int.
+func (shard *ChkShard) FindHost(needle interface{}) (res *apiChi.Host) {
+ shard.WalkHosts(func(host *apiChi.Host) error {
+ switch v := needle.(type) {
+ case string:
+ if host.Runtime.Address.HostName == v {
+ res = host
+ }
+ case int:
+ if host.Runtime.Address.ShardScopeIndex == v {
+ res = host
+ }
+ }
+ return nil
+ })
+ return
+}
+
+// FirstHost finds first host in the shard
+func (shard *ChkShard) FirstHost() *apiChi.Host {
+ var result *apiChi.Host
+ shard.WalkHosts(func(host *apiChi.Host) error {
+ if result == nil {
+ result = host
+ }
+ return nil
+ })
+ return result
+}
+
+// HostsCount returns count of hosts in the shard
+func (shard *ChkShard) HostsCount() int {
+ count := 0
+ shard.WalkHosts(func(host *apiChi.Host) error {
+ count++
+ return nil
+ })
+ return count
+}
+
+// GetCHK gets Custom Resource of the shard
+func (shard *ChkShard) GetCHK() *ClickHouseKeeperInstallation {
+ return shard.Runtime.CHK
+}
+
+// GetCluster gets cluster of the shard
+func (shard *ChkShard) GetCluster() *Cluster {
+ return shard.Runtime.CHK.GetSpecT().Configuration.Clusters[shard.Runtime.Address.ClusterIndex]
+}
+
+// HasWeight checks whether shard has applicable weight value specified
+func (shard *ChkShard) HasWeight() bool {
+ if shard == nil {
+ return false
+ }
+ if shard.Weight == nil {
+ return false
+ }
+ return *shard.Weight >= 0
+}
+
+// GetWeight gets weight
+func (shard *ChkShard) GetWeight() int {
+ if shard.HasWeight() {
+ return *shard.Weight
+ }
+ return 0
+}
+
+func (shard *ChkShard) GetRuntime() apiChi.IShardRuntime {
+ if shard == nil {
+ return (*ChkShardRuntime)(nil)
+ }
+ return &shard.Runtime
+}
+
+func (shard *ChkShard) HasSettings() bool {
+ return shard.GetSettings() != nil
+}
+
+func (shard *ChkShard) GetSettings() *apiChi.Settings {
+ if shard == nil {
+ return nil
+ }
+ return shard.Settings
+}
+
+func (shard *ChkShard) HasFiles() bool {
+ return shard.GetFiles() != nil
+}
+
+func (shard *ChkShard) GetFiles() *apiChi.Settings {
+ if shard == nil {
+ return nil
+ }
+ return shard.Files
+}
+
+func (shard *ChkShard) HasTemplates() bool {
+ return shard.GetTemplates() != nil
+}
+
+func (shard *ChkShard) GetTemplates() *apiChi.TemplatesList {
+ if shard == nil {
+ return nil
+ }
+ return shard.Templates
+}
+
+// ChkShardAddress defines address of a shard within ClickHouseKeeperInstallation
+type ChkShardAddress struct {
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+ ShardName string `json:"shardName,omitempty" yaml:"shardName,omitempty"`
+ ShardIndex int `json:"shardIndex,omitempty" yaml:"shardIndex,omitempty"`
+}
+
+func (a *ChkShardAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *ChkShardAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *ChkShardAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *ChkShardAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChkShardAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChkShardAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChkShardAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChkShardAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (a *ChkShardAddress) GetShardName() string {
+ return a.ShardName
+}
+
+func (a *ChkShardAddress) SetShardName(name string) {
+ a.ShardName = name
+}
+
+func (a *ChkShardAddress) GetShardIndex() int {
+ return a.ShardIndex
+}
+
+func (a *ChkShardAddress) SetShardIndex(index int) {
+ a.ShardIndex = index
+}
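All host iteration in the new ChkShard type funnels through the nil-safe WalkHosts, which keeps FindHost, FirstHost and HostsCount trivial. Below is a minimal standalone sketch of that pattern; the `host`/`shard` types and their fields are simplified stand-ins for illustration, not the operator's real API.

```go
package main

import "fmt"

// Simplified stand-ins for the operator's Host and ChkShard types.
type host struct {
	name            string
	shardScopeIndex int
}

type shard struct {
	hosts []*host
}

// walkHosts mirrors ChkShard.WalkHosts: it is nil-safe and collects
// the per-host results of f.
func (s *shard) walkHosts(f func(h *host) error) []error {
	if s == nil {
		return nil
	}
	res := make([]error, 0)
	for i := range s.hosts {
		res = append(res, f(s.hosts[i]))
	}
	return res
}

// findHost mirrors ChkShard.FindHost: a string needle matches by host
// name, an int needle matches by shard-scope index.
func (s *shard) findHost(needle interface{}) (res *host) {
	s.walkHosts(func(h *host) error {
		switch v := needle.(type) {
		case string:
			if h.name == v {
				res = h
			}
		case int:
			if h.shardScopeIndex == v {
				res = h
			}
		}
		return nil
	})
	return
}

func main() {
	s := &shard{hosts: []*host{{"keeper-0", 0}, {"keeper-1", 1}}}
	fmt.Println(s.findHost("keeper-1").shardScopeIndex) // 1
	fmt.Println(s.findHost(0).name)                     // keeper-0
}
```

Centralizing the traversal means any future change (e.g. switching Hosts to a map) only needs to touch the walker, not every lookup helper.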
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
new file mode 100644
index 000000000..9a097628b
--- /dev/null
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_spec.go
@@ -0,0 +1,85 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// ChkSpec defines spec section of ClickHouseKeeper resource
+type ChkSpec struct {
+ TaskID *types.String `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
+ Reconciling *apiChi.Reconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Defaults *apiChi.Defaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
+ Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
+ Templates *apiChi.Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
+}
+
+// HasTaskID checks whether task id is specified
+func (spec *ChkSpec) HasTaskID() bool {
+ return len(spec.TaskID.Value()) > 0
+}
+
+// GetTaskID gets task id as a string
+func (spec *ChkSpec) GetTaskID() string {
+ return spec.TaskID.Value()
+}
+
+func (spec *ChkSpec) GetNamespaceDomainPattern() *types.String {
+ return spec.NamespaceDomainPattern
+}
+
+func (spec *ChkSpec) GetDefaults() *apiChi.Defaults {
+ return spec.Defaults
+}
+
+func (spec *ChkSpec) GetConfiguration() apiChi.IConfiguration {
+ return spec.Configuration
+}
+
+func (spec *ChkSpec) GetTemplates() *apiChi.Templates {
+ return spec.Templates
+}
+
+// MergeFrom merges from spec
+func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) {
+ if from == nil {
+ return
+ }
+
+ switch _type {
+ case apiChi.MergeTypeFillEmptyValues:
+ if !spec.HasTaskID() {
+ spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
+ }
+ if !spec.NamespaceDomainPattern.HasValue() {
+ spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
+ }
+ case apiChi.MergeTypeOverrideByNonEmptyValues:
+ if from.HasTaskID() {
+ spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
+ }
+ if from.NamespaceDomainPattern.HasValue() {
+ spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
+ }
+ }
+
+ spec.Reconciling = spec.Reconciling.MergeFrom(from.Reconciling, _type)
+ spec.Defaults = spec.Defaults.MergeFrom(from.Defaults, _type)
+ spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
+ spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
+}
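ChkSpec.MergeFrom above distinguishes two merge modes: MergeTypeFillEmptyValues writes a field only when the destination has not set it, while MergeTypeOverrideByNonEmptyValues writes it only when the source has set it. A minimal sketch of those semantics on a single scalar field (the names here are simplified stand-ins, not the operator's merge API):

```go
package main

import "fmt"

type mergeType int

const (
	fillEmptyValues mergeType = iota
	overrideByNonEmptyValues
)

type spec struct{ taskID string }

// mergeFrom mirrors ChkSpec.MergeFrom for one scalar field:
// fill-empty only writes when the destination is empty; override
// only writes when the source is non-empty.
func (s *spec) mergeFrom(from *spec, t mergeType) {
	if from == nil {
		return
	}
	switch t {
	case fillEmptyValues:
		if s.taskID == "" {
			s.taskID = from.taskID
		}
	case overrideByNonEmptyValues:
		if from.taskID != "" {
			s.taskID = from.taskID
		}
	}
}

func main() {
	a := &spec{taskID: "keep-me"}
	a.mergeFrom(&spec{taskID: "other"}, fillEmptyValues)
	fmt.Println(a.taskID) // keep-me: destination was already set

	b := &spec{}
	b.mergeFrom(&spec{taskID: "other"}, overrideByNonEmptyValues)
	fmt.Println(b.taskID) // other: source was non-empty
}
```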
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
index b4d84a316..400440c9a 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/type_status.go
@@ -15,95 +15,733 @@
package v1
import (
+ "sort"
+ "sync"
+
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+ "github.com/altinity/clickhouse-operator/pkg/version"
)
-// ChkStatus defines status section of ClickHouseKeeper resource
-type ChkStatus struct {
- CHOpVersion string `json:"chop-version,omitempty" yaml:"chop-version,omitempty"`
- CHOpCommit string `json:"chop-commit,omitempty" yaml:"chop-commit,omitempty"`
- CHOpDate string `json:"chop-date,omitempty" yaml:"chop-date,omitempty"`
- CHOpIP string `json:"chop-ip,omitempty" yaml:"chop-ip,omitempty"`
-
- Status string `json:"status,omitempty" yaml:"status,omitempty"`
-
- // Replicas is the number of number of desired replicas in the cluster
- Replicas int32 `json:"replicas,omitempty"`
+const (
+ maxActions = 10
+ maxErrors = 10
+ maxTaskIDs = 10
+)
- // ReadyReplicas is the number of number of ready replicas in the cluster
- ReadyReplicas []apiChi.ChiZookeeperNode `json:"readyReplicas,omitempty"`
+// Possible CR statuses
+const (
+ StatusInProgress = "InProgress"
+ StatusCompleted = "Completed"
+ StatusAborted = "Aborted"
+ StatusTerminating = "Terminating"
+)
+// Status defines status section of the custom resource.
+//
+// Note: application-level reads and writes to Status fields should go through the synchronized getter/setter functions.
+// While all of these fields need to be exported for JSON and YAML serialization/deserialization, we can at least audit
+// that application logic sticks to the synchronized getters/setters by checking that all explicit Go field-level
+// accesses are strictly within _this_ source file OR the generated deep copy source file.
+type Status struct {
+ CHOpVersion string `json:"chop-version,omitempty" yaml:"chop-version,omitempty"`
+ CHOpCommit string `json:"chop-commit,omitempty" yaml:"chop-commit,omitempty"`
+ CHOpDate string `json:"chop-date,omitempty" yaml:"chop-date,omitempty"`
+ CHOpIP string `json:"chop-ip,omitempty" yaml:"chop-ip,omitempty"`
+ ClustersCount int `json:"clusters,omitempty" yaml:"clusters,omitempty"`
+ ShardsCount int `json:"shards,omitempty" yaml:"shards,omitempty"`
+ ReplicasCount int `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+ HostsCount int `json:"hosts,omitempty" yaml:"hosts,omitempty"`
+ Status string `json:"status,omitempty" yaml:"status,omitempty"`
+ TaskID string `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ TaskIDsStarted []string `json:"taskIDsStarted,omitempty" yaml:"taskIDsStarted,omitempty"`
+ TaskIDsCompleted []string `json:"taskIDsCompleted,omitempty" yaml:"taskIDsCompleted,omitempty"`
+ Action string `json:"action,omitempty" yaml:"action,omitempty"`
+ Actions []string `json:"actions,omitempty" yaml:"actions,omitempty"`
+ Error string `json:"error,omitempty" yaml:"error,omitempty"`
+ Errors []string `json:"errors,omitempty" yaml:"errors,omitempty"`
+ HostsUpdatedCount int `json:"hostsUpdated,omitempty" yaml:"hostsUpdated,omitempty"`
+ HostsAddedCount int `json:"hostsAdded,omitempty" yaml:"hostsAdded,omitempty"`
+ HostsUnchangedCount int `json:"hostsUnchanged,omitempty" yaml:"hostsUnchanged,omitempty"`
+ HostsFailedCount int `json:"hostsFailed,omitempty" yaml:"hostsFailed,omitempty"`
+ HostsCompletedCount int `json:"hostsCompleted,omitempty" yaml:"hostsCompleted,omitempty"`
+ HostsDeletedCount int `json:"hostsDeleted,omitempty" yaml:"hostsDeleted,omitempty"`
+ HostsDeleteCount int `json:"hostsDelete,omitempty" yaml:"hostsDelete,omitempty"`
Pods []string `json:"pods,omitempty" yaml:"pods,omitempty"`
PodIPs []string `json:"pod-ips,omitempty" yaml:"pod-ips,omitempty"`
FQDNs []string `json:"fqdns,omitempty" yaml:"fqdns,omitempty"`
- NormalizedCHK *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
- NormalizedCHKCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
+ Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
+ NormalizedCR *ClickHouseKeeperInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
+ NormalizedCRCompleted *ClickHouseKeeperInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
+ HostsWithTablesCreated []string `json:"hostsWithTablesCreated,omitempty" yaml:"hostsWithTablesCreated,omitempty"`
+ UsedTemplates []*apiChi.TemplateRef `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"`
+
+ mu sync.RWMutex `json:"-" yaml:"-"`
+}
+
+// FillStatusParams is a struct used to fill status params
+type FillStatusParams struct {
+ CHOpIP string
+ ClustersCount int
+ ShardsCount int
+ HostsCount int
+ TaskID string
+ HostsUpdatedCount int
+ HostsAddedCount int
+ HostsUnchangedCount int
+ HostsCompletedCount int
+ HostsDeleteCount int
+ HostsDeletedCount int
+ Pods []string
+ FQDNs []string
+ Endpoint string
+ NormalizedCR *ClickHouseKeeperInstallation
+}
+
+// Fill is a synchronized setter for a fairly large number of fields. We take a struct-typed "params" argument to avoid
+// confusion between similarly typed positional arguments, and to avoid defining a lot of separate synchronized setters
+// for these fields that are typically all set together at once (during "fills").
+func (s *Status) Fill(params *FillStatusParams) {
+ doWithWriteLock(s, func(s *Status) {
+ // We always set these (build-hardcoded) version fields.
+ s.CHOpVersion = version.Version
+ s.CHOpCommit = version.GitSHA
+ s.CHOpDate = version.BuiltAt
+
+ // Now, set fields from the provided input.
+ s.CHOpIP = params.CHOpIP
+ s.ClustersCount = params.ClustersCount
+ s.ShardsCount = params.ShardsCount
+ s.HostsCount = params.HostsCount
+ s.TaskID = params.TaskID
+ s.HostsUpdatedCount = params.HostsUpdatedCount
+ s.HostsAddedCount = params.HostsAddedCount
+ s.HostsUnchangedCount = params.HostsUnchangedCount
+ s.HostsCompletedCount = params.HostsCompletedCount
+ s.HostsDeleteCount = params.HostsDeleteCount
+ s.HostsDeletedCount = params.HostsDeletedCount
+ s.Pods = params.Pods
+ s.FQDNs = params.FQDNs
+ s.Endpoint = params.Endpoint
+ s.NormalizedCR = params.NormalizedCR
+ })
+}
+
+// SetError sets status error
+func (s *Status) SetError(err string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.Error = err
+ })
+}
+
+// SetAndPushError sets and pushes error into status
+func (s *Status) SetAndPushError(err string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.Error = err
+ s.Errors = append([]string{err}, s.Errors...)
+ if len(s.Errors) > maxErrors {
+ s.Errors = s.Errors[:maxErrors]
+ }
+ })
+}
+
+// PushHostTablesCreated pushes host to the list of hosts with created tables
+func (s *Status) PushHostTablesCreated(host string) {
+ doWithWriteLock(s, func(s *Status) {
+ if util.InArray(host, s.HostsWithTablesCreated) {
+ return
+ }
+ s.HostsWithTablesCreated = append(s.HostsWithTablesCreated, host)
+ })
+}
+
+// SyncHostTablesCreated syncs list of hosts with tables created with actual list of hosts
+func (s *Status) SyncHostTablesCreated() {
+ doWithWriteLock(s, func(s *Status) {
+ if s.FQDNs == nil {
+ return
+ }
+ s.HostsWithTablesCreated = util.IntersectStringArrays(s.HostsWithTablesCreated, s.FQDNs)
+ })
+}
+
+// PushUsedTemplate pushes used template to the list of used templates
+func (s *Status) PushUsedTemplate(templateRef *apiChi.TemplateRef) {
+ doWithWriteLock(s, func(s *Status) {
+ s.UsedTemplates = append(s.UsedTemplates, templateRef)
+ })
+}
+
+// GetUsedTemplatesCount gets used templates count
+func (s *Status) GetUsedTemplatesCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return len(s.UsedTemplates)
+ })
+}
+
+// SetAction action setter
+func (s *Status) SetAction(action string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.Action = action
+ })
+}
+
+// HasNormalizedCRCompleted is a checker
+func (s *Status) HasNormalizedCRCompleted() bool {
+ return s.GetNormalizedCRCompleted() != nil
+}
+
+// HasNormalizedCR is a checker
+func (s *Status) HasNormalizedCR() bool {
+ return s.GetNormalizedCR() != nil
+}
+
+// PushAction pushes action into status
+func (s *Status) PushAction(action string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.Actions = append([]string{action}, s.Actions...)
+ trimActionsNoSync(s)
+ })
+}
+
+// PushError sets and pushes error into status
+func (s *Status) PushError(error string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.Errors = append([]string{error}, s.Errors...)
+ if len(s.Errors) > maxErrors {
+ s.Errors = s.Errors[:maxErrors]
+ }
+ })
+}
+
+// SetPodIPs sets pod IPs
+func (s *Status) SetPodIPs(podIPs []string) {
+ doWithWriteLock(s, func(s *Status) {
+ s.PodIPs = podIPs
+ })
+}
+
+// HostDeleted increments deleted hosts counter
+func (s *Status) HostDeleted() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsDeletedCount++
+ })
+}
+
+// HostUpdated increments updated hosts counter
+func (s *Status) HostUpdated() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsUpdatedCount++
+ })
+}
+
+// HostAdded increments added hosts counter
+func (s *Status) HostAdded() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsAddedCount++
+ })
+}
+
+// HostUnchanged increments unchanged hosts counter
+func (s *Status) HostUnchanged() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsUnchangedCount++
+ })
+}
+
+// HostFailed increments failed hosts counter
+func (s *Status) HostFailed() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsFailedCount++
+ })
+}
+
+// HostCompleted increments completed hosts counter
+func (s *Status) HostCompleted() {
+ doWithWriteLock(s, func(s *Status) {
+ s.HostsCompletedCount++
+ })
+}
+
+// ReconcileStart marks reconcile start
+func (s *Status) ReconcileStart(deleteHostsCount int) {
+ doWithWriteLock(s, func(s *Status) {
+ if s == nil {
+ return
+ }
+ s.Status = StatusInProgress
+ s.HostsUpdatedCount = 0
+ s.HostsAddedCount = 0
+ s.HostsUnchangedCount = 0
+ s.HostsCompletedCount = 0
+ s.HostsDeletedCount = 0
+ s.HostsDeleteCount = deleteHostsCount
+ pushTaskIDStartedNoSync(s)
+ })
+}
+
+// ReconcileComplete marks reconcile completion
+func (s *Status) ReconcileComplete() {
+ doWithWriteLock(s, func(s *Status) {
+ if s == nil {
+ return
+ }
+ s.Status = StatusCompleted
+ s.Action = ""
+ pushTaskIDCompletedNoSync(s)
+ })
+}
+
+// ReconcileAbort marks reconcile abortion
+func (s *Status) ReconcileAbort() {
+ doWithWriteLock(s, func(s *Status) {
+ if s == nil {
+ return
+ }
+ s.Status = StatusAborted
+ s.Action = ""
+ pushTaskIDCompletedNoSync(s)
+ })
+}
+
+// DeleteStart marks deletion start
+func (s *Status) DeleteStart() {
+ doWithWriteLock(s, func(s *Status) {
+ if s == nil {
+ return
+ }
+ s.Status = StatusTerminating
+ s.HostsUpdatedCount = 0
+ s.HostsAddedCount = 0
+ s.HostsUnchangedCount = 0
+ s.HostsCompletedCount = 0
+ s.HostsDeletedCount = 0
+ s.HostsDeleteCount = 0
+ pushTaskIDStartedNoSync(s)
+ })
+}
+
+// CopyFrom copies the state of a given Status f into the receiver Status of the call.
+func (s *Status) CopyFrom(f *Status, opts types.CopyStatusOptions) {
+ doWithWriteLock(s, func(s *Status) {
+ doWithReadLock(f, func(from *Status) {
+ if s == nil || from == nil {
+ return
+ }
+
+ if opts.InheritableFields {
+ s.TaskIDsStarted = from.TaskIDsStarted
+ s.TaskIDsCompleted = from.TaskIDsCompleted
+ s.Actions = from.Actions
+ s.Errors = from.Errors
+ s.HostsWithTablesCreated = from.HostsWithTablesCreated
+ }
+
+ if opts.Actions {
+ s.Action = from.Action
+ mergeActionsNoSync(s, from)
+ s.HostsWithTablesCreated = nil
+ if len(from.HostsWithTablesCreated) > 0 {
+ s.HostsWithTablesCreated = append(s.HostsWithTablesCreated, from.HostsWithTablesCreated...)
+ }
+ s.UsedTemplates = nil
+ if len(from.UsedTemplates) > 0 {
+ s.UsedTemplates = append(s.UsedTemplates, from.UsedTemplates...)
+ }
+ }
+
+ if opts.Errors {
+ s.Error = from.Error
+ s.Errors = util.MergeStringArrays(s.Errors, from.Errors)
+ sort.Sort(sort.Reverse(sort.StringSlice(s.Errors)))
+ }
+
+ if opts.MainFields {
+ s.CHOpVersion = from.CHOpVersion
+ s.CHOpCommit = from.CHOpCommit
+ s.CHOpDate = from.CHOpDate
+ s.CHOpIP = from.CHOpIP
+ s.ClustersCount = from.ClustersCount
+ s.ShardsCount = from.ShardsCount
+ s.ReplicasCount = from.ReplicasCount
+ s.HostsCount = from.HostsCount
+ s.Status = from.Status
+ s.TaskID = from.TaskID
+ s.TaskIDsStarted = from.TaskIDsStarted
+ s.TaskIDsCompleted = from.TaskIDsCompleted
+ s.Action = from.Action
+ mergeActionsNoSync(s, from)
+ s.Error = from.Error
+ s.Errors = from.Errors
+ s.HostsUpdatedCount = from.HostsUpdatedCount
+ s.HostsAddedCount = from.HostsAddedCount
+ s.HostsUnchangedCount = from.HostsUnchangedCount
+ s.HostsCompletedCount = from.HostsCompletedCount
+ s.HostsDeletedCount = from.HostsDeletedCount
+ s.HostsDeleteCount = from.HostsDeleteCount
+ s.Pods = from.Pods
+ s.PodIPs = from.PodIPs
+ s.FQDNs = from.FQDNs
+ s.Endpoint = from.Endpoint
+ s.NormalizedCR = from.NormalizedCR
+ }
+
+ if opts.Normalized {
+ s.NormalizedCR = from.NormalizedCR
+ }
+
+ if opts.WholeStatus {
+ s.CHOpVersion = from.CHOpVersion
+ s.CHOpCommit = from.CHOpCommit
+ s.CHOpDate = from.CHOpDate
+ s.CHOpIP = from.CHOpIP
+ s.ClustersCount = from.ClustersCount
+ s.ShardsCount = from.ShardsCount
+ s.ReplicasCount = from.ReplicasCount
+ s.HostsCount = from.HostsCount
+ s.Status = from.Status
+ s.TaskID = from.TaskID
+ s.TaskIDsStarted = from.TaskIDsStarted
+ s.TaskIDsCompleted = from.TaskIDsCompleted
+ s.Action = from.Action
+ mergeActionsNoSync(s, from)
+ s.Error = from.Error
+ s.Errors = from.Errors
+ s.HostsUpdatedCount = from.HostsUpdatedCount
+ s.HostsAddedCount = from.HostsAddedCount
+ s.HostsUnchangedCount = from.HostsUnchangedCount
+ s.HostsCompletedCount = from.HostsCompletedCount
+ s.HostsDeletedCount = from.HostsDeletedCount
+ s.HostsDeleteCount = from.HostsDeleteCount
+ s.Pods = from.Pods
+ s.PodIPs = from.PodIPs
+ s.FQDNs = from.FQDNs
+ s.Endpoint = from.Endpoint
+ s.NormalizedCR = from.NormalizedCR
+ s.NormalizedCRCompleted = from.NormalizedCRCompleted
+ }
+ })
+ })
+}
+
+// ClearNormalizedCR clears normalized CR in status
+func (s *Status) ClearNormalizedCR() {
+ doWithWriteLock(s, func(s *Status) {
+ s.NormalizedCR = nil
+ })
+}
+
+// SetNormalizedCompletedFromCurrentNormalized sets completed CR from current CR
+func (s *Status) SetNormalizedCompletedFromCurrentNormalized() {
+ doWithWriteLock(s, func(s *Status) {
+ s.NormalizedCRCompleted = s.NormalizedCR
+ })
+}
+
+// GetCHOpVersion gets operator version
+func (s *Status) GetCHOpVersion() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.CHOpVersion
+ })
+}
+
+// GetCHOpCommit gets operator build commit
+func (s *Status) GetCHOpCommit() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.CHOpCommit
+ })
}
-// CopyFrom copies the state of a given ChiStatus f into the receiver ChiStatus of the call.
-func (s *ChkStatus) CopyFrom(from *ChkStatus, opts apiChi.CopyCHIStatusOptions) {
- if s == nil || from == nil {
+// GetCHOpDate gets operator build date
+func (s *Status) GetCHOpDate() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.CHOpDate
+ })
+}
+
+// GetCHOpIP gets operator pod's IP
+func (s *Status) GetCHOpIP() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.CHOpIP
+ })
+}
+
+// GetClustersCount gets clusters count
+func (s *Status) GetClustersCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.ClustersCount
+ })
+}
+
+// GetShardsCount gets shards count
+func (s *Status) GetShardsCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.ShardsCount
+ })
+}
+
+// GetReplicasCount gets replicas count
+func (s *Status) GetReplicasCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.ReplicasCount
+ })
+}
+
+// GetHostsCount gets hosts count
+func (s *Status) GetHostsCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsCount
+ })
+}
+
+// GetStatus gets status
+func (s *Status) GetStatus() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.Status
+ })
+}
+
+// GetTaskID gets task id
+func (s *Status) GetTaskID() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.TaskID
+ })
+}
+
+// GetTaskIDsStarted gets started task ids
+func (s *Status) GetTaskIDsStarted() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.TaskIDsStarted
+ })
+}
+
+// GetTaskIDsCompleted gets completed task ids
+func (s *Status) GetTaskIDsCompleted() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.TaskIDsCompleted
+ })
+}
+
+// GetAction gets last action
+func (s *Status) GetAction() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.Action
+ })
+}
+
+// GetActions gets all actions
+func (s *Status) GetActions() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.Actions
+ })
+}
+
+// GetError gets last error
+func (s *Status) GetError() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.Error
+ })
+}
+
+// GetErrors gets all errors
+func (s *Status) GetErrors() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.Errors
+ })
+}
+
+// GetHostsUpdatedCount gets updated hosts counter
+func (s *Status) GetHostsUpdatedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsUpdatedCount
+ })
+}
+
+// GetHostsAddedCount gets added hosts counter
+func (s *Status) GetHostsAddedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsAddedCount
+ })
+}
+
+// GetHostsUnchangedCount gets unchanged hosts counter
+func (s *Status) GetHostsUnchangedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsUnchangedCount
+ })
+}
+
+// GetHostsFailedCount gets failed hosts counter
+func (s *Status) GetHostsFailedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsFailedCount
+ })
+}
+
+// GetHostsCompletedCount gets completed hosts counter
+func (s *Status) GetHostsCompletedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsCompletedCount
+ })
+}
+
+// GetHostsDeletedCount gets deleted hosts counter
+func (s *Status) GetHostsDeletedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsDeletedCount
+ })
+}
+
+// GetHostsDeleteCount gets hosts to be deleted counter
+func (s *Status) GetHostsDeleteCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
+ return s.HostsDeleteCount
+ })
+}
+
+// GetPods gets list of pods
+func (s *Status) GetPods() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.Pods
+ })
+}
+
+// GetPodIPs gets list of pod ips
+func (s *Status) GetPodIPs() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.PodIPs
+ })
+}
+
+// GetFQDNs gets list of all FQDNs of hosts
+func (s *Status) GetFQDNs() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.FQDNs
+ })
+}
+
+// GetEndpoint gets API endpoint
+func (s *Status) GetEndpoint() string {
+ return getStringWithReadLock(s, func(s *Status) string {
+ return s.Endpoint
+ })
+}
+
+// GetNormalizedCR gets target CR
+func (s *Status) GetNormalizedCR() *ClickHouseKeeperInstallation {
+ return getInstallationWithReadLock(s, func(s *Status) *ClickHouseKeeperInstallation {
+ return s.NormalizedCR
+ })
+}
+
+// GetNormalizedCRCompleted gets completed CR
+func (s *Status) GetNormalizedCRCompleted() *ClickHouseKeeperInstallation {
+ return getInstallationWithReadLock(s, func(s *Status) *ClickHouseKeeperInstallation {
+ return s.NormalizedCRCompleted
+ })
+}
+
+// GetHostsWithTablesCreated gets hosts with created tables
+func (s *Status) GetHostsWithTablesCreated() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
+ return s.HostsWithTablesCreated
+ })
+}
+
+// Begin helpers
+
+func doWithWriteLock(s *Status, f func(s *Status)) {
+ if s == nil {
return
}
- if opts.InheritableFields {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ f(s)
+}
+
+func doWithReadLock(s *Status, f func(s *Status)) {
+ if s == nil {
+ return
}
- if opts.MainFields {
- s.CHOpVersion = from.CHOpVersion
- s.CHOpCommit = from.CHOpCommit
- s.CHOpDate = from.CHOpDate
- s.CHOpIP = from.CHOpIP
- s.Status = from.Status
- s.Replicas = from.Replicas
- s.ReadyReplicas = from.ReadyReplicas
- s.Pods = from.Pods
- s.PodIPs = from.PodIPs
- s.FQDNs = from.FQDNs
- s.NormalizedCHK = from.NormalizedCHK
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ f(s)
+}
+
+func getIntWithReadLock(s *Status, f func(s *Status) int) int {
+ var zeroVal int
+ if s == nil {
+ return zeroVal
}
- if opts.Normalized {
- s.NormalizedCHK = from.NormalizedCHK
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return f(s)
+}
+
+func getStringWithReadLock(s *Status, f func(s *Status) string) string {
+ var zeroVal string
+ if s == nil {
+ return zeroVal
}
- if opts.WholeStatus {
- s.CHOpVersion = from.CHOpVersion
- s.CHOpCommit = from.CHOpCommit
- s.CHOpDate = from.CHOpDate
- s.CHOpIP = from.CHOpIP
- s.Status = from.Status
- s.Replicas = from.Replicas
- s.ReadyReplicas = from.ReadyReplicas
- s.Pods = from.Pods
- s.PodIPs = from.PodIPs
- s.FQDNs = from.FQDNs
- s.NormalizedCHK = from.NormalizedCHK
- s.NormalizedCHKCompleted = from.NormalizedCHKCompleted
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return f(s)
+}
+
+func getInstallationWithReadLock(s *Status, f func(s *Status) *ClickHouseKeeperInstallation) *ClickHouseKeeperInstallation {
+ var zeroVal *ClickHouseKeeperInstallation
+ if s == nil {
+ return zeroVal
}
+
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return f(s)
}
-// HasNormalizedCHKCompleted is a checker
-func (s *ChkStatus) HasNormalizedCHKCompleted() bool {
- return s.GetNormalizedCHKCompleted() != nil
+func getStringArrWithReadLock(s *Status, f func(s *Status) []string) []string {
+ emptyArr := make([]string, 0)
+ if s == nil {
+ return emptyArr
+ }
+
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return f(s)
}
-// HasNormalizedCHK is a checker
-func (s *ChkStatus) HasNormalizedCHK() bool {
- return s.GetNormalizedCHK() != nil
+// mergeActionsNoSync merges the actions of from into those of s (without synchronization, because synchronized
+// functions call into this).
+func mergeActionsNoSync(s *Status, from *Status) {
+ s.Actions = util.MergeStringArrays(s.Actions, from.Actions)
+ sort.Sort(sort.Reverse(sort.StringSlice(s.Actions)))
+ trimActionsNoSync(s)
}
-// ClearNormalizedCHK clears normalized CHK in status
-func (s *ChkStatus) ClearNormalizedCHK() {
- s.NormalizedCHK = nil
+// trimActionsNoSync trims actions (without synchronization, because synchronized functions call into this).
+func trimActionsNoSync(s *Status) {
+ if len(s.Actions) > maxActions {
+ s.Actions = s.Actions[:maxActions]
+ }
}
-// GetNormalizedCHK gets target CHK
-func (s *ChkStatus) GetNormalizedCHK() *ClickHouseKeeperInstallation {
- return s.NormalizedCHK
+// pushTaskIDStartedNoSync pushes task id into status
+func pushTaskIDStartedNoSync(s *Status) {
+ s.TaskIDsStarted = append([]string{s.TaskID}, s.TaskIDsStarted...)
+ if len(s.TaskIDsStarted) > maxTaskIDs {
+ s.TaskIDsStarted = s.TaskIDsStarted[:maxTaskIDs]
+ }
}
-// GetNormalizedCHKCompleted gets completed CHI
-func (s *ChkStatus) GetNormalizedCHKCompleted() *ClickHouseKeeperInstallation {
- return s.NormalizedCHKCompleted
+// pushTaskIDCompletedNoSync pushes task id into status w/o sync
+func pushTaskIDCompletedNoSync(s *Status) {
+ s.TaskIDsCompleted = append([]string{s.TaskID}, s.TaskIDsCompleted...)
+ if len(s.TaskIDsCompleted) > maxTaskIDs {
+ s.TaskIDsCompleted = s.TaskIDsCompleted[:maxTaskIDs]
+ }
}
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
index ec075bfc0..b1344c46a 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/types.go
@@ -17,9 +17,6 @@ package v1
import (
"sync"
- "github.com/altinity/clickhouse-operator/pkg/util"
- "github.com/imdario/mergo"
-
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
@@ -32,283 +29,35 @@ type ClickHouseKeeperInstallation struct {
meta.TypeMeta `json:",inline" yaml:",inline"`
meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec ChkSpec `json:"spec" yaml:"spec"`
- Status *ChkStatus `json:"status,omitempty" yaml:"status,omitempty"`
+ Spec ChkSpec `json:"spec" yaml:"spec"`
+ Status *Status `json:"status,omitempty" yaml:"status,omitempty"`
- Runtime ClickHouseKeeperInstallationRuntime `json:"-" yaml:"-"`
+ runtime *ClickHouseKeeperInstallationRuntime `json:"-" yaml:"-"`
+ statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
+ runtimeCreatorMutex sync.Mutex `json:"-" yaml:"-"`
}
type ClickHouseKeeperInstallationRuntime struct {
- statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
-}
-
-// EnsureStatus ensures status
-func (chk *ClickHouseKeeperInstallation) EnsureStatus() *ChkStatus {
- if chk == nil {
- return nil
- }
-
- // Assume that most of the time, we'll see a non-nil value.
- if chk.Status != nil {
- return chk.Status
- }
-
- // Otherwise, we need to acquire a lock to initialize the field.
- chk.Runtime.statusCreatorMutex.Lock()
- defer chk.Runtime.statusCreatorMutex.Unlock()
- // Note that we have to check this property again to avoid a TOCTOU bug.
- if chk.Status == nil {
- chk.Status = &ChkStatus{}
- }
- return chk.Status
-}
-
-// GetStatus gets Status
-func (chk *ClickHouseKeeperInstallation) GetStatus() *ChkStatus {
- if chk == nil {
- return nil
- }
- return chk.Status
-}
-
-// HasStatus checks whether CHI has Status
-func (chk *ClickHouseKeeperInstallation) HasStatus() bool {
- if chk == nil {
- return false
- }
- return chk.Status != nil
-}
-
-// HasAncestor checks whether CHI has an ancestor
-func (chk *ClickHouseKeeperInstallation) HasAncestor() bool {
- if !chk.HasStatus() {
- return false
- }
- return chk.Status.HasNormalizedCHKCompleted()
-}
-
-// GetAncestor gets ancestor of a CHI
-func (chk *ClickHouseKeeperInstallation) GetAncestor() *ClickHouseKeeperInstallation {
- if !chk.HasAncestor() {
- return nil
- }
- return chk.Status.GetNormalizedCHKCompleted()
-}
-
-// SetAncestor sets ancestor of a CHI
-func (chk *ClickHouseKeeperInstallation) SetAncestor(a *ClickHouseKeeperInstallation) {
- if chk == nil {
- return
- }
- chk.EnsureStatus().NormalizedCHKCompleted = a
-}
-
-// HasTarget checks whether CHI has a target
-func (chk *ClickHouseKeeperInstallation) HasTarget() bool {
- if !chk.HasStatus() {
- return false
- }
- return chk.Status.HasNormalizedCHK()
-}
-
-// GetTarget gets target of a CHI
-func (chk *ClickHouseKeeperInstallation) GetTarget() *ClickHouseKeeperInstallation {
- if !chk.HasTarget() {
- return nil
- }
- return chk.Status.GetNormalizedCHK()
-}
-
-// SetTarget sets target of a CHI
-func (chk *ClickHouseKeeperInstallation) SetTarget(a *ClickHouseKeeperInstallation) {
- if chk == nil {
- return
- }
- chk.EnsureStatus().NormalizedCHK = a
-}
-
-// MergeFrom merges from CHI
-func (chk *ClickHouseKeeperInstallation) MergeFrom(from *ClickHouseKeeperInstallation, _type apiChi.MergeType) {
- if from == nil {
- return
- }
-
- // Merge Meta
- switch _type {
- case apiChi.MergeTypeFillEmptyValues:
- _ = mergo.Merge(&chk.TypeMeta, from.TypeMeta)
- _ = mergo.Merge(&chk.ObjectMeta, from.ObjectMeta)
- case apiChi.MergeTypeOverrideByNonEmptyValues:
- _ = mergo.Merge(&chk.TypeMeta, from.TypeMeta, mergo.WithOverride)
- _ = mergo.Merge(&chk.ObjectMeta, from.ObjectMeta, mergo.WithOverride)
- }
- // Exclude skipped annotations
- chk.Annotations = util.CopyMapFilter(
- chk.Annotations,
- nil,
- util.ListSkippedAnnotations(),
- )
-
- // Do actual merge for Spec
- (&chk.Spec).MergeFrom(&from.Spec, _type)
-
- chk.EnsureStatus().CopyFrom(from.Status, apiChi.CopyCHIStatusOptions{
- InheritableFields: true,
- })
-}
-
-// ChkSpec defines spec section of ClickHouseKeeper resource
-type ChkSpec struct {
- Configuration *ChkConfiguration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
- Templates *apiChi.Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
-}
-
-func (spec ChkSpec) GetConfiguration() *ChkConfiguration {
- return spec.Configuration
-}
-
-func (spec ChkSpec) EnsureConfiguration() *ChkConfiguration {
- if spec.GetConfiguration() == nil {
- spec.Configuration = new(ChkConfiguration)
- }
- return spec.Configuration
-}
-
-func (spec ChkSpec) GetTemplates() *apiChi.Templates {
- return spec.Templates
-}
-
-// MergeFrom merges from spec
-func (spec *ChkSpec) MergeFrom(from *ChkSpec, _type apiChi.MergeType) {
- if from == nil {
- return
- }
-
- spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
- spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
-}
-
-// ChkConfiguration defines configuration section of .spec
-type ChkConfiguration struct {
- Settings *apiChi.Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Clusters []*ChkCluster `json:"clusters,omitempty" yaml:"clusters,omitempty"`
-}
-
-// NewConfiguration creates new ChkConfiguration objects
-func NewConfiguration() *ChkConfiguration {
- return new(ChkConfiguration)
-}
-
-func (c *ChkConfiguration) GetSettings() *apiChi.Settings {
- if c == nil {
- return nil
- }
-
- return c.Settings
-}
-
-func (c *ChkConfiguration) GetClusters() []*ChkCluster {
- if c == nil {
- return nil
- }
-
- return c.Clusters
-}
-
-func (c *ChkConfiguration) GetCluster(i int) *ChkCluster {
- clusters := c.GetClusters()
- if clusters == nil {
- return nil
- }
- if i >= len(clusters) {
- return nil
- }
- return clusters[i]
+ attributes *apiChi.ComparableAttributes `json:"-" yaml:"-"`
+ commonConfigMutex sync.Mutex `json:"-" yaml:"-"`
}
-// MergeFrom merges from specified source
-func (configuration *ChkConfiguration) MergeFrom(from *ChkConfiguration, _type apiChi.MergeType) *ChkConfiguration {
- if from == nil {
- return configuration
+func newClickHouseKeeperInstallationRuntime() *ClickHouseKeeperInstallationRuntime {
+ return &ClickHouseKeeperInstallationRuntime{
+ attributes: &apiChi.ComparableAttributes{},
}
-
- if configuration == nil {
- configuration = NewConfiguration()
- }
-
- configuration.Settings = configuration.Settings.MergeFrom(from.Settings)
-
- // TODO merge clusters
- // Copy Clusters for now
- configuration.Clusters = from.Clusters
-
- return configuration
-}
-
-// ChkCluster defines item of a clusters section of .configuration
-type ChkCluster struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Layout *ChkClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
-}
-
-func (c *ChkCluster) GetLayout() *ChkClusterLayout {
- if c == nil {
- return nil
- }
- return c.Layout
-}
-
-// ChkClusterLayout defines layout section of .spec.configuration.clusters
-type ChkClusterLayout struct {
- // The valid range of size is from 1 to 7.
- ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
-}
-
-// NewChkClusterLayout creates new cluster layout
-func NewChkClusterLayout() *ChkClusterLayout {
- return new(ChkClusterLayout)
-}
-
-func (c *ChkClusterLayout) GetReplicasCount() int {
- if c == nil {
- return 0
- }
- return c.ReplicasCount
-}
-
-func (spec *ChkSpec) GetPath() string {
- switch {
- case spec.GetConfiguration().GetSettings().Has("keeper_server/storage_path"):
- return spec.GetConfiguration().GetSettings().Get("keeper_server/storage_path").String()
-
- case spec.GetConfiguration().GetSettings().Has("keeper_server/path"):
- return spec.GetConfiguration().GetSettings().Get("keeper_server/path").String()
-
- default:
- return "/var/lib/clickhouse_keeper"
- }
-}
-
-func (spec *ChkSpec) GetPort(name string, defaultValue int) int {
- // Has no setting - use default value
- if !spec.GetConfiguration().GetSettings().Has(name) {
- return defaultValue
- }
-
- // Port name is specified
- return spec.GetConfiguration().GetSettings().Get(name).ScalarInt()
}
-func (spec *ChkSpec) GetClientPort() int {
- return spec.GetPort("keeper_server/tcp_port", 9181)
+func (runtime *ClickHouseKeeperInstallationRuntime) GetAttributes() *apiChi.ComparableAttributes {
+ return runtime.attributes
}
-func (spec *ChkSpec) GetRaftPort() int {
- return spec.GetPort("keeper_server/raft_configuration/server/port", 9234)
+func (runtime *ClickHouseKeeperInstallationRuntime) LockCommonConfig() {
+ runtime.commonConfigMutex.Lock()
}
-func (spec *ChkSpec) GetPrometheusPort() int {
- return spec.GetPort("prometheus/port", -1)
+func (runtime *ClickHouseKeeperInstallationRuntime) UnlockCommonConfig() {
+ runtime.commonConfigMutex.Unlock()
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
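The runtime and Status fields on ClickHouseKeeperInstallation are now lazily initialized behind dedicated creator mutexes, following the double-checked pattern the removed EnsureStatus used: an unlocked fast path, then a re-check under the lock to avoid the TOCTOU race. A rough sketch of that pattern under simplified names (not the actual accessor added in this PR):

```go
package main

import (
	"fmt"
	"sync"
)

type runtime struct{}

type installation struct {
	rt                  *runtime
	runtimeCreatorMutex sync.Mutex
}

// ensureRuntime sketches the double-checked lazy init: the unlocked
// fast path assumes the same visibility trade-off the original
// EnsureStatus documented, and the field is re-checked under the lock
// so only one caller ever allocates it.
func (i *installation) ensureRuntime() *runtime {
	if i == nil {
		return nil
	}
	if i.rt != nil { // fast path: usually already initialized
		return i.rt
	}
	i.runtimeCreatorMutex.Lock()
	defer i.runtimeCreatorMutex.Unlock()
	if i.rt == nil { // re-check under the lock (TOCTOU)
		i.rt = &runtime{}
	}
	return i.rt
}

func main() {
	inst := &installation{}
	fmt.Println(inst.ensureRuntime() == inst.ensureRuntime()) // true
}
```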
diff --git a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
index 5df513df4..576453393 100644
--- a/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse-keeper.altinity.com/v1/zz_generated.deepcopy.go
@@ -23,26 +23,22 @@ package v1
import (
clickhousealtinitycomv1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ types "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChkCluster) DeepCopyInto(out *ChkCluster) {
+func (in *ChkClusterAddress) DeepCopyInto(out *ChkClusterAddress) {
*out = *in
- if in.Layout != nil {
- in, out := &in.Layout, &out.Layout
- *out = new(ChkClusterLayout)
- **out = **in
- }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkCluster.
-func (in *ChkCluster) DeepCopy() *ChkCluster {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkClusterAddress.
+func (in *ChkClusterAddress) DeepCopy() *ChkClusterAddress {
if in == nil {
return nil
}
- out := new(ChkCluster)
+ out := new(ChkClusterAddress)
in.DeepCopyInto(out)
return out
}
@@ -50,6 +46,33 @@ func (in *ChkCluster) DeepCopy() *ChkCluster {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChkClusterLayout) DeepCopyInto(out *ChkClusterLayout) {
*out = *in
+ if in.Shards != nil {
+ in, out := &in.Shards, &out.Shards
+ *out = make([]*ChkShard, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ChkShard)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = make([]*ChkReplica, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ChkReplica)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.HostsField != nil {
+ in, out := &in.HostsField, &out.HostsField
+ *out = new(clickhousealtinitycomv1.HostsField)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -64,107 +87,241 @@ func (in *ChkClusterLayout) DeepCopy() *ChkClusterLayout {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChkConfiguration) DeepCopyInto(out *ChkConfiguration) {
+func (in *ChkClusterRuntime) DeepCopyInto(out *ChkClusterRuntime) {
+ *out = *in
+ out.Address = in.Address
+ if in.CHK != nil {
+ in, out := &in.CHK, &out.CHK
+ *out = new(ClickHouseKeeperInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkClusterRuntime.
+func (in *ChkClusterRuntime) DeepCopy() *ChkClusterRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ChkClusterRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChkReplica) DeepCopyInto(out *ChkReplica) {
*out = *in
if in.Settings != nil {
in, out := &in.Settings, &out.Settings
*out = new(clickhousealtinitycomv1.Settings)
(*in).DeepCopyInto(*out)
}
- if in.Clusters != nil {
- in, out := &in.Clusters, &out.Clusters
- *out = make([]*ChkCluster, len(*in))
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(clickhousealtinitycomv1.TemplatesList)
+ **out = **in
+ }
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]*clickhousealtinitycomv1.Host, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(ChkCluster)
+ *out = new(clickhousealtinitycomv1.Host)
(*in).DeepCopyInto(*out)
}
}
}
+ in.Runtime.DeepCopyInto(&out.Runtime)
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkConfiguration.
-func (in *ChkConfiguration) DeepCopy() *ChkConfiguration {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkReplica.
+func (in *ChkReplica) DeepCopy() *ChkReplica {
if in == nil {
return nil
}
- out := new(ChkConfiguration)
+ out := new(ChkReplica)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChkSpec) DeepCopyInto(out *ChkSpec) {
+func (in *ChkReplicaAddress) DeepCopyInto(out *ChkReplicaAddress) {
*out = *in
- if in.Configuration != nil {
- in, out := &in.Configuration, &out.Configuration
- *out = new(ChkConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkReplicaAddress.
+func (in *ChkReplicaAddress) DeepCopy() *ChkReplicaAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(ChkReplicaAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChkReplicaRuntime) DeepCopyInto(out *ChkReplicaRuntime) {
+ *out = *in
+ out.Address = in.Address
+ if in.CHK != nil {
+ in, out := &in.CHK, &out.CHK
+ *out = new(ClickHouseKeeperInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkReplicaRuntime.
+func (in *ChkReplicaRuntime) DeepCopy() *ChkReplicaRuntime {
+ if in == nil {
+ return nil
+ }
+ out := new(ChkReplicaRuntime)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChkShard) DeepCopyInto(out *ChkShard) {
+ *out = *in
+ if in.Weight != nil {
+ in, out := &in.Weight, &out.Weight
+ *out = new(int)
+ **out = **in
+ }
+ if in.InternalReplication != nil {
+ in, out := &in.InternalReplication, &out.InternalReplication
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = new(clickhousealtinitycomv1.Settings)
(*in).DeepCopyInto(*out)
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
- *out = new(clickhousealtinitycomv1.Templates)
+ *out = new(clickhousealtinitycomv1.TemplatesList)
+ **out = **in
+ }
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]*clickhousealtinitycomv1.Host, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(clickhousealtinitycomv1.Host)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ in.Runtime.DeepCopyInto(&out.Runtime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkShard.
+func (in *ChkShard) DeepCopy() *ChkShard {
+ if in == nil {
+ return nil
+ }
+ out := new(ChkShard)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChkShardAddress) DeepCopyInto(out *ChkShardAddress) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkShardAddress.
+func (in *ChkShardAddress) DeepCopy() *ChkShardAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(ChkShardAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChkShardRuntime) DeepCopyInto(out *ChkShardRuntime) {
+ *out = *in
+ out.Address = in.Address
+ if in.CHK != nil {
+ in, out := &in.CHK, &out.CHK
+ *out = new(ClickHouseKeeperInstallation)
(*in).DeepCopyInto(*out)
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkSpec.
-func (in *ChkSpec) DeepCopy() *ChkSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkShardRuntime.
+func (in *ChkShardRuntime) DeepCopy() *ChkShardRuntime {
if in == nil {
return nil
}
- out := new(ChkSpec)
+ out := new(ChkShardRuntime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChkStatus) DeepCopyInto(out *ChkStatus) {
+func (in *ChkSpec) DeepCopyInto(out *ChkSpec) {
*out = *in
- if in.ReadyReplicas != nil {
- in, out := &in.ReadyReplicas, &out.ReadyReplicas
- *out = make([]clickhousealtinitycomv1.ChiZookeeperNode, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
+ if in.TaskID != nil {
+ in, out := &in.TaskID, &out.TaskID
+ *out = new(types.String)
+ **out = **in
}
- if in.Pods != nil {
- in, out := &in.Pods, &out.Pods
- *out = make([]string, len(*in))
- copy(*out, *in)
+ if in.NamespaceDomainPattern != nil {
+ in, out := &in.NamespaceDomainPattern, &out.NamespaceDomainPattern
+ *out = new(types.String)
+ **out = **in
}
- if in.PodIPs != nil {
- in, out := &in.PodIPs, &out.PodIPs
- *out = make([]string, len(*in))
- copy(*out, *in)
+ if in.Reconciling != nil {
+ in, out := &in.Reconciling, &out.Reconciling
+ *out = new(clickhousealtinitycomv1.Reconciling)
+ (*in).DeepCopyInto(*out)
}
- if in.FQDNs != nil {
- in, out := &in.FQDNs, &out.FQDNs
- *out = make([]string, len(*in))
- copy(*out, *in)
+ if in.Defaults != nil {
+ in, out := &in.Defaults, &out.Defaults
+ *out = new(clickhousealtinitycomv1.Defaults)
+ (*in).DeepCopyInto(*out)
}
- if in.NormalizedCHK != nil {
- in, out := &in.NormalizedCHK, &out.NormalizedCHK
- *out = new(ClickHouseKeeperInstallation)
+ if in.Configuration != nil {
+ in, out := &in.Configuration, &out.Configuration
+ *out = new(Configuration)
(*in).DeepCopyInto(*out)
}
- if in.NormalizedCHKCompleted != nil {
- in, out := &in.NormalizedCHKCompleted, &out.NormalizedCHKCompleted
- *out = new(ClickHouseKeeperInstallation)
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(clickhousealtinitycomv1.Templates)
(*in).DeepCopyInto(*out)
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkStatus.
-func (in *ChkStatus) DeepCopy() *ChkStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChkSpec.
+func (in *ChkSpec) DeepCopy() *ChkSpec {
if in == nil {
return nil
}
- out := new(ChkStatus)
+ out := new(ChkSpec)
in.DeepCopyInto(out)
return out
}
@@ -177,10 +334,16 @@ func (in *ClickHouseKeeperInstallation) DeepCopyInto(out *ClickHouseKeeperInstal
in.Spec.DeepCopyInto(&out.Spec)
if in.Status != nil {
in, out := &in.Status, &out.Status
- *out = new(ChkStatus)
+ *out = new(Status)
(*in).DeepCopyInto(*out)
}
- out.Runtime = in.Runtime
+ if in.runtime != nil {
+ in, out := &in.runtime, &out.runtime
+ *out = new(ClickHouseKeeperInstallationRuntime)
+ (*in).DeepCopyInto(*out)
+ }
+ out.statusCreatorMutex = in.statusCreatorMutex
+ out.runtimeCreatorMutex = in.runtimeCreatorMutex
return
}
@@ -238,7 +401,12 @@ func (in *ClickHouseKeeperInstallationList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClickHouseKeeperInstallationRuntime) DeepCopyInto(out *ClickHouseKeeperInstallationRuntime) {
*out = *in
- out.statusCreatorMutex = in.statusCreatorMutex
+ if in.attributes != nil {
+ in, out := &in.attributes, &out.attributes
+ *out = new(clickhousealtinitycomv1.ComparableAttributes)
+ (*in).DeepCopyInto(*out)
+ }
+ out.commonConfigMutex = in.commonConfigMutex
return
}
@@ -251,3 +419,186 @@ func (in *ClickHouseKeeperInstallationRuntime) DeepCopy() *ClickHouseKeeperInsta
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+ *out = *in
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(clickhousealtinitycomv1.TemplatesList)
+ **out = **in
+ }
+ if in.Layout != nil {
+ in, out := &in.Layout, &out.Layout
+ *out = new(ChkClusterLayout)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Runtime.DeepCopyInto(&out.Runtime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+ if in == nil {
+ return nil
+ }
+ out := new(Cluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Configuration) DeepCopyInto(out *Configuration) {
+ *out = *in
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = new(clickhousealtinitycomv1.Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Clusters != nil {
+ in, out := &in.Clusters, &out.Clusters
+ *out = make([]*Cluster, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Cluster)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Configuration.
+func (in *Configuration) DeepCopy() *Configuration {
+ if in == nil {
+ return nil
+ }
+ out := new(Configuration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FillStatusParams) DeepCopyInto(out *FillStatusParams) {
+ *out = *in
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.FQDNs != nil {
+ in, out := &in.FQDNs, &out.FQDNs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NormalizedCR != nil {
+ in, out := &in.NormalizedCR, &out.NormalizedCR
+ *out = new(ClickHouseKeeperInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FillStatusParams.
+func (in *FillStatusParams) DeepCopy() *FillStatusParams {
+ if in == nil {
+ return nil
+ }
+ out := new(FillStatusParams)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Status) DeepCopyInto(out *Status) {
+ *out = *in
+ if in.TaskIDsStarted != nil {
+ in, out := &in.TaskIDsStarted, &out.TaskIDsStarted
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.TaskIDsCompleted != nil {
+ in, out := &in.TaskIDsCompleted, &out.TaskIDsCompleted
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Errors != nil {
+ in, out := &in.Errors, &out.Errors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PodIPs != nil {
+ in, out := &in.PodIPs, &out.PodIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.FQDNs != nil {
+ in, out := &in.FQDNs, &out.FQDNs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NormalizedCR != nil {
+ in, out := &in.NormalizedCR, &out.NormalizedCR
+ *out = new(ClickHouseKeeperInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NormalizedCRCompleted != nil {
+ in, out := &in.NormalizedCRCompleted, &out.NormalizedCRCompleted
+ *out = new(ClickHouseKeeperInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HostsWithTablesCreated != nil {
+ in, out := &in.HostsWithTablesCreated, &out.HostsWithTablesCreated
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsedTemplates != nil {
+ in, out := &in.UsedTemplates, &out.UsedTemplates
+ *out = make([]*clickhousealtinitycomv1.TemplateRef, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(clickhousealtinitycomv1.TemplateRef)
+ **out = **in
+ }
+ }
+ }
+ out.mu = in.mu
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
+func (in *Status) DeepCopy() *Status {
+ if in == nil {
+ return nil
+ }
+ out := new(Status)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/cr.go b/pkg/apis/clickhouse.altinity.com/v1/cr.go
new file mode 100644
index 000000000..0076836e2
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/cr.go
@@ -0,0 +1,166 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "math"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
+)
+
+// getMaxNumberOfPodsPerNode returns the max number of Pods allowed per Node.
+// TODO need to support multi-cluster
+func getMaxNumberOfPodsPerNode(cr ICustomResource) int {
+ maxNumberOfPodsPerNode := 0
+ cr.WalkPodTemplates(func(template *PodTemplate) {
+ for i := range template.PodDistribution {
+ podDistribution := &template.PodDistribution[i]
+ if podDistribution.Type == deployment.PodDistributionMaxNumberPerNode {
+ maxNumberOfPodsPerNode = podDistribution.Number
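+				// Note: when several pod templates declare this distribution, the last value seen wins.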
+ }
+ }
+ })
+ return maxNumberOfPodsPerNode
+}
+
+func calcCRAndClusterScopeCycleSizes(cr ICustomResource, maxNumberOfPodsPerNode int) (crScopeCycleSize int, clusterScopeCycleSize int) {
+ // 1perNode 2perNode 3perNode 4perNode 5perNode
+ // sh1r1 n1 a n1 a n1 a n1 a n1 a
+ // sh1r2 n2 a n2 a n2 a n2 a n2 a
+ // sh1r3 n3 a n3 a n3 a n3 a n3 a
+ // sh2r1 n4 a n4 a n4 a n4 a n1 b
+ // sh2r2 n5 a n5 a n5 a n1 b n2 b
+ // sh2r3 n6 a n6 a n1 b n2 b n3 b
+ // sh3r1 n7 a n7 a n2 b n3 b n1 c
+ // sh3r2 n8 a n8 a n3 b n4 b n2 c
+ // sh3r3 n9 a n1 b n4 b n1 c n3 c
+ // sh4r1 n10 a n2 b n5 b n2 c n1 d
+ // sh4r2 n11 a n3 b n1 c n3 c n2 d
+ // sh4r3 n12 a n4 b n2 c n4 c n3 d
+ // sh5r1 n13 a n5 b n3 c n1 d n1 e
+ // sh5r2 n14 a n6 b n4 c n2 d n2 e
+ // sh5r3 n15 a n7 b n5 c n3 d n3 e
+ // 1perNode = ceil(15 / 1 'cycles num') = 15 'cycle len'
+ // 2perNode = ceil(15 / 2 'cycles num') = 8 'cycle len'
+ // 3perNode = ceil(15 / 3 'cycles num') = 5 'cycle len'
+ // 4perNode = ceil(15 / 4 'cycles num') = 4 'cycle len'
+ // 5perNode = ceil(15 / 5 'cycles num') = 3 'cycle len'
+
+	// The number of requested cycles equals the max number of ClickHouse instances per node, but can't be less than 1
+ requestedClusterScopeCyclesNum := maxNumberOfPodsPerNode
+ if requestedClusterScopeCyclesNum <= 0 {
+ requestedClusterScopeCyclesNum = 1
+ }
+
+ crScopeCycleSize = 0 // Unlimited
+ clusterScopeCycleSize = 0
+ if requestedClusterScopeCyclesNum == 1 {
+ // One cycle only requested
+ clusterScopeCycleSize = 0 // Unlimited
+ } else {
+ clusterScopeCycleSize = int(math.Ceil(float64(cr.HostsCount()) / float64(requestedClusterScopeCyclesNum)))
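+		// e.g. (per the table above): 15 hosts with 4 pods per node gives
+		// clusterScopeCycleSize = ceil(15 / 4) = 4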
+ }
+
+ return crScopeCycleSize, clusterScopeCycleSize
+}
+
+// fillSelfCalculatedAddressInfo calculates and fills address info
+func fillSelfCalculatedAddressInfo(cr ICustomResource) {
+ // What is the max number of Pods allowed per Node
+ maxNumberOfPodsPerNode := getMaxNumberOfPodsPerNode(cr)
+	crScopeCycleSize, clusterScopeCycleSize := calcCRAndClusterScopeCycleSizes(cr, maxNumberOfPodsPerNode)
+
+	cr.WalkHostsFullPathAndScope(
+		crScopeCycleSize,
+ clusterScopeCycleSize,
+ func(
+ chi ICustomResource,
+ cluster ICluster,
+ shard IShard,
+ replica IReplica,
+ host IHost,
+ address *types.HostScopeAddress,
+ ) error {
+ cluster.GetRuntime().GetAddress().SetNamespace(chi.GetNamespace())
+ cluster.GetRuntime().GetAddress().SetCRName(chi.GetName())
+ cluster.GetRuntime().GetAddress().SetClusterName(cluster.GetName())
+ cluster.GetRuntime().GetAddress().SetClusterIndex(address.ClusterIndex)
+
+ shard.GetRuntime().GetAddress().SetNamespace(chi.GetNamespace())
+ shard.GetRuntime().GetAddress().SetCRName(chi.GetName())
+ shard.GetRuntime().GetAddress().SetClusterName(cluster.GetName())
+ shard.GetRuntime().GetAddress().SetClusterIndex(address.ClusterIndex)
+ shard.GetRuntime().GetAddress().SetShardName(shard.GetName())
+ shard.GetRuntime().GetAddress().SetShardIndex(address.ShardIndex)
+
+ replica.GetRuntime().GetAddress().SetNamespace(chi.GetNamespace())
+ replica.GetRuntime().GetAddress().SetCRName(chi.GetName())
+ replica.GetRuntime().GetAddress().SetClusterName(cluster.GetName())
+ replica.GetRuntime().GetAddress().SetClusterIndex(address.ClusterIndex)
+ replica.GetRuntime().GetAddress().SetReplicaName(replica.GetName())
+ replica.GetRuntime().GetAddress().SetReplicaIndex(address.ReplicaIndex)
+
+ host.GetRuntime().GetAddress().SetNamespace(chi.GetNamespace())
+ // Skip StatefulSet as impossible to self-calculate
+ // host.Address.StatefulSet = CreateStatefulSetName(host)
+ host.GetRuntime().GetAddress().SetCRName(chi.GetName())
+ host.GetRuntime().GetAddress().SetClusterName(cluster.GetName())
+ host.GetRuntime().GetAddress().SetClusterIndex(address.ClusterIndex)
+ host.GetRuntime().GetAddress().SetShardName(shard.GetName())
+ host.GetRuntime().GetAddress().SetShardIndex(address.ShardIndex)
+ host.GetRuntime().GetAddress().SetReplicaName(replica.GetName())
+ host.GetRuntime().GetAddress().SetReplicaIndex(address.ReplicaIndex)
+ host.GetRuntime().GetAddress().SetHostName(host.GetName())
+ host.GetRuntime().GetAddress().SetCRScopeIndex(address.CRScopeAddress.Index)
+ host.GetRuntime().GetAddress().SetCRScopeCycleSize(address.CRScopeAddress.CycleSpec.Size)
+ host.GetRuntime().GetAddress().SetCRScopeCycleIndex(address.CRScopeAddress.CycleAddress.CycleIndex)
+ host.GetRuntime().GetAddress().SetCRScopeCycleOffset(address.CRScopeAddress.CycleAddress.Index)
+ host.GetRuntime().GetAddress().SetClusterScopeIndex(address.ClusterScopeAddress.Index)
+ host.GetRuntime().GetAddress().SetClusterScopeCycleSize(address.ClusterScopeAddress.CycleSpec.Size)
+ host.GetRuntime().GetAddress().SetClusterScopeCycleIndex(address.ClusterScopeAddress.CycleAddress.CycleIndex)
+ host.GetRuntime().GetAddress().SetClusterScopeCycleOffset(address.ClusterScopeAddress.CycleAddress.Index)
+ host.GetRuntime().GetAddress().SetShardScopeIndex(address.ReplicaIndex)
+ host.GetRuntime().GetAddress().SetReplicaScopeIndex(address.ShardIndex)
+
+ return nil
+ },
+ )
+}
+
+func fillCRPointer(cr ICustomResource) {
+ cr.WalkHostsFullPath(
+ func(
+ cr ICustomResource,
+ cluster ICluster,
+ shard IShard,
+ replica IReplica,
+ host IHost,
+ address *types.HostScopeAddress,
+ ) error {
+ cluster.GetRuntime().SetCR(cr)
+ shard.GetRuntime().SetCR(cr)
+ replica.GetRuntime().SetCR(cr)
+ host.GetRuntime().SetCR(cr)
+ return nil
+ },
+ )
+}
+
+func FillCR(cr ICustomResource) {
+ fillSelfCalculatedAddressInfo(cr)
+ fillCRPointer(cr)
+}
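+
+// Usage sketch (hypothetical call site, for illustration only): FillCR is
+// meant to run once a CR is populated, so address info and back-pointers are
+// in place before anything walks the hosts:
+//
+//	FillCR(cr)
+//	_ = cr.WalkHosts(func(host *Host) error { return nil })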
diff --git a/pkg/apis/clickhouse.altinity.com/v1/interface.go b/pkg/apis/clickhouse.altinity.com/v1/interface.go
new file mode 100644
index 000000000..263ac3a75
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/interface.go
@@ -0,0 +1,261 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
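+// ICustomResource is the common interface implemented by operator-managed
+// custom resources, e.g. a ClickHouseInstallation.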
+type ICustomResource interface {
+ meta.Object
+
+ IsNonZero() bool
+
+ GetSpecA() any
+ GetSpec() ICRSpec
+ GetRuntime() ICustomResourceRuntime
+ GetRootServiceTemplate() (*ServiceTemplate, bool)
+ GetReconciling() *Reconciling
+
+ WalkClusters(f func(cluster ICluster) error) []error
+ WalkHosts(func(host *Host) error) []error
+ WalkPodTemplates(f func(template *PodTemplate))
+ WalkVolumeClaimTemplates(f func(template *VolumeClaimTemplate))
+ WalkHostsFullPath(f WalkHostsAddressFn) []error
+ WalkHostsFullPathAndScope(crScopeCycleSize int, clusterScopeCycleSize int, f WalkHostsAddressFn) (res []error)
+
+ FindCluster(needle interface{}) ICluster
+ FindShard(needleCluster interface{}, needleShard interface{}) IShard
+ FindHost(needleCluster interface{}, needleShard interface{}, needleHost interface{}) *Host
+
+ GetHostTemplate(name string) (*HostTemplate, bool)
+ GetPodTemplate(name string) (*PodTemplate, bool)
+ GetVolumeClaimTemplate(name string) (*VolumeClaimTemplate, bool)
+ GetServiceTemplate(name string) (*ServiceTemplate, bool)
+
+ HasAncestor() bool
+ GetAncestor() ICustomResource
+
+ IsStopped() bool
+ IsTroubleshoot() bool
+ IsRollingUpdate() bool
+
+ HostsCount() int
+ IEnsureStatus() IStatus
+ GetStatus() IStatus
+
+ YAML(opts types.CopyCROptions) string
+}
+
+type ICRSpec interface {
+ GetNamespaceDomainPattern() *types.String
+ GetDefaults() *Defaults
+ GetConfiguration() IConfiguration
+}
+
+type IConfiguration interface {
+ GetProfiles() *Settings
+ GetQuotas() *Settings
+ GetSettings() *Settings
+ GetFiles() *Settings
+}
+
+type ICustomResourceRuntime interface {
+ GetAttributes() *ComparableAttributes
+ LockCommonConfig()
+ UnlockCommonConfig()
+}
+
+type IStatus interface {
+ SetAction(string)
+ PushAction(string)
+ SetError(string)
+ PushError(string)
+ GetHostsCount() int
+ GetHostsCompletedCount() int
+ GetHostsAddedCount() int
+ GetHostsWithTablesCreated() []string
+ PushHostTablesCreated(host string)
+
+ HasNormalizedCRCompleted() bool
+
+ HostUnchanged()
+ HostUpdated()
+ HostAdded()
+ HostFailed()
+ HostCompleted()
+}
+
+type ICluster interface {
+ GetName() string
+ GetZookeeper() *ZookeeperConfig
+ GetSchemaPolicy() *SchemaPolicy
+ GetInsecure() *types.StringBool
+ GetSecure() *types.StringBool
+ GetSecret() *ClusterSecret
+ GetPDBMaxUnavailable() *types.Int32
+
+ WalkShards(f func(index int, shard IShard) error) []error
+ WalkHosts(func(host *Host) error) []error
+
+ HostsCount() int
+
+ FindShard(needle interface{}) IShard
+ FindHost(needleShard interface{}, needleHost interface{}) *Host
+
+ IsShardSpecified() bool
+
+ GetRuntime() IClusterRuntime
+ GetServiceTemplate() (*ServiceTemplate, bool)
+}
+
+type IClusterRuntime interface {
+ GetAddress() IClusterAddress
+ GetCR() ICustomResource
+ SetCR(cr ICustomResource)
+}
+
+type IClusterAddress interface {
+ GetNamespace() string
+ SetNamespace(string)
+
+ GetCRName() string
+ SetCRName(string)
+
+ GetClusterName() string
+ SetClusterName(string)
+
+ GetClusterIndex() int
+ SetClusterIndex(int)
+}
+
+type IShard interface {
+ GetName() string
+ GetRuntime() IShardRuntime
+ GetServiceTemplate() (*ServiceTemplate, bool)
+ GetInternalReplication() *types.StringBool
+ HasWeight() bool
+ GetWeight() int
+ HasSettings() bool
+ GetSettings() *Settings
+ HasFiles() bool
+ GetFiles() *Settings
+ HasTemplates() bool
+ GetTemplates() *TemplatesList
+
+ WalkHosts(func(host *Host) error) []error
+ WalkHostsAbortOnError(f func(host *Host) error) error
+
+ FindHost(needleHost interface{}) *Host
+ FirstHost() *Host
+
+ HostsCount() int
+}
+
+type IShardRuntime interface {
+ GetAddress() IShardAddress
+ GetCR() ICustomResource
+ SetCR(cr ICustomResource)
+}
+
+type IShardAddress interface {
+ IClusterAddress
+
+ GetShardName() string
+ SetShardName(string)
+
+ GetShardIndex() int
+ SetShardIndex(int)
+}
+
+type IReplica interface {
+ GetName() string
+ GetRuntime() IReplicaRuntime
+ HasSettings() bool
+ GetSettings() *Settings
+ HasFiles() bool
+ GetFiles() *Settings
+ HasTemplates() bool
+ GetTemplates() *TemplatesList
+}
+
+type IReplicaRuntime interface {
+ GetAddress() IReplicaAddress
+ SetCR(cr ICustomResource)
+}
+
+type IReplicaAddress interface {
+ IClusterAddress
+
+ GetReplicaName() string
+ SetReplicaName(string)
+
+ GetReplicaIndex() int
+ SetReplicaIndex(int)
+}
+
+type IHost interface {
+ GetName() string
+ GetRuntime() IHostRuntime
+}
+
+type IHostRuntime interface {
+ GetAddress() IHostAddress
+ GetCR() ICustomResource
+ SetCR(cr ICustomResource)
+}
+
+type IHostAddress interface {
+ IReplicaAddress
+ IShardAddress
+
+ GetStatefulSet() string
+ GetFQDN() string
+
+ GetHostName() string
+ SetHostName(string)
+
+ GetCRScopeIndex() int
+ SetCRScopeIndex(int)
+ GetCRScopeCycleSize() int
+ SetCRScopeCycleSize(int)
+ GetCRScopeCycleIndex() int
+ SetCRScopeCycleIndex(int)
+ GetCRScopeCycleOffset() int
+ SetCRScopeCycleOffset(int)
+ GetClusterScopeIndex() int
+ SetClusterScopeIndex(int)
+ GetClusterScopeCycleSize() int
+ SetClusterScopeCycleSize(int)
+ GetClusterScopeCycleIndex() int
+ SetClusterScopeCycleIndex(int)
+ GetClusterScopeCycleOffset() int
+ SetClusterScopeCycleOffset(int)
+ GetShardScopeIndex() int
+ SetShardScopeIndex(int)
+ GetReplicaScopeIndex() int
+ SetReplicaScopeIndex(int)
+}
+
+// WalkHostsAddressFn specifies a function used to walk over hosts
+type WalkHostsAddressFn func(
+ cr ICustomResource,
+ cluster ICluster,
+ shard IShard,
+ replica IReplica,
+ host IHost,
+ address *types.HostScopeAddress,
+) error
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_attributes.go b/pkg/apis/clickhouse.altinity.com/v1/type_attributes.go
new file mode 100644
index 000000000..c7643ee31
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_attributes.go
@@ -0,0 +1,144 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import core "k8s.io/api/core/v1"
+
+// ComparableAttributes specifies CHI attributes that are comparable
+type ComparableAttributes struct {
+ additionalEnvVars []core.EnvVar `json:"-" yaml:"-"`
+ additionalVolumes []core.Volume `json:"-" yaml:"-"`
+ additionalVolumeMounts []core.VolumeMount `json:"-" yaml:"-"`
+ skipOwnerRef bool `json:"-" yaml:"-"`
+}
+
+func (a *ComparableAttributes) GetAdditionalEnvVars() []core.EnvVar {
+ if a == nil {
+ return nil
+ }
+ return a.additionalEnvVars
+}
+
+func (a *ComparableAttributes) AppendAdditionalEnvVar(envVar core.EnvVar) {
+ if a == nil {
+ return
+ }
+ a.additionalEnvVars = append(a.additionalEnvVars, envVar)
+}
+
+func (a *ComparableAttributes) AppendAdditionalEnvVarIfNotExists(envVar core.EnvVar) {
+ if a == nil {
+ return
+ }
+
+ // Sanity check
+ if envVar.Name == "" {
+ // This env var is incorrect
+ return
+ }
+
+ for _, existingEnvVar := range a.GetAdditionalEnvVars() {
+ if existingEnvVar.Name == envVar.Name {
+ // Such a variable already exists
+ return
+ }
+ }
+
+ a.AppendAdditionalEnvVar(envVar)
+}
+
+func (a *ComparableAttributes) GetAdditionalVolumes() []core.Volume {
+ if a == nil {
+ return nil
+ }
+ return a.additionalVolumes
+}
+
+func (a *ComparableAttributes) AppendAdditionalVolume(volume core.Volume) {
+ if a == nil {
+ return
+ }
+ a.additionalVolumes = append(a.additionalVolumes, volume)
+}
+
+func (a *ComparableAttributes) AppendAdditionalVolumeIfNotExists(volume core.Volume) {
+ if a == nil {
+ return
+ }
+
+ // Sanity check
+ if volume.Name == "" {
+ // This volume is incorrect
+ return
+ }
+
+ for _, existingVolume := range a.GetAdditionalVolumes() {
+ if existingVolume.Name == volume.Name {
+ // Such a volume already exists
+ return
+ }
+ }
+
+ // Volume looks good
+ a.AppendAdditionalVolume(volume)
+}
+
+func (a *ComparableAttributes) GetAdditionalVolumeMounts() []core.VolumeMount {
+ if a == nil {
+ return nil
+ }
+ return a.additionalVolumeMounts
+}
+
+func (a *ComparableAttributes) AppendAdditionalVolumeMount(volumeMount core.VolumeMount) {
+ if a == nil {
+ return
+ }
+ a.additionalVolumeMounts = append(a.additionalVolumeMounts, volumeMount)
+}
+
+func (a *ComparableAttributes) AppendAdditionalVolumeMountIfNotExists(volumeMount core.VolumeMount) {
+ if a == nil {
+ return
+ }
+
+ // Sanity check
+ if volumeMount.Name == "" {
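+		// This volume mount is incorrect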
+ return
+ }
+
+ for _, existingVolumeMount := range a.GetAdditionalVolumeMounts() {
+ if existingVolumeMount.Name == volumeMount.Name {
+ // Such a volume mount already exists
+ return
+ }
+ }
+
+ a.AppendAdditionalVolumeMount(volumeMount)
+}
+
+func (a *ComparableAttributes) GetSkipOwnerRef() bool {
+ if a == nil {
+ return false
+ }
+ return a.skipOwnerRef
+}
+
+func (a *ComparableAttributes) SetSkipOwnerRef(skip bool) {
+ if a == nil {
+ return
+ }
+ a.skipOwnerRef = skip
+}
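+
+// Usage sketch (illustrative only): the IfNotExists variants de-duplicate by
+// name, so repeated reconcile passes do not accumulate duplicates:
+//
+//	a := &ComparableAttributes{}
+//	a.AppendAdditionalEnvVarIfNotExists(core.EnvVar{Name: "FOO", Value: "1"})
+//	a.AppendAdditionalEnvVarIfNotExists(core.EnvVar{Name: "FOO", Value: "2"})
+//	len(a.GetAdditionalEnvVars()) // 1, the second append is skipped by name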
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index 7c410e3b7..71727284d 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -18,310 +18,185 @@ import (
"context"
"encoding/json"
"fmt"
- "math"
"github.com/imdario/mergo"
"gopkg.in/yaml.v3"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/util"
)
-// FillStatus fills .Status
-func (chi *ClickHouseInstallation) FillStatus(endpoint string, pods, fqdns []string, ip string) {
- chi.EnsureStatus().Fill(&FillStatusParams{
- CHOpIP: ip,
- ClustersCount: chi.ClustersCount(),
- ShardsCount: chi.ShardsCount(),
- HostsCount: chi.HostsCount(),
- TaskID: chi.Spec.GetTaskID(),
- HostsUpdatedCount: 0,
- HostsAddedCount: 0,
- HostsUnchangedCount: 0,
- HostsCompletedCount: 0,
- HostsDeleteCount: 0,
- HostsDeletedCount: 0,
- Pods: pods,
- FQDNs: fqdns,
- Endpoint: endpoint,
- NormalizedCHI: chi.Copy(CopyCHIOptions{
- SkipStatus: true,
- SkipManagedFields: true,
- }),
- })
+func (cr *ClickHouseInstallation) IsNonZero() bool {
+ return cr != nil
}
-// FillSelfCalculatedAddressInfo calculates and fills address info
-func (chi *ClickHouseInstallation) FillSelfCalculatedAddressInfo() {
- // What is the max number of Pods allowed per Node
- // TODO need to support multi-cluster
- maxNumberOfPodsPerNode := 0
- chi.WalkPodTemplates(func(template *PodTemplate) {
- for i := range template.PodDistribution {
- podDistribution := &template.PodDistribution[i]
- if podDistribution.Type == deployment.PodDistributionMaxNumberPerNode {
- maxNumberOfPodsPerNode = podDistribution.Number
- }
- }
- })
+func (cr *ClickHouseInstallation) GetSpec() ICRSpec {
+ return &cr.Spec
+}
- // 1perNode 2perNode 3perNode 4perNode 5perNode
- // sh1r1 n1 a n1 a n1 a n1 a n1 a
- // sh1r2 n2 a n2 a n2 a n2 a n2 a
- // sh1r3 n3 a n3 a n3 a n3 a n3 a
- // sh2r1 n4 a n4 a n4 a n4 a n1 b
- // sh2r2 n5 a n5 a n5 a n1 b n2 b
- // sh2r3 n6 a n6 a n1 b n2 b n3 b
- // sh3r1 n7 a n7 a n2 b n3 b n1 c
- // sh3r2 n8 a n8 a n3 b n4 b n2 c
- // sh3r3 n9 a n1 b n4 b n1 c n3 c
- // sh4r1 n10 a n2 b n5 b n2 c n1 d
- // sh4r2 n11 a n3 b n1 c n3 c n2 d
- // sh4r3 n12 a n4 b n2 c n4 c n3 d
- // sh5r1 n13 a n5 b n3 c n1 d n1 e
- // sh5r2 n14 a n6 b n4 c n2 d n2 e
- // sh5r3 n15 a n7 b n5 c n3 d n3 e
- // 1perNode = ceil(15 / 1 'cycles num') = 15 'cycle len'
- // 2perNode = ceil(15 / 2 'cycles num') = 8 'cycle len'
- // 3perNode = ceil(15 / 3 'cycles num') = 5 'cycle len'
- // 4perNode = ceil(15 / 4 'cycles num') = 4 'cycle len'
- // 5perNode = ceil(15 / 5 'cycles num') = 3 'cycle len'
-
- // Number of requested cycles equals to max number of ClickHouses per node, but can't be less than 1
- requestedClusterScopeCyclesNum := maxNumberOfPodsPerNode
- if requestedClusterScopeCyclesNum <= 0 {
- requestedClusterScopeCyclesNum = 1
- }
-
- chiScopeCycleSize := 0 // Unlimited
- clusterScopeCycleSize := 0
- if requestedClusterScopeCyclesNum == 1 {
- // One cycle only requested
- clusterScopeCycleSize = 0 // Unlimited
- } else {
- clusterScopeCycleSize = int(math.Ceil(float64(chi.HostsCount()) / float64(requestedClusterScopeCyclesNum)))
- }
-
- chi.WalkHostsFullPathAndScope(
- chiScopeCycleSize,
- clusterScopeCycleSize,
- func(
- chi *ClickHouseInstallation,
- cluster *Cluster,
- shard *ChiShard,
- replica *ChiReplica,
- host *ChiHost,
- address *HostAddress,
- ) error {
- cluster.Runtime.Address.Namespace = chi.Namespace
- cluster.Runtime.Address.CHIName = chi.Name
- cluster.Runtime.Address.ClusterName = cluster.Name
- cluster.Runtime.Address.ClusterIndex = address.ClusterIndex
-
- shard.Runtime.Address.Namespace = chi.Namespace
- shard.Runtime.Address.CHIName = chi.Name
- shard.Runtime.Address.ClusterName = cluster.Name
- shard.Runtime.Address.ClusterIndex = address.ClusterIndex
- shard.Runtime.Address.ShardName = shard.Name
- shard.Runtime.Address.ShardIndex = address.ShardIndex
-
- replica.Runtime.Address.Namespace = chi.Namespace
- replica.Runtime.Address.CHIName = chi.Name
- replica.Runtime.Address.ClusterName = cluster.Name
- replica.Runtime.Address.ClusterIndex = address.ClusterIndex
- replica.Runtime.Address.ReplicaName = replica.Name
- replica.Runtime.Address.ReplicaIndex = address.ReplicaIndex
-
- host.Runtime.Address.Namespace = chi.Namespace
- // Skip StatefulSet as impossible to self-calculate
- // host.Address.StatefulSet = CreateStatefulSetName(host)
- host.Runtime.Address.CHIName = chi.Name
- host.Runtime.Address.ClusterName = cluster.Name
- host.Runtime.Address.ClusterIndex = address.ClusterIndex
- host.Runtime.Address.ShardName = shard.Name
- host.Runtime.Address.ShardIndex = address.ShardIndex
- host.Runtime.Address.ReplicaName = replica.Name
- host.Runtime.Address.ReplicaIndex = address.ReplicaIndex
- host.Runtime.Address.HostName = host.Name
- host.Runtime.Address.CHIScopeIndex = address.CHIScopeAddress.Index
- host.Runtime.Address.CHIScopeCycleSize = address.CHIScopeAddress.CycleSpec.Size
- host.Runtime.Address.CHIScopeCycleIndex = address.CHIScopeAddress.CycleAddress.CycleIndex
- host.Runtime.Address.CHIScopeCycleOffset = address.CHIScopeAddress.CycleAddress.Index
- host.Runtime.Address.ClusterScopeIndex = address.ClusterScopeAddress.Index
- host.Runtime.Address.ClusterScopeCycleSize = address.ClusterScopeAddress.CycleSpec.Size
- host.Runtime.Address.ClusterScopeCycleIndex = address.ClusterScopeAddress.CycleAddress.CycleIndex
- host.Runtime.Address.ClusterScopeCycleOffset = address.ClusterScopeAddress.CycleAddress.Index
- host.Runtime.Address.ShardScopeIndex = address.ReplicaIndex
- host.Runtime.Address.ReplicaScopeIndex = address.ShardIndex
-
- return nil
- },
- )
+func (cr *ClickHouseInstallation) GetSpecT() *ChiSpec {
+ return &cr.Spec
}
-// FillCHIPointer fills CHI pointer
-func (chi *ClickHouseInstallation) FillCHIPointer() {
- chi.WalkHostsFullPath(
- func(
- chi *ClickHouseInstallation,
- cluster *Cluster,
- shard *ChiShard,
- replica *ChiReplica,
- host *ChiHost,
- address *HostAddress,
- ) error {
- cluster.Runtime.CHI = chi
- shard.Runtime.CHI = chi
- replica.Runtime.CHI = chi
- host.Runtime.CHI = chi
- return nil
- },
- )
+func (cr *ClickHouseInstallation) GetSpecA() any {
+ return &cr.Spec
}
-// WalkClustersFullPath walks clusters with full path
-func (chi *ClickHouseInstallation) WalkClustersFullPath(
- f func(chi *ClickHouseInstallation, clusterIndex int, cluster *Cluster) error,
-) []error {
- if chi == nil {
+func (cr *ClickHouseInstallation) GetRuntime() ICustomResourceRuntime {
+ return cr.ensureRuntime()
+}
+
+func (cr *ClickHouseInstallation) ensureRuntime() *ClickHouseInstallationRuntime {
+ if cr == nil {
return nil
}
- res := make([]error, 0)
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- res = append(res, f(chi, clusterIndex, chi.Spec.Configuration.Clusters[clusterIndex]))
+ // Assume that most of the time, we'll see a non-nil value.
+ if cr.runtime != nil {
+ return cr.runtime
}
- return res
+ // Otherwise, we need to acquire a lock to initialize the field.
+ cr.runtimeCreatorMutex.Lock()
+ defer cr.runtimeCreatorMutex.Unlock()
+ // Note that we have to check this property again to avoid a TOCTOU bug.
+ if cr.runtime == nil {
+ cr.runtime = newClickHouseInstallationRuntime()
+ }
+ return cr.runtime
}
-// WalkClusters walks clusters
-func (chi *ClickHouseInstallation) WalkClusters(f func(cluster *Cluster) error) []error {
- if chi == nil {
+func (cr *ClickHouseInstallation) IEnsureStatus() IStatus {
+ return any(cr.EnsureStatus()).(IStatus)
+}
+
+// EnsureStatus ensures status
+func (cr *ClickHouseInstallation) EnsureStatus() *Status {
+ if cr == nil {
return nil
}
- res := make([]error, 0)
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- res = append(res, f(chi.Spec.Configuration.Clusters[clusterIndex]))
+ // Assume that most of the time, we'll see a non-nil value.
+ if cr.Status != nil {
+ return cr.Status
}
- return res
+ // Otherwise, we need to acquire a lock to initialize the field.
+ cr.statusCreatorMutex.Lock()
+ defer cr.statusCreatorMutex.Unlock()
+ // Note that we have to check this property again to avoid a TOCTOU bug.
+ if cr.Status == nil {
+ cr.Status = &Status{}
+ }
+ return cr.Status
}
-// WalkShards walks shards
-func (chi *ClickHouseInstallation) WalkShards(
- f func(
- shard *ChiShard,
- ) error,
-) []error {
- if chi == nil {
- return nil
+// GetStatus gets Status
+func (cr *ClickHouseInstallation) GetStatus() IStatus {
+ if cr == nil {
+ return (*Status)(nil)
}
- res := make([]error, 0)
+ return cr.Status
+}
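+
+// Note: when cr is nil, GetStatus returns a typed nil ((*Status)(nil)) rather
+// than a bare nil interface, presumably so that nil-receiver-safe Status
+// methods stay callable through IStatus.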
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- cluster := chi.Spec.Configuration.Clusters[clusterIndex]
- for shardIndex := range cluster.Layout.Shards {
- shard := &cluster.Layout.Shards[shardIndex]
- res = append(res, f(shard))
- }
+// HasStatus checks whether CHI has Status
+func (cr *ClickHouseInstallation) HasStatus() bool {
+ if cr == nil {
+ return false
}
-
- return res
+ return cr.Status != nil
}
-// WalkHostsFullPathAndScope walks hosts with full path
-func (chi *ClickHouseInstallation) WalkHostsFullPathAndScope(
- chiScopeCycleSize int,
- clusterScopeCycleSize int,
- f WalkHostsAddressFn,
-) (res []error) {
- if chi == nil {
- return nil
- }
- address := NewHostAddress(chiScopeCycleSize, clusterScopeCycleSize)
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- cluster := chi.Spec.Configuration.Clusters[clusterIndex]
- address.ClusterScopeAddress.Init()
- for shardIndex := range cluster.Layout.Shards {
- shard := cluster.GetShard(shardIndex)
- for replicaIndex, host := range shard.Hosts {
- replica := cluster.GetReplica(replicaIndex)
- address.ClusterIndex = clusterIndex
- address.ShardIndex = shardIndex
- address.ReplicaIndex = replicaIndex
- res = append(res, f(chi, cluster, shard, replica, host, address))
- address.CHIScopeAddress.Inc()
- address.ClusterScopeAddress.Inc()
- }
- }
+// HasAncestor checks whether CR has an ancestor
+func (cr *ClickHouseInstallation) HasAncestor() bool {
+ if !cr.HasStatus() {
+ return false
}
- return res
+ return cr.Status.HasNormalizedCRCompleted()
}
-// WalkHostsFullPath walks hosts with a function
-func (chi *ClickHouseInstallation) WalkHostsFullPath(f WalkHostsAddressFn) []error {
- return chi.WalkHostsFullPathAndScope(0, 0, f)
+// GetAncestor gets ancestor of a CR
+func (cr *ClickHouseInstallation) GetAncestor() ICustomResource {
+ if !cr.HasAncestor() {
+ return (*ClickHouseInstallation)(nil)
+ }
+ return cr.Status.GetNormalizedCRCompleted()
}
-// WalkHosts walks hosts with a function
-func (chi *ClickHouseInstallation) WalkHosts(f func(host *ChiHost) error) []error {
- if chi == nil {
+// GetAncestorT gets ancestor of a CR
+func (cr *ClickHouseInstallation) GetAncestorT() *ClickHouseInstallation {
+ if !cr.HasAncestor() {
return nil
}
- res := make([]error, 0)
+ return cr.Status.GetNormalizedCRCompleted()
+}
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- cluster := chi.Spec.Configuration.Clusters[clusterIndex]
- for shardIndex := range cluster.Layout.Shards {
- shard := &cluster.Layout.Shards[shardIndex]
- for replicaIndex := range shard.Hosts {
- host := shard.Hosts[replicaIndex]
- res = append(res, f(host))
- }
- }
+// SetAncestor sets ancestor of a CR
+func (cr *ClickHouseInstallation) SetAncestor(a *ClickHouseInstallation) {
+ if cr == nil {
+ return
}
-
- return res
+ cr.EnsureStatus().NormalizedCRCompleted = a
}
-// WalkTillError walks hosts with a function until an error met
-func (chi *ClickHouseInstallation) WalkTillError(
- ctx context.Context,
- fCHIPreliminary func(ctx context.Context, chi *ClickHouseInstallation) error,
- fCluster func(ctx context.Context, cluster *Cluster) error,
- fShards func(ctx context.Context, shards []*ChiShard) error,
- fCHIFinal func(ctx context.Context, chi *ClickHouseInstallation) error,
-) error {
- if err := fCHIPreliminary(ctx, chi); err != nil {
- return err
+// HasTarget checks whether CR has a target
+func (cr *ClickHouseInstallation) HasTarget() bool {
+ if !cr.HasStatus() {
+ return false
}
+ return cr.Status.HasNormalizedCR()
+}
- for clusterIndex := range chi.Spec.Configuration.Clusters {
- cluster := chi.Spec.Configuration.Clusters[clusterIndex]
- if err := fCluster(ctx, cluster); err != nil {
- return err
- }
-
- shards := make([]*ChiShard, 0, len(cluster.Layout.Shards))
- for shardIndex := range cluster.Layout.Shards {
- shards = append(shards, &cluster.Layout.Shards[shardIndex])
- }
- if err := fShards(ctx, shards); err != nil {
- return err
- }
+// GetTarget gets target of a CR
+func (cr *ClickHouseInstallation) GetTarget() *ClickHouseInstallation {
+ if !cr.HasTarget() {
+ return nil
}
+ return cr.Status.GetNormalizedCR()
+}
- if err := fCHIFinal(ctx, chi); err != nil {
- return err
+// SetTarget sets target of a CR
+func (cr *ClickHouseInstallation) SetTarget(a *ClickHouseInstallation) {
+ if cr == nil {
+ return
}
+ cr.EnsureStatus().NormalizedCR = a
+}
- return nil
+func (cr *ClickHouseInstallation) GetUsedTemplates() []*TemplateRef {
+ return cr.GetSpecT().UseTemplates
+}
+
+// FillStatus fills .Status
+func (cr *ClickHouseInstallation) FillStatus(endpoint string, pods, fqdns []string, ip string) {
+ cr.EnsureStatus().Fill(&FillStatusParams{
+ CHOpIP: ip,
+ ClustersCount: cr.ClustersCount(),
+ ShardsCount: cr.ShardsCount(),
+ HostsCount: cr.HostsCount(),
+ TaskID: cr.GetSpecT().GetTaskID(),
+ HostsUpdatedCount: 0,
+ HostsAddedCount: 0,
+ HostsUnchangedCount: 0,
+ HostsCompletedCount: 0,
+ HostsDeleteCount: 0,
+ HostsDeletedCount: 0,
+ Pods: pods,
+ FQDNs: fqdns,
+ Endpoint: endpoint,
+ NormalizedCR: cr.Copy(types.CopyCROptions{
+ SkipStatus: true,
+ SkipManagedFields: true,
+ }),
+ })
+}
+
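+// Fill fills self-calculated address info and CR back-pointers, see FillCR.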
+func (cr *ClickHouseInstallation) Fill() {
+ FillCR(cr)
}
// MergeFrom merges from CHI
-func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation, _type MergeType) {
+func (cr *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation, _type MergeType) {
if from == nil {
return
}
@@ -329,110 +204,37 @@ func (chi *ClickHouseInstallation) MergeFrom(from *ClickHouseInstallation, _type
// Merge Meta
switch _type {
case MergeTypeFillEmptyValues:
- _ = mergo.Merge(&chi.TypeMeta, from.TypeMeta)
- _ = mergo.Merge(&chi.ObjectMeta, from.ObjectMeta)
+ _ = mergo.Merge(&cr.TypeMeta, from.TypeMeta)
+ _ = mergo.Merge(&cr.ObjectMeta, from.ObjectMeta)
case MergeTypeOverrideByNonEmptyValues:
- _ = mergo.Merge(&chi.TypeMeta, from.TypeMeta, mergo.WithOverride)
- _ = mergo.Merge(&chi.ObjectMeta, from.ObjectMeta, mergo.WithOverride)
+ _ = mergo.Merge(&cr.TypeMeta, from.TypeMeta, mergo.WithOverride)
+ _ = mergo.Merge(&cr.ObjectMeta, from.ObjectMeta, mergo.WithOverride)
}
// Exclude skipped annotations
- chi.Annotations = util.CopyMapFilter(
- chi.Annotations,
- nil,
- util.ListSkippedAnnotations(),
+ cr.SetAnnotations(
+ util.CopyMapFilter(
+ cr.GetAnnotations(),
+ nil,
+ util.ListSkippedAnnotations(),
+ ),
)
// Do actual merge for Spec
- (&chi.Spec).MergeFrom(&from.Spec, _type)
+ cr.GetSpecT().MergeFrom(from.GetSpecT(), _type)
// Copy service attributes
- chi.EnsureRuntime().attributes = from.EnsureRuntime().attributes
+ cr.ensureRuntime().attributes = from.ensureRuntime().attributes
- chi.EnsureStatus().CopyFrom(from.Status, CopyCHIStatusOptions{
+ cr.EnsureStatus().CopyFrom(from.Status, types.CopyStatusOptions{
InheritableFields: true,
})
}
-// HasTaskID checks whether task id is specified
-func (spec *ChiSpec) HasTaskID() bool {
- switch {
- case spec == nil:
- return false
- case spec.TaskID == nil:
- return false
- case len(*spec.TaskID) == 0:
- return false
- default:
- return true
- }
-}
-
-// GetTaskID gets task id as a string
-func (spec *ChiSpec) GetTaskID() string {
- if spec.HasTaskID() {
- return *spec.TaskID
- }
- return ""
-}
-
-// MergeFrom merges from spec
-func (spec *ChiSpec) MergeFrom(from *ChiSpec, _type MergeType) {
- if from == nil {
- return
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- if !spec.HasTaskID() {
- spec.TaskID = from.TaskID
- }
- if !spec.Stop.HasValue() {
- spec.Stop = spec.Stop.MergeFrom(from.Stop)
- }
- if spec.Restart == "" {
- spec.Restart = from.Restart
- }
- if !spec.Troubleshoot.HasValue() {
- spec.Troubleshoot = spec.Troubleshoot.MergeFrom(from.Troubleshoot)
- }
- if spec.NamespaceDomainPattern == "" {
- spec.NamespaceDomainPattern = from.NamespaceDomainPattern
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.HasTaskID() {
- spec.TaskID = from.TaskID
- }
- if from.Stop.HasValue() {
- // Override by non-empty values only
- spec.Stop = from.Stop
- }
- if from.Restart != "" {
- // Override by non-empty values only
- spec.Restart = from.Restart
- }
- if from.Troubleshoot.HasValue() {
- // Override by non-empty values only
- spec.Troubleshoot = from.Troubleshoot
- }
- if from.NamespaceDomainPattern != "" {
- spec.NamespaceDomainPattern = from.NamespaceDomainPattern
- }
- }
-
- spec.Templating = spec.Templating.MergeFrom(from.Templating, _type)
- spec.Reconciling = spec.Reconciling.MergeFrom(from.Reconciling, _type)
- spec.Defaults = spec.Defaults.MergeFrom(from.Defaults, _type)
- spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
- spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
- // TODO may be it would be wiser to make more intelligent merge
- spec.UseTemplates = append(spec.UseTemplates, from.UseTemplates...)
-}
-
// FindCluster finds cluster by name or index.
// Expectations: name is expected to be a string, index is expected to be an int.
-func (chi *ClickHouseInstallation) FindCluster(needle interface{}) *Cluster {
+func (cr *ClickHouseInstallation) FindCluster(needle interface{}) ICluster {
var resultCluster *Cluster
- chi.WalkClustersFullPath(func(chi *ClickHouseInstallation, clusterIndex int, cluster *Cluster) error {
+ cr.WalkClustersFullPath(func(chi *ClickHouseInstallation, clusterIndex int, cluster *Cluster) error {
switch v := needle.(type) {
case string:
if cluster.Name == v {
@@ -450,20 +252,20 @@ func (chi *ClickHouseInstallation) FindCluster(needle interface{}) *Cluster {
// FindShard finds shard by name or index
// Expectations: name is expected to be a string, index is expected to be an int.
-func (chi *ClickHouseInstallation) FindShard(needleCluster interface{}, needleShard interface{}) *ChiShard {
- return chi.FindCluster(needleCluster).FindShard(needleShard)
+func (cr *ClickHouseInstallation) FindShard(needleCluster interface{}, needleShard interface{}) IShard {
+ return cr.FindCluster(needleCluster).FindShard(needleShard)
}
 // FindHost finds host by name or index
// Expectations: name is expected to be a string, index is expected to be an int.
-func (chi *ClickHouseInstallation) FindHost(needleCluster interface{}, needleShard interface{}, needleHost interface{}) *ChiHost {
- return chi.FindCluster(needleCluster).FindHost(needleShard, needleHost)
+func (cr *ClickHouseInstallation) FindHost(needleCluster interface{}, needleShard interface{}, needleHost interface{}) *Host {
+ return cr.FindCluster(needleCluster).FindHost(needleShard, needleHost)
}
// ClustersCount counts clusters
-func (chi *ClickHouseInstallation) ClustersCount() int {
+func (cr *ClickHouseInstallation) ClustersCount() int {
count := 0
- chi.WalkClusters(func(cluster *Cluster) error {
+ cr.WalkClusters(func(cluster ICluster) error {
count++
return nil
})
@@ -471,9 +273,9 @@ func (chi *ClickHouseInstallation) ClustersCount() int {
}
// ShardsCount counts shards
-func (chi *ClickHouseInstallation) ShardsCount() int {
+func (cr *ClickHouseInstallation) ShardsCount() int {
count := 0
- chi.WalkShards(func(shard *ChiShard) error {
+ cr.WalkShards(func(shard *ChiShard) error {
count++
return nil
})
@@ -481,9 +283,9 @@ func (chi *ClickHouseInstallation) ShardsCount() int {
}
// HostsCount counts hosts
-func (chi *ClickHouseInstallation) HostsCount() int {
+func (cr *ClickHouseInstallation) HostsCount() int {
count := 0
- chi.WalkHosts(func(host *ChiHost) error {
+ cr.WalkHosts(func(host *Host) error {
count++
return nil
})
@@ -491,9 +293,9 @@ func (chi *ClickHouseInstallation) HostsCount() int {
}
// HostsCountAttributes counts hosts by attributes
-func (chi *ClickHouseInstallation) HostsCountAttributes(a *HostReconcileAttributes) int {
+func (cr *ClickHouseInstallation) HostsCountAttributes(a *HostReconcileAttributes) int {
count := 0
- chi.WalkHosts(func(host *ChiHost) error {
+ cr.WalkHosts(func(host *Host) error {
if host.GetReconcileAttributes().Any(a) {
count++
}
@@ -503,83 +305,83 @@ func (chi *ClickHouseInstallation) HostsCountAttributes(a *HostReconcileAttribut
}
// GetHostTemplate gets HostTemplate by name
-func (chi *ClickHouseInstallation) GetHostTemplate(name string) (*HostTemplate, bool) {
- if !chi.Spec.Templates.GetHostTemplatesIndex().Has(name) {
+func (cr *ClickHouseInstallation) GetHostTemplate(name string) (*HostTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetHostTemplatesIndex().Has(name) {
return nil, false
}
- return chi.Spec.Templates.GetHostTemplatesIndex().Get(name), true
+ return cr.GetSpecT().GetTemplates().GetHostTemplatesIndex().Get(name), true
}
// GetPodTemplate gets PodTemplate by name
-func (chi *ClickHouseInstallation) GetPodTemplate(name string) (*PodTemplate, bool) {
- if !chi.Spec.Templates.GetPodTemplatesIndex().Has(name) {
+func (cr *ClickHouseInstallation) GetPodTemplate(name string) (*PodTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Has(name) {
return nil, false
}
- return chi.Spec.Templates.GetPodTemplatesIndex().Get(name), true
+ return cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Get(name), true
}
// WalkPodTemplates walks over all PodTemplates
-func (chi *ClickHouseInstallation) WalkPodTemplates(f func(template *PodTemplate)) {
- chi.Spec.Templates.GetPodTemplatesIndex().Walk(f)
+func (cr *ClickHouseInstallation) WalkPodTemplates(f func(template *PodTemplate)) {
+ cr.GetSpecT().GetTemplates().GetPodTemplatesIndex().Walk(f)
}
// GetVolumeClaimTemplate gets VolumeClaimTemplate by name
-func (chi *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*VolumeClaimTemplate, bool) {
- if chi.Spec.Templates.GetVolumeClaimTemplatesIndex().Has(name) {
- return chi.Spec.Templates.GetVolumeClaimTemplatesIndex().Get(name), true
+func (cr *ClickHouseInstallation) GetVolumeClaimTemplate(name string) (*VolumeClaimTemplate, bool) {
+ if cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Has(name) {
+ return cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Get(name), true
}
return nil, false
}
// WalkVolumeClaimTemplates walks over all VolumeClaimTemplates
-func (chi *ClickHouseInstallation) WalkVolumeClaimTemplates(f func(template *VolumeClaimTemplate)) {
- if chi == nil {
+func (cr *ClickHouseInstallation) WalkVolumeClaimTemplates(f func(template *VolumeClaimTemplate)) {
+ if cr == nil {
return
}
- chi.Spec.Templates.GetVolumeClaimTemplatesIndex().Walk(f)
+ cr.GetSpecT().GetTemplates().GetVolumeClaimTemplatesIndex().Walk(f)
}
// GetServiceTemplate gets ServiceTemplate by name
-func (chi *ClickHouseInstallation) GetServiceTemplate(name string) (*ServiceTemplate, bool) {
- if !chi.Spec.Templates.GetServiceTemplatesIndex().Has(name) {
+func (cr *ClickHouseInstallation) GetServiceTemplate(name string) (*ServiceTemplate, bool) {
+ if !cr.GetSpecT().GetTemplates().GetServiceTemplatesIndex().Has(name) {
return nil, false
}
- return chi.Spec.Templates.GetServiceTemplatesIndex().Get(name), true
+ return cr.GetSpecT().GetTemplates().GetServiceTemplatesIndex().Get(name), true
}
-// GetCHIServiceTemplate gets ServiceTemplate of a CHI
-func (chi *ClickHouseInstallation) GetCHIServiceTemplate() (*ServiceTemplate, bool) {
- if !chi.Spec.Defaults.Templates.HasServiceTemplate() {
+// GetRootServiceTemplate gets ServiceTemplate of a CHI
+func (cr *ClickHouseInstallation) GetRootServiceTemplate() (*ServiceTemplate, bool) {
+ if !cr.GetSpec().GetDefaults().Templates.HasServiceTemplate() {
return nil, false
}
- name := chi.Spec.Defaults.Templates.GetServiceTemplate()
- return chi.GetServiceTemplate(name)
+ name := cr.GetSpec().GetDefaults().Templates.GetServiceTemplate()
+ return cr.GetServiceTemplate(name)
}
// MatchNamespace matches namespace
-func (chi *ClickHouseInstallation) MatchNamespace(namespace string) bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) MatchNamespace(namespace string) bool {
+ if cr == nil {
return false
}
- return chi.Namespace == namespace
+ return cr.Namespace == namespace
}
// MatchFullName matches full name
-func (chi *ClickHouseInstallation) MatchFullName(namespace, name string) bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) MatchFullName(namespace, name string) bool {
+ if cr == nil {
return false
}
- return (chi.Namespace == namespace) && (chi.Name == name)
+ return (cr.Namespace == namespace) && (cr.Name == name)
}
// FoundIn checks whether CHI can be found in haystack
-func (chi *ClickHouseInstallation) FoundIn(haystack []*ClickHouseInstallation) bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) FoundIn(haystack []*ClickHouseInstallation) bool {
+ if cr == nil {
return false
}
for _, candidate := range haystack {
- if candidate.MatchFullName(chi.Namespace, chi.Name) {
+ if candidate.MatchFullName(cr.Namespace, cr.Name) {
return true
}
}
@@ -594,22 +396,22 @@ const (
)
// IsAuto checks whether templating policy is auto
-func (chi *ClickHouseInstallation) IsAuto() bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) IsAuto() bool {
+ if cr == nil {
return false
}
- if (chi.Namespace == "") && (chi.Name == "") {
+ if (cr.Namespace == "") && (cr.Name == "") {
return false
}
- return chi.Spec.Templating.GetPolicy() == TemplatingPolicyAuto
+ return cr.GetSpecT().GetTemplating().GetPolicy() == TemplatingPolicyAuto
}
// IsStopped checks whether CHI is stopped
-func (chi *ClickHouseInstallation) IsStopped() bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) IsStopped() bool {
+ if cr == nil {
return false
}
- return chi.Spec.Stop.Value()
+ return cr.GetSpecT().GetStop().Value()
}
// Restart constants present available values for .spec.restart
@@ -621,48 +423,40 @@ const (
)
// IsRollingUpdate checks whether CHI should perform rolling update
-func (chi *ClickHouseInstallation) IsRollingUpdate() bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) IsRollingUpdate() bool {
+ if cr == nil {
return false
}
- return chi.Spec.Restart == RestartRollingUpdate
+ return cr.GetSpecT().GetRestart().Value() == RestartRollingUpdate
}
// IsTroubleshoot checks whether CHI is in troubleshoot mode
-func (chi *ClickHouseInstallation) IsTroubleshoot() bool {
- if chi == nil {
+func (cr *ClickHouseInstallation) IsTroubleshoot() bool {
+ if cr == nil {
return false
}
- return chi.Spec.Troubleshoot.Value()
+ return cr.GetSpecT().GetTroubleshoot().Value()
}
// GetReconciling gets reconciling spec
-func (chi *ClickHouseInstallation) GetReconciling() *ChiReconciling {
- if chi == nil {
+func (cr *ClickHouseInstallation) GetReconciling() *Reconciling {
+ if cr == nil {
return nil
}
- return chi.Spec.Reconciling
-}
-
-// CopyCHIOptions specifies options for CHI copier
-type CopyCHIOptions struct {
- // SkipStatus specifies whether to copy status
- SkipStatus bool
- // SkipManagedFields specifies whether to copy managed fields
- SkipManagedFields bool
+ return cr.GetSpecT().Reconciling
}
// Copy makes copy of a CHI, filtering fields according to specified CopyOptions
-func (chi *ClickHouseInstallation) Copy(opts CopyCHIOptions) *ClickHouseInstallation {
- if chi == nil {
+func (cr *ClickHouseInstallation) Copy(opts types.CopyCROptions) *ClickHouseInstallation {
+ if cr == nil {
return nil
}
- jsonBytes, err := json.Marshal(chi)
+ jsonBytes, err := json.Marshal(cr)
if err != nil {
return nil
}
- var chi2 ClickHouseInstallation
+ var chi2 *ClickHouseInstallation
if err := json.Unmarshal(jsonBytes, &chi2); err != nil {
return nil
}
@@ -672,19 +466,19 @@ func (chi *ClickHouseInstallation) Copy(opts CopyCHIOptions) *ClickHouseInstalla
}
if opts.SkipManagedFields {
- chi2.ObjectMeta.ManagedFields = nil
+ chi2.SetManagedFields(nil)
}
- return &chi2
+ return chi2
}
// JSON returns JSON string
-func (chi *ClickHouseInstallation) JSON(opts CopyCHIOptions) string {
- if chi == nil {
+func (cr *ClickHouseInstallation) JSON(opts types.CopyCROptions) string {
+ if cr == nil {
return ""
}
- filtered := chi.Copy(opts)
+ filtered := cr.Copy(opts)
jsonBytes, err := json.MarshalIndent(filtered, "", " ")
if err != nil {
return fmt.Sprintf("unable to parse. err: %v", err)
@@ -694,12 +488,12 @@ func (chi *ClickHouseInstallation) JSON(opts CopyCHIOptions) string {
}
 // YAML returns YAML string
-func (chi *ClickHouseInstallation) YAML(opts CopyCHIOptions) string {
- if chi == nil {
+func (cr *ClickHouseInstallation) YAML(opts types.CopyCROptions) string {
+ if cr == nil {
return ""
}
- filtered := chi.Copy(opts)
+ filtered := cr.Copy(opts)
yamlBytes, err := yaml.Marshal(filtered)
if err != nil {
return fmt.Sprintf("unable to parse. err: %v", err)
@@ -707,147 +501,183 @@ func (chi *ClickHouseInstallation) YAML(opts CopyCHIOptions) string {
return string(yamlBytes)
}
-func (chi *ClickHouseInstallation) EnsureRuntime() *ClickHouseInstallationRuntime {
- if chi == nil {
+// FirstHost returns first host of the CHI
+func (cr *ClickHouseInstallation) FirstHost() *Host {
+ var result *Host
+ cr.WalkHosts(func(host *Host) error {
+ if result == nil {
+ result = host
+ }
return nil
+ })
+ return result
+}
+
+func (cr *ClickHouseInstallation) GetName() string {
+ if cr == nil {
+ return ""
}
+ return cr.Name
+}
- // Assume that most of the time, we'll see a non-nil value.
- if chi.runtime != nil {
- return chi.runtime
+func (cr *ClickHouseInstallation) GetNamespace() string {
+ if cr == nil {
+ return ""
}
+ return cr.Namespace
+}
- // Otherwise, we need to acquire a lock to initialize the field.
- chi.runtimeCreatorMutex.Lock()
- defer chi.runtimeCreatorMutex.Unlock()
- // Note that we have to check this property again to avoid a TOCTOU bug.
- if chi.runtime == nil {
- chi.runtime = newClickHouseInstallationRuntime()
+func (cr *ClickHouseInstallation) GetLabels() map[string]string {
+ if cr == nil {
+ return nil
}
- return chi.runtime
+ return cr.Labels
}
-// EnsureStatus ensures status
-func (chi *ClickHouseInstallation) EnsureStatus() *ChiStatus {
- if chi == nil {
+func (cr *ClickHouseInstallation) GetAnnotations() map[string]string {
+ if cr == nil {
return nil
}
+ return cr.Annotations
+}
- // Assume that most of the time, we'll see a non-nil value.
- if chi.Status != nil {
- return chi.Status
+// WalkClustersFullPath walks clusters with full path
+func (cr *ClickHouseInstallation) WalkClustersFullPath(
+ f func(chi *ClickHouseInstallation, clusterIndex int, cluster *Cluster) error,
+) []error {
+ if cr == nil {
+ return nil
}
+ res := make([]error, 0)
- // Otherwise, we need to acquire a lock to initialize the field.
- chi.statusCreatorMutex.Lock()
- defer chi.statusCreatorMutex.Unlock()
- // Note that we have to check this property again to avoid a TOCTOU bug.
- if chi.Status == nil {
- chi.Status = &ChiStatus{}
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ res = append(res, f(cr, clusterIndex, cr.GetSpecT().Configuration.Clusters[clusterIndex]))
}
- return chi.Status
+
+ return res
}
-// GetStatus gets Status
-func (chi *ClickHouseInstallation) GetStatus() *ChiStatus {
- if chi == nil {
+// WalkClusters walks clusters
+func (cr *ClickHouseInstallation) WalkClusters(f func(i ICluster) error) []error {
+ if cr == nil {
return nil
}
- return chi.Status
-}
+ res := make([]error, 0)
-// HasStatus checks whether CHI has Status
-func (chi *ClickHouseInstallation) HasStatus() bool {
- if chi == nil {
- return false
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ res = append(res, f(cr.GetSpecT().Configuration.Clusters[clusterIndex]))
}
- return chi.Status != nil
-}
-// HasAncestor checks whether CHI has an ancestor
-func (chi *ClickHouseInstallation) HasAncestor() bool {
- if !chi.HasStatus() {
- return false
- }
- return chi.Status.HasNormalizedCHICompleted()
+ return res
}
-// GetAncestor gets ancestor of a CHI
-func (chi *ClickHouseInstallation) GetAncestor() *ClickHouseInstallation {
- if !chi.HasAncestor() {
+// WalkShards walks shards
+func (cr *ClickHouseInstallation) WalkShards(
+ f func(
+ shard *ChiShard,
+ ) error,
+) []error {
+ if cr == nil {
return nil
}
- return chi.Status.GetNormalizedCHICompleted()
-}
+ res := make([]error, 0)
-// SetAncestor sets ancestor of a CHI
-func (chi *ClickHouseInstallation) SetAncestor(a *ClickHouseInstallation) {
- if chi == nil {
- return
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ res = append(res, f(shard))
+ }
}
- chi.EnsureStatus().NormalizedCHICompleted = a
-}
-// HasTarget checks whether CHI has a target
-func (chi *ClickHouseInstallation) HasTarget() bool {
- if !chi.HasStatus() {
- return false
- }
- return chi.Status.HasNormalizedCHI()
+ return res
}
-// GetTarget gets target of a CHI
-func (chi *ClickHouseInstallation) GetTarget() *ClickHouseInstallation {
- if !chi.HasTarget() {
+// WalkHostsFullPathAndScope walks hosts with full path
+func (cr *ClickHouseInstallation) WalkHostsFullPathAndScope(
+ crScopeCycleSize int,
+ clusterScopeCycleSize int,
+ f WalkHostsAddressFn,
+) (res []error) {
+ if cr == nil {
return nil
}
- return chi.Status.GetNormalizedCHI()
+ address := types.NewHostScopeAddress(crScopeCycleSize, clusterScopeCycleSize)
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ address.ClusterScopeAddress.Init()
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.GetShard(shardIndex)
+ for replicaIndex, host := range shard.Hosts {
+ replica := cluster.GetReplica(replicaIndex)
+ address.ClusterIndex = clusterIndex
+ address.ShardIndex = shardIndex
+ address.ReplicaIndex = replicaIndex
+ res = append(res, f(cr, cluster, shard, replica, host, address))
+ address.CRScopeAddress.Inc()
+ address.ClusterScopeAddress.Inc()
+ }
+ }
+ }
+ return res
}
-// SetTarget sets target of a CHI
-func (chi *ClickHouseInstallation) SetTarget(a *ClickHouseInstallation) {
- if chi == nil {
- return
- }
- chi.EnsureStatus().NormalizedCHI = a
+// WalkHostsFullPath walks hosts with a function
+func (cr *ClickHouseInstallation) WalkHostsFullPath(f WalkHostsAddressFn) []error {
+ return cr.WalkHostsFullPathAndScope(0, 0, f)
}
-// FirstHost returns first host of the CHI
-func (chi *ClickHouseInstallation) FirstHost() *ChiHost {
- var result *ChiHost
- chi.WalkHosts(func(host *ChiHost) error {
- if result == nil {
- result = host
- }
+// WalkHosts walks hosts with a function
+func (cr *ClickHouseInstallation) WalkHosts(f func(host *Host) error) []error {
+ if cr == nil {
return nil
- })
- return result
-}
+ }
+ res := make([]error, 0)
-func (chi *ClickHouseInstallation) GetName() string {
- if chi == nil {
- return ""
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ for shardIndex := range cluster.Layout.Shards {
+ shard := cluster.Layout.Shards[shardIndex]
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ res = append(res, f(host))
+ }
+ }
}
- return chi.Name
+
+ return res
}
-func (chi *ClickHouseInstallation) GetNamespace() string {
- if chi == nil {
- return ""
+// WalkTillError walks the CR with the provided functions until an error is met
+func (cr *ClickHouseInstallation) WalkTillError(
+ ctx context.Context,
+ fCRPreliminary func(ctx context.Context, chi *ClickHouseInstallation) error,
+ fCluster func(ctx context.Context, cluster *Cluster) error,
+ fShards func(ctx context.Context, shards []*ChiShard) error,
+ fCRFinal func(ctx context.Context, chi *ClickHouseInstallation) error,
+) error {
+ if err := fCRPreliminary(ctx, cr); err != nil {
+ return err
}
- return chi.Namespace
-}
-func (chi *ClickHouseInstallation) GetLabels() map[string]string {
- if chi == nil {
- return nil
+ for clusterIndex := range cr.GetSpecT().Configuration.Clusters {
+ cluster := cr.GetSpecT().Configuration.Clusters[clusterIndex]
+ if err := fCluster(ctx, cluster); err != nil {
+ return err
+ }
+
+ shards := make([]*ChiShard, 0, len(cluster.Layout.Shards))
+ for shardIndex := range cluster.Layout.Shards {
+ shards = append(shards, cluster.Layout.Shards[shardIndex])
+ }
+ if err := fShards(ctx, shards); err != nil {
+ return err
+ }
}
- return chi.Labels
-}
-func (chi *ClickHouseInstallation) GetAnnotations() map[string]string {
- if chi == nil {
- return nil
+ if err := fCRFinal(ctx, cr); err != nil {
+ return err
}
- return chi.Annotations
+
+ return nil
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi_templating.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi_templating.go
new file mode 100644
index 000000000..0e3deef97
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi_templating.go
@@ -0,0 +1,82 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// ChiTemplating defines templating policy struct
+type ChiTemplating struct {
+ Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
+ CHISelector TargetSelector `json:"chiSelector,omitempty" yaml:"chiSelector,omitempty"`
+}
+
+// NewChiTemplating creates new templating
+func NewChiTemplating() *ChiTemplating {
+ return new(ChiTemplating)
+}
+
+// GetPolicy gets policy
+func (t *ChiTemplating) GetPolicy() string {
+ if t == nil {
+ return ""
+ }
+ return t.Policy
+}
+
+// SetPolicy sets policy
+func (t *ChiTemplating) SetPolicy(p string) {
+ if t == nil {
+ return
+ }
+ t.Policy = p
+}
+
+// GetSelector gets CHI selector
+func (t *ChiTemplating) GetSelector() TargetSelector {
+ if t == nil {
+ return nil
+ }
+ return t.CHISelector
+}
+
+// MergeFrom merges from specified templating
+func (t *ChiTemplating) MergeFrom(from *ChiTemplating, _type MergeType) *ChiTemplating {
+ if from == nil {
+ return t
+ }
+
+ if t == nil {
+ t = NewChiTemplating()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if t.Policy == "" {
+ t.Policy = from.Policy
+ }
+ if t.CHISelector == nil {
+ t.CHISelector = from.CHISelector
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.Policy != "" {
+ // Override by non-empty values only
+ t.Policy = from.Policy
+ }
+ if from.CHISelector != nil {
+ // Override by non-empty values only
+ t.CHISelector = from.CHISelector
+ }
+ }
+
+ return t
+}
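
Illustrative note (not part of the patch): a sketch of the two merge modes above, using only names defined in this file.

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	dst := api.NewChiTemplating() // empty policy
	src := api.NewChiTemplating()
	src.SetPolicy("auto")

	// Fill-empty copies the policy only because dst has none yet.
	dst = dst.MergeFrom(src, api.MergeTypeFillEmptyValues)
	fmt.Println(dst.GetPolicy()) // auto

	// Override-by-non-empty replaces even an existing policy.
	src.SetPolicy("manual")
	dst = dst.MergeFrom(src, api.MergeTypeOverrideByNonEmptyValues)
	fmt.Println(dst.GetPolicy()) // manual
}
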
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cleanup.go b/pkg/apis/clickhouse.altinity.com/v1/type_cleanup.go
new file mode 100644
index 000000000..d83c4dc06
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cleanup.go
@@ -0,0 +1,252 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// Cleanup defines cleanup
+type Cleanup struct {
+ // UnknownObjects specifies cleanup of unknown objects
+ UnknownObjects *ObjectsCleanup `json:"unknownObjects,omitempty" yaml:"unknownObjects,omitempty"`
+ // ReconcileFailedObjects specifies cleanup of failed objects
+ ReconcileFailedObjects *ObjectsCleanup `json:"reconcileFailedObjects,omitempty" yaml:"reconcileFailedObjects,omitempty"`
+}
+
+// NewCleanup creates new cleanup
+func NewCleanup() *Cleanup {
+ return new(Cleanup)
+}
+
+// MergeFrom merges from specified cleanup
+func (t *Cleanup) MergeFrom(from *Cleanup, _type MergeType) *Cleanup {
+ if from == nil {
+ return t
+ }
+
+ if t == nil {
+ t = NewCleanup()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ case MergeTypeOverrideByNonEmptyValues:
+ }
+
+ t.UnknownObjects = t.UnknownObjects.MergeFrom(from.UnknownObjects, _type)
+ t.ReconcileFailedObjects = t.ReconcileFailedObjects.MergeFrom(from.ReconcileFailedObjects, _type)
+
+ return t
+}
+
+// GetUnknownObjects gets unknown objects cleanup
+func (t *Cleanup) GetUnknownObjects() *ObjectsCleanup {
+ if t == nil {
+ return nil
+ }
+ return t.UnknownObjects
+}
+
+// DefaultUnknownObjects makes default cleanup for unknown objects
+func (t *Cleanup) DefaultUnknownObjects() *ObjectsCleanup {
+ return NewObjectsCleanup().
+ SetStatefulSet(ObjectsCleanupDelete).
+ SetPVC(ObjectsCleanupDelete).
+ SetConfigMap(ObjectsCleanupDelete).
+ SetService(ObjectsCleanupDelete)
+}
+
+// GetReconcileFailedObjects gets failed objects cleanup
+func (t *Cleanup) GetReconcileFailedObjects() *ObjectsCleanup {
+ if t == nil {
+ return nil
+ }
+ return t.ReconcileFailedObjects
+}
+
+// DefaultReconcileFailedObjects makes default cleanup for failed objects
+func (t *Cleanup) DefaultReconcileFailedObjects() *ObjectsCleanup {
+ return NewObjectsCleanup().
+ SetStatefulSet(ObjectsCleanupRetain).
+ SetPVC(ObjectsCleanupRetain).
+ SetConfigMap(ObjectsCleanupRetain).
+ SetService(ObjectsCleanupRetain)
+}
+
+// SetDefaults set defaults for cleanup
+func (t *Cleanup) SetDefaults() *Cleanup {
+ if t == nil {
+ return nil
+ }
+ t.UnknownObjects = t.DefaultUnknownObjects()
+ t.ReconcileFailedObjects = t.DefaultReconcileFailedObjects()
+ return t
+}
+
+// Possible objects cleanup options
+const (
+ ObjectsCleanupUnspecified = "Unspecified"
+ ObjectsCleanupRetain = "Retain"
+ ObjectsCleanupDelete = "Delete"
+)
+
+// ObjectsCleanup specifies object cleanup struct
+type ObjectsCleanup struct {
+ StatefulSet string `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
+ PVC string `json:"pvc,omitempty" yaml:"pvc,omitempty"`
+ ConfigMap string `json:"configMap,omitempty" yaml:"configMap,omitempty"`
+ Service string `json:"service,omitempty" yaml:"service,omitempty"`
+ Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`
+}
+
+// NewObjectsCleanup creates new object cleanup
+func NewObjectsCleanup() *ObjectsCleanup {
+ return new(ObjectsCleanup)
+}
+
+// MergeFrom merges from specified cleanup
+func (c *ObjectsCleanup) MergeFrom(from *ObjectsCleanup, _type MergeType) *ObjectsCleanup {
+ if from == nil {
+ return c
+ }
+
+ if c == nil {
+ c = NewObjectsCleanup()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if c.StatefulSet == "" {
+ c.StatefulSet = from.StatefulSet
+ }
+ if c.PVC == "" {
+ c.PVC = from.PVC
+ }
+ if c.ConfigMap == "" {
+ c.ConfigMap = from.ConfigMap
+ }
+ if c.Service == "" {
+ c.Service = from.Service
+ }
+ if c.Secret == "" {
+ c.Secret = from.Secret
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.StatefulSet != "" {
+ // Override by non-empty values only
+ c.StatefulSet = from.StatefulSet
+ }
+ if from.PVC != "" {
+ // Override by non-empty values only
+ c.PVC = from.PVC
+ }
+ if from.ConfigMap != "" {
+ // Override by non-empty values only
+ c.ConfigMap = from.ConfigMap
+ }
+ if from.Service != "" {
+ // Override by non-empty values only
+ c.Service = from.Service
+ }
+ if from.Secret != "" {
+ // Override by non-empty values only
+ c.Secret = from.Secret
+ }
+ }
+
+ return c
+}
+
+// GetStatefulSet gets stateful set
+func (c *ObjectsCleanup) GetStatefulSet() string {
+ if c == nil {
+ return ""
+ }
+ return c.StatefulSet
+}
+
+// SetStatefulSet sets stateful set
+func (c *ObjectsCleanup) SetStatefulSet(v string) *ObjectsCleanup {
+ if c == nil {
+ return nil
+ }
+ c.StatefulSet = v
+ return c
+}
+
+// GetPVC gets PVC
+func (c *ObjectsCleanup) GetPVC() string {
+ if c == nil {
+ return ""
+ }
+ return c.PVC
+}
+
+// SetPVC sets PVC
+func (c *ObjectsCleanup) SetPVC(v string) *ObjectsCleanup {
+ if c == nil {
+ return nil
+ }
+ c.PVC = v
+ return c
+}
+
+// GetConfigMap gets config map
+func (c *ObjectsCleanup) GetConfigMap() string {
+ if c == nil {
+ return ""
+ }
+ return c.ConfigMap
+}
+
+// SetConfigMap sets config map
+func (c *ObjectsCleanup) SetConfigMap(v string) *ObjectsCleanup {
+ if c == nil {
+ return nil
+ }
+ c.ConfigMap = v
+ return c
+}
+
+// GetService gets service
+func (c *ObjectsCleanup) GetService() string {
+ if c == nil {
+ return ""
+ }
+ return c.Service
+}
+
+// SetService sets service
+func (c *ObjectsCleanup) SetService(v string) *ObjectsCleanup {
+ if c == nil {
+ return nil
+ }
+ c.Service = v
+ return c
+}
+
+// GetSecret gets secret
+func (c *ObjectsCleanup) GetSecret() string {
+ if c == nil {
+ return ""
+ }
+ return c.Secret
+}
+
+// SetSecret sets secret
+func (c *ObjectsCleanup) SetSecret(v string) *ObjectsCleanup {
+ if c == nil {
+ return nil
+ }
+ c.Secret = v
+ return c
+}
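
Illustrative note (not part of the patch): a sketch of the intended defaults — unknown objects are deleted, objects left over from a failed reconcile are retained — and of the chainable setters used for overrides.

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	cleanup := api.NewCleanup().SetDefaults()

	fmt.Println(cleanup.GetUnknownObjects().GetStatefulSet()) // Delete
	fmt.Println(cleanup.GetReconcileFailedObjects().GetPVC()) // Retain

	// Individual policies remain overridable via the chainable setters.
	cleanup.GetReconcileFailedObjects().SetService(api.ObjectsCleanupDelete)
	fmt.Println(cleanup.GetReconcileFailedObjects().GetService()) // Delete
}
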
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
index 2a58e7bb8..f9f10a90f 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster.go
@@ -14,31 +14,42 @@
package v1
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
// Cluster defines item of a clusters section of .configuration
type Cluster struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Zookeeper *ChiZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper,omitempty"`
- Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
- SchemaPolicy *SchemaPolicy `json:"schemaPolicy,omitempty" yaml:"schemaPolicy,omitempty"`
- Insecure *StringBool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
- Secure *StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
- Secret *ClusterSecret `json:"secret,omitempty" yaml:"secret,omitempty"`
- Layout *ChiClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
-
- Runtime ClusterRuntime `json:"-" yaml:"-"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Zookeeper *ZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper,omitempty"`
+ Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ SchemaPolicy *SchemaPolicy `json:"schemaPolicy,omitempty" yaml:"schemaPolicy,omitempty"`
+ Insecure *types.StringBool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
+ Secure *types.StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
+ Secret *ClusterSecret `json:"secret,omitempty" yaml:"secret,omitempty"`
+ PDBMaxUnavailable *types.Int32 `json:"pdbMaxUnavailable,omitempty" yaml:"pdbMaxUnavailable,omitempty"`
+ Layout *ChiClusterLayout `json:"layout,omitempty" yaml:"layout,omitempty"`
+
+ Runtime ChiClusterRuntime `json:"-" yaml:"-"`
}
-type ClusterRuntime struct {
+type ChiClusterRuntime struct {
Address ChiClusterAddress `json:"-" yaml:"-"`
CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
}
-// SchemaPolicy defines schema management policy - replica or shard-based
-type SchemaPolicy struct {
- Replica string `json:"replica" yaml:"replica"`
- Shard string `json:"shard" yaml:"shard"`
+func (r *ChiClusterRuntime) GetAddress() IClusterAddress {
+ return &r.Address
+}
+
+func (r ChiClusterRuntime) GetCR() ICustomResource {
+ return r.CHI
+}
+
+func (r *ChiClusterRuntime) SetCR(cr ICustomResource) {
+ r.CHI = cr.(*ClickHouseInstallation)
}
// ChiClusterAddress defines address of a cluster within ClickHouseInstallation
@@ -49,31 +60,76 @@ type ChiClusterAddress struct {
ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
}
-// ChiClusterLayout defines layout section of .spec.configuration.clusters
-type ChiClusterLayout struct {
- // DEPRECATED - to be removed soon
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
- ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
- // TODO refactor into map[string]ChiShard
- Shards []ChiShard `json:"shards,omitempty" yaml:"shards,omitempty"`
- Replicas []ChiReplica `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+func (a *ChiClusterAddress) GetNamespace() string {
+ return a.Namespace
+}
- // Internal data
- // Whether shards or replicas are explicitly specified as Shards []ChiShard or Replicas []ChiReplica
- ShardsSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
- ReplicasSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
- HostsField *HostsField `json:"-" yaml:"-" testdiff:"ignore"`
+func (a *ChiClusterAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
}
-// NewClusterSchemaPolicy creates new cluster layout
-func NewClusterSchemaPolicy() *SchemaPolicy {
- return new(SchemaPolicy)
+func (a *ChiClusterAddress) GetCRName() string {
+ return a.CHIName
}
-// NewChiClusterLayout creates new cluster layout
-func NewChiClusterLayout() *ChiClusterLayout {
- return new(ChiClusterLayout)
+func (a *ChiClusterAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChiClusterAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChiClusterAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChiClusterAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChiClusterAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (cluster *Cluster) GetName() string {
+ return cluster.Name
+}
+
+func (c *Cluster) GetZookeeper() *ZookeeperConfig {
+ return c.Zookeeper
+}
+
+func (c *Cluster) GetSchemaPolicy() *SchemaPolicy {
+ return c.SchemaPolicy
+}
+
+// GetInsecure is a getter
+func (cluster *Cluster) GetInsecure() *types.StringBool {
+ if cluster == nil {
+ return nil
+ }
+ return cluster.Insecure
+}
+
+// GetSecure is a getter
+func (cluster *Cluster) GetSecure() *types.StringBool {
+ if cluster == nil {
+ return nil
+ }
+ return cluster.Secure
+}
+
+func (c *Cluster) GetSecret() *ClusterSecret {
+ return c.Secret
+}
+
+func (cluster *Cluster) GetRuntime() IClusterRuntime {
+ return &cluster.Runtime
+}
+
+func (cluster *Cluster) GetPDBMaxUnavailable() *types.Int32 {
+ return cluster.PDBMaxUnavailable
}
// FillShardReplicaSpecified fills whether shard or replicas are explicitly specified
@@ -111,28 +167,28 @@ func (cluster *Cluster) InheritZookeeperFrom(chi *ClickHouseInstallation) {
		// Has zk config explicitly specified already
return
}
- if chi.Spec.Configuration == nil {
+ if chi.GetSpecT().Configuration == nil {
return
}
- if chi.Spec.Configuration.Zookeeper == nil {
+ if chi.GetSpecT().Configuration.Zookeeper == nil {
return
}
- cluster.Zookeeper = cluster.Zookeeper.MergeFrom(chi.Spec.Configuration.Zookeeper, MergeTypeFillEmptyValues)
+ cluster.Zookeeper = cluster.Zookeeper.MergeFrom(chi.GetSpecT().Configuration.Zookeeper, MergeTypeFillEmptyValues)
}
// InheritFilesFrom inherits files from CHI
func (cluster *Cluster) InheritFilesFrom(chi *ClickHouseInstallation) {
- if chi.Spec.Configuration == nil {
+ if chi.GetSpecT().Configuration == nil {
return
}
- if chi.Spec.Configuration.Files == nil {
+ if chi.GetSpecT().Configuration.Files == nil {
return
}
// Propagate host section only
- cluster.Files = cluster.Files.MergeFromCB(chi.Spec.Configuration.Files, func(path string, _ *Setting) bool {
- if section, err := getSectionFromPath(path); err == nil {
+ cluster.Files = cluster.Files.MergeFromCB(chi.GetSpecT().Configuration.Files, func(path string, _ *Setting) bool {
+ if section, err := GetSectionFromPath(path); err == nil {
if section.Equal(SectionHost) {
return true
}
@@ -144,13 +200,13 @@ func (cluster *Cluster) InheritFilesFrom(chi *ClickHouseInstallation) {
// InheritTemplatesFrom inherits templates from CHI
func (cluster *Cluster) InheritTemplatesFrom(chi *ClickHouseInstallation) {
- if chi.Spec.Defaults == nil {
+ if chi.GetSpec().GetDefaults() == nil {
return
}
- if chi.Spec.Defaults.Templates == nil {
+ if chi.GetSpec().GetDefaults().Templates == nil {
return
}
- cluster.Templates = cluster.Templates.MergeFrom(chi.Spec.Defaults.Templates, MergeTypeFillEmptyValues)
+ cluster.Templates = cluster.Templates.MergeFrom(chi.GetSpec().GetDefaults().Templates, MergeTypeFillEmptyValues)
cluster.Templates.HandleDeprecatedFields()
}
@@ -170,32 +226,32 @@ func (cluster *Cluster) GetCHI() *ClickHouseInstallation {
// GetShard gets shard with specified index
func (cluster *Cluster) GetShard(shard int) *ChiShard {
- return &cluster.Layout.Shards[shard]
+ return cluster.Layout.Shards[shard]
}
// GetOrCreateHost gets or creates host on specified coordinates
-func (cluster *Cluster) GetOrCreateHost(shard, replica int) *ChiHost {
+func (cluster *Cluster) GetOrCreateHost(shard, replica int) *Host {
return cluster.Layout.HostsField.GetOrCreate(shard, replica)
}
// GetReplica gets replica with specified index
func (cluster *Cluster) GetReplica(replica int) *ChiReplica {
- return &cluster.Layout.Replicas[replica]
+ return cluster.Layout.Replicas[replica]
}
// FindShard finds shard by name or index.
// Expectations: name is expected to be a string, index is expected to be an int.
-func (cluster *Cluster) FindShard(needle interface{}) *ChiShard {
+func (cluster *Cluster) FindShard(needle interface{}) IShard {
var resultShard *ChiShard
- cluster.WalkShards(func(index int, shard *ChiShard) error {
+ cluster.WalkShards(func(index int, shard IShard) error {
switch v := needle.(type) {
case string:
- if shard.Name == v {
- resultShard = shard
+ if shard.GetName() == v {
+ resultShard = shard.(*ChiShard)
}
case int:
if index == v {
- resultShard = shard
+ resultShard = shard.(*ChiShard)
}
}
return nil
@@ -205,14 +261,14 @@ func (cluster *Cluster) FindShard(needle interface{}) *ChiShard {
// FindHost finds host by name or index.
// Expectations: name is expected to be a string, index is expected to be an int.
-func (cluster *Cluster) FindHost(needleShard interface{}, needleHost interface{}) *ChiHost {
+func (cluster *Cluster) FindHost(needleShard interface{}, needleHost interface{}) *Host {
return cluster.FindShard(needleShard).FindHost(needleHost)
}
// FirstHost finds first host in the cluster
-func (cluster *Cluster) FirstHost() *ChiHost {
- var result *ChiHost
- cluster.WalkHosts(func(host *ChiHost) error {
+func (cluster *Cluster) FirstHost() *Host {
+ var result *Host
+ cluster.WalkHosts(func(host *Host) error {
if result == nil {
result = host
}
@@ -222,16 +278,14 @@ func (cluster *Cluster) FirstHost() *ChiHost {
}
// WalkShards walks shards
-func (cluster *Cluster) WalkShards(
- f func(index int, shard *ChiShard) error,
-) []error {
+func (cluster *Cluster) WalkShards(f func(index int, shard IShard) error) []error {
if cluster == nil {
return nil
}
res := make([]error, 0)
for shardIndex := range cluster.Layout.Shards {
- shard := &cluster.Layout.Shards[shardIndex]
+ shard := cluster.Layout.Shards[shardIndex]
res = append(res, f(shardIndex, shard))
}
@@ -243,7 +297,7 @@ func (cluster *Cluster) WalkReplicas(f func(index int, replica *ChiReplica) erro
res := make([]error, 0)
for replicaIndex := range cluster.Layout.Replicas {
- replica := &cluster.Layout.Replicas[replicaIndex]
+ replica := cluster.Layout.Replicas[replicaIndex]
res = append(res, f(replicaIndex, replica))
}
@@ -251,12 +305,11 @@ func (cluster *Cluster) WalkReplicas(f func(index int, replica *ChiReplica) erro
}
// WalkHosts walks hosts
-func (cluster *Cluster) WalkHosts(f func(host *ChiHost) error) []error {
-
+func (cluster *Cluster) WalkHosts(f func(host *Host) error) []error {
res := make([]error, 0)
for shardIndex := range cluster.Layout.Shards {
- shard := &cluster.Layout.Shards[shardIndex]
+ shard := cluster.Layout.Shards[shardIndex]
for replicaIndex := range shard.Hosts {
host := shard.Hosts[replicaIndex]
res = append(res, f(host))
@@ -267,12 +320,12 @@ func (cluster *Cluster) WalkHosts(f func(host *ChiHost) error) []error {
}
// WalkHostsByShards walks hosts by shards
-func (cluster *Cluster) WalkHostsByShards(f func(shard, replica int, host *ChiHost) error) []error {
+func (cluster *Cluster) WalkHostsByShards(f func(shard, replica int, host *Host) error) []error {
res := make([]error, 0)
for shardIndex := range cluster.Layout.Shards {
- shard := &cluster.Layout.Shards[shardIndex]
+ shard := cluster.Layout.Shards[shardIndex]
for replicaIndex := range shard.Hosts {
host := shard.Hosts[replicaIndex]
res = append(res, f(shardIndex, replicaIndex, host))
@@ -282,13 +335,17 @@ func (cluster *Cluster) WalkHostsByShards(f func(shard, replica int, host *ChiHo
return res
}
+func (cluster *Cluster) GetLayout() *ChiClusterLayout {
+ return cluster.Layout
+}
+
// WalkHostsByReplicas walks hosts by replicas
-func (cluster *Cluster) WalkHostsByReplicas(f func(shard, replica int, host *ChiHost) error) []error {
+func (cluster *Cluster) WalkHostsByReplicas(f func(shard, replica int, host *Host) error) []error {
res := make([]error, 0)
for replicaIndex := range cluster.Layout.Replicas {
- replica := &cluster.Layout.Replicas[replicaIndex]
+ replica := cluster.Layout.Replicas[replicaIndex]
for shardIndex := range replica.Hosts {
host := replica.Hosts[shardIndex]
res = append(res, f(shardIndex, replicaIndex, host))
@@ -301,25 +358,45 @@ func (cluster *Cluster) WalkHostsByReplicas(f func(shard, replica int, host *Chi
// HostsCount counts hosts
func (cluster *Cluster) HostsCount() int {
count := 0
- cluster.WalkHosts(func(host *ChiHost) error {
+ cluster.WalkHosts(func(host *Host) error {
count++
return nil
})
return count
}
-// GetInsecure is a getter
-func (cluster *Cluster) GetInsecure() *StringBool {
- if cluster == nil {
- return nil
- }
- return cluster.Insecure
+// ChiClusterLayout defines layout section of .spec.configuration.clusters
+type ChiClusterLayout struct {
+ ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
+ ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
+
+ // TODO refactor into map[string]ChiShard
+ Shards []*ChiShard `json:"shards,omitempty" yaml:"shards,omitempty"`
+ Replicas []*ChiReplica `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+
+ // Internal data
+ // Whether shards or replicas are explicitly specified as Shards []ChiShard or Replicas []ChiReplica
+ ShardsSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
+ ReplicasSpecified bool `json:"-" yaml:"-" testdiff:"ignore"`
+ HostsField *HostsField `json:"-" yaml:"-" testdiff:"ignore"`
}
-// GetSecure is a getter
-func (cluster *Cluster) GetSecure() *StringBool {
- if cluster == nil {
- return nil
- }
- return cluster.Secure
+// NewChiClusterLayout creates new cluster layout
+func NewChiClusterLayout() *ChiClusterLayout {
+ return new(ChiClusterLayout)
+}
+
+func (l *ChiClusterLayout) GetReplicasCount() int {
+ return l.ReplicasCount
+}
+
+// SchemaPolicy defines schema management policy - replica or shard-based
+type SchemaPolicy struct {
+ Replica string `json:"replica" yaml:"replica"`
+ Shard string `json:"shard" yaml:"shard"`
+}
+
+// NewClusterSchemaPolicy creates new schema policy
+func NewClusterSchemaPolicy() *SchemaPolicy {
+ return new(SchemaPolicy)
}
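
Illustrative note (not part of the patch): with Layout.Shards now holding pointers, the walkers hand out *ChiShard values behind the IShard interface instead of taking addresses of slice elements; a minimal sketch:

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	cluster := &api.Cluster{Name: "demo", Layout: api.NewChiClusterLayout()}
	cluster.Layout.Shards = []*api.ChiShard{{Name: "shard-0"}, {Name: "shard-1"}}

	// WalkShards now passes each shard as the IShard interface.
	cluster.WalkShards(func(index int, shard api.IShard) error {
		fmt.Println(index, shard.GetName())
		return nil
	})
}
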
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_cluster_secret.go b/pkg/apis/clickhouse.altinity.com/v1/type_cluster_secret.go
index ece36c280..c5e4965f6 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_cluster_secret.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_cluster_secret.go
@@ -15,14 +15,15 @@
package v1
import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
core "k8s.io/api/core/v1"
)
// ClusterSecret defines the shared secret for nodes to authenticate each other with
type ClusterSecret struct {
- Auto *StringBool `json:"auto,omitempty" yaml:"auto,omitempty"`
- Value string `json:"value,omitempty" yaml:"value,omitempty"`
- ValueFrom *DataSource `json:"valueFrom,omitempty" yaml:"valueFrom,omitempty"`
+ Auto *types.StringBool `json:"auto,omitempty" yaml:"auto,omitempty"`
+ Value string `json:"value,omitempty" yaml:"value,omitempty"`
+ ValueFrom *DataSource `json:"valueFrom,omitempty" yaml:"valueFrom,omitempty"`
}
// ClusterSecretSourceName specifies name of the source where secret is provided
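
Illustrative note (not part of the patch): the struct above offers three ways to provide the shared secret; a sketch of the inline form, with Auto (operator-generated) and ValueFrom (Secret reference) as the alternatives.

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	// Inline value; Auto and ValueFrom are the generated/referenced alternatives.
	secret := &api.ClusterSecret{Value: "inter-node-secret"}
	fmt.Println(secret.Value != "") // true
}
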
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_config_chi.go
deleted file mode 100644
index cbdddb4cd..000000000
--- a/pkg/apis/clickhouse.altinity.com/v1/type_config_chi.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-const (
- // CommonConfigDir specifies folder's name, where generated common XML files for ClickHouse would be placed
- CommonConfigDir = "config.d"
-
- // UsersConfigDir specifies folder's name, where generated users XML files for ClickHouse would be placed
- UsersConfigDir = "users.d"
-
- // HostConfigDir specifies folder's name, where generated host XML files for ClickHouse would be placed
- HostConfigDir = "conf.d"
-
- // TemplatesDir specifies folder's name where ClickHouseInstallationTemplates are located
- TemplatesDir = "templates.d"
-)
-
-// Configuration defines configuration section of .spec
-type Configuration struct {
- Zookeeper *ChiZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper,omitempty"`
- Users *Settings `json:"users,omitempty" yaml:"users,omitempty"`
- Profiles *Settings `json:"profiles,omitempty" yaml:"profiles,omitempty"`
- Quotas *Settings `json:"quotas,omitempty" yaml:"quotas,omitempty"`
- Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
- // TODO refactor into map[string]ChiCluster
- Clusters []*Cluster `json:"clusters,omitempty" yaml:"clusters,omitempty"`
-}
-
-// NewConfiguration creates new Configuration objects
-func NewConfiguration() *Configuration {
- return new(Configuration)
-}
-
-// MergeFrom merges from specified source
-func (configuration *Configuration) MergeFrom(from *Configuration, _type MergeType) *Configuration {
- if from == nil {
- return configuration
- }
-
- if configuration == nil {
- configuration = NewConfiguration()
- }
-
- configuration.Zookeeper = configuration.Zookeeper.MergeFrom(from.Zookeeper, _type)
- configuration.Users = configuration.Users.MergeFrom(from.Users)
- configuration.Profiles = configuration.Profiles.MergeFrom(from.Profiles)
- configuration.Quotas = configuration.Quotas.MergeFrom(from.Quotas)
- configuration.Settings = configuration.Settings.MergeFrom(from.Settings)
- configuration.Files = configuration.Files.MergeFrom(from.Files)
-
- // TODO merge clusters
- // Copy Clusters for now
- configuration.Clusters = from.Clusters
-
- return configuration
-}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chi.go
new file mode 100644
index 000000000..245c27c42
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chi.go
@@ -0,0 +1,100 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+ // CommonConfigDirClickHouse specifies folder's name, where generated common XML files for ClickHouse would be placed
+ CommonConfigDirClickHouse = "config.d"
+
+ // UsersConfigDirClickHouse specifies folder's name, where generated users XML files for ClickHouse would be placed
+ UsersConfigDirClickHouse = "users.d"
+
+ // HostConfigDirClickHouse specifies folder's name, where generated host XML files for ClickHouse would be placed
+ HostConfigDirClickHouse = "conf.d"
+
+ // TemplatesDirClickHouse specifies folder's name where ClickHouseInstallationTemplates are located
+ TemplatesDirClickHouse = "templates.d"
+)
+
+const (
+	// CommonConfigDirKeeper specifies folder's name, where generated common XML files for Keeper would be placed
+ CommonConfigDirKeeper = "keeper_config.d"
+
+	// UsersConfigDirKeeper specifies folder's name, where generated users XML files for Keeper would be placed
+ UsersConfigDirKeeper = "users.d"
+
+	// HostConfigDirKeeper specifies folder's name, where generated host XML files for Keeper would be placed
+ HostConfigDirKeeper = "conf.d"
+
+	// TemplatesDirKeeper specifies folder's name where ClickHouseKeeperInstallation templates are located
+ TemplatesDirKeeper = "templates.d"
+)
+
+// Configuration defines configuration section of .spec
+type Configuration struct {
+ Zookeeper *ZookeeperConfig `json:"zookeeper,omitempty" yaml:"zookeeper,omitempty"`
+ Users *Settings `json:"users,omitempty" yaml:"users,omitempty"`
+ Profiles *Settings `json:"profiles,omitempty" yaml:"profiles,omitempty"`
+ Quotas *Settings `json:"quotas,omitempty" yaml:"quotas,omitempty"`
+ Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ // TODO refactor into map[string]ChiCluster
+ Clusters []*Cluster `json:"clusters,omitempty" yaml:"clusters,omitempty"`
+}
+
+// NewConfiguration creates new Configuration objects
+func NewConfiguration() *Configuration {
+ return new(Configuration)
+}
+
+func (c *Configuration) GetProfiles() *Settings {
+ return c.Profiles
+}
+
+func (c *Configuration) GetQuotas() *Settings {
+ return c.Quotas
+}
+
+func (c *Configuration) GetSettings() *Settings {
+ return c.Settings
+}
+
+func (c *Configuration) GetFiles() *Settings {
+ return c.Files
+}
+
+// MergeFrom merges from specified source
+func (c *Configuration) MergeFrom(from *Configuration, _type MergeType) *Configuration {
+ if from == nil {
+ return c
+ }
+
+ if c == nil {
+ c = NewConfiguration()
+ }
+
+ c.Zookeeper = c.Zookeeper.MergeFrom(from.Zookeeper, _type)
+ c.Users = c.Users.MergeFrom(from.Users)
+ c.Profiles = c.Profiles.MergeFrom(from.Profiles)
+ c.Quotas = c.Quotas.MergeFrom(from.Quotas)
+ c.Settings = c.Settings.MergeFrom(from.Settings)
+ c.Files = c.Files.MergeFrom(from.Files)
+
+ // TODO merge clusters
+ // Copy Clusters for now
+ c.Clusters = from.Clusters
+
+ return c
+}
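
Illustrative note (not part of the patch): as the TODO above records, MergeFrom currently copies clusters wholesale from the source rather than merging them element-wise; a sketch of that behavior:

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	base := api.NewConfiguration()
	base.Clusters = []*api.Cluster{{Name: "base"}}

	from := api.NewConfiguration()
	from.Clusters = []*api.Cluster{{Name: "override"}}

	// Clusters are replaced, not merged element-wise (see TODO in MergeFrom).
	merged := base.MergeFrom(from, api.MergeTypeOverrideByNonEmptyValues)
	fmt.Println(merged.Clusters[0].Name) // override
}
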
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
similarity index 95%
rename from pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go
rename to pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
index 039681ce4..a4f631e09 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_config_chop.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_configuration_chop.go
@@ -24,12 +24,12 @@ import (
"sync"
"time"
- // log "k8s.io/klog"
log "github.com/golang/glog"
"github.com/imdario/mergo"
"gopkg.in/yaml.v3"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"github.com/altinity/clickhouse-operator/pkg/util"
)
@@ -177,7 +177,7 @@ type OperatorConfigConfig struct {
}
// OperatorConfigRestartPolicyRuleSet specifies set of rules
-type OperatorConfigRestartPolicyRuleSet map[Matchable]StringBool
+type OperatorConfigRestartPolicyRuleSet map[types.Matchable]types.StringBool
// OperatorConfigRestartPolicyRule specifies ClickHouse version and rules for this version
type OperatorConfigRestartPolicyRule struct {
@@ -199,15 +199,15 @@ type OperatorConfigFile struct {
User string `json:"user" yaml:"user"`
} `json:"path" yaml:"path"`
- Runtime OperatorConfigFileRuntime `json:"runtime,omitempty" yaml:"runtime,omitempty"`
+ Runtime OperatorConfigFileRuntime `json:"-" yaml:"-"`
}
// OperatorConfigFileRuntime specifies runtime section
type OperatorConfigFileRuntime struct {
// OperatorConfig files fetched from paths specified above. Maps "file name->file content"
- CommonConfigFiles map[string]string `json:"commonConfigFiles,omitempty" yaml:"commonConfigFiles,omitempty"`
- HostConfigFiles map[string]string `json:"hostConfigFiles,omitempty" yaml:"hostConfigFiles,omitempty"`
- UsersConfigFiles map[string]string `json:"usersConfigFiles,omitempty" yaml:"usersConfigFiles,omitempty"`
+ CommonConfigFiles map[string]string `json:"-" yaml:"-"`
+ HostConfigFiles map[string]string `json:"-" yaml:"-"`
+ UsersConfigFiles map[string]string `json:"-" yaml:"-"`
}
// OperatorConfigUser specifies User section
@@ -280,6 +280,11 @@ type OperatorConfigClickHouse struct {
} `json:"metrics" yaml:"metrics"`
}
+// OperatorConfigKeeper specifies Keeper section
+type OperatorConfigKeeper struct {
+ Config OperatorConfigConfig `json:"configuration" yaml:"configuration"`
+}
+
// OperatorConfigTemplate specifies template section
type OperatorConfigTemplate struct {
CHI OperatorConfigCHI `json:"chi" yaml:"chi"`
@@ -365,9 +370,9 @@ type OperatorConfigReconcileHost struct {
// OperatorConfigReconcileHostWait defines reconcile host wait config
type OperatorConfigReconcileHostWait struct {
- Exclude *StringBool `json:"exclude,omitempty" yaml:"exclude,omitempty"`
- Queries *StringBool `json:"queries,omitempty" yaml:"queries,omitempty"`
- Include *StringBool `json:"include,omitempty" yaml:"include,omitempty"`
+ Exclude *types.StringBool `json:"exclude,omitempty" yaml:"exclude,omitempty"`
+ Queries *types.StringBool `json:"queries,omitempty" yaml:"queries,omitempty"`
+ Include *types.StringBool `json:"include,omitempty" yaml:"include,omitempty"`
}
// OperatorConfigAnnotation specifies annotation section
@@ -384,7 +389,7 @@ type OperatorConfigLabel struct {
Exclude []string `json:"exclude" yaml:"exclude"`
// Whether to append *Scope* labels to StatefulSet and Pod.
- AppendScopeString StringBool `json:"appendScope" yaml:"appendScope"`
+ AppendScopeString types.StringBool `json:"appendScope" yaml:"appendScope"`
Runtime struct {
AppendScope bool `json:"appendScope" yaml:"appendScope"`
@@ -401,6 +406,7 @@ type OperatorConfig struct {
Runtime OperatorConfigRuntime `json:"runtime" yaml:"runtime"`
Watch OperatorConfigWatch `json:"watch" yaml:"watch"`
ClickHouse OperatorConfigClickHouse `json:"clickhouse" yaml:"clickhouse"`
+ Keeper OperatorConfigKeeper `json:"keeper" yaml:"keeper"`
Template OperatorConfigTemplate `json:"template" yaml:"template"`
Reconcile OperatorConfigReconcile `json:"reconcile" yaml:"reconcile"`
Annotation OperatorConfigAnnotation `json:"annotation" yaml:"annotation"`
@@ -508,7 +514,7 @@ type OperatorConfig struct {
ExcludeFromPropagationLabels []string `json:"excludeFromPropagationLabels" yaml:"excludeFromPropagationLabels"`
// Whether to append *Scope* labels to StatefulSet and Pod.
- AppendScopeLabelsString StringBool `json:"appendScopeLabels" yaml:"appendScopeLabels"`
+ AppendScopeLabelsString types.StringBool `json:"appendScopeLabels" yaml:"appendScopeLabels"`
// Grace period for Pod termination.
TerminationGracePeriod int `json:"terminationGracePeriod" yaml:"terminationGracePeriod"`
@@ -697,6 +703,7 @@ func (c *OperatorConfig) DeleteCHITemplate(template *ClickHouseInstallation) {
func (c *OperatorConfig) Postprocess() {
c.normalize()
c.readClickHouseCustomConfigFiles()
+ c.readKeeperCustomConfigFiles()
c.readCHITemplates()
c.applyEnvVarParams()
c.applyDefaultWatchNamespace()
@@ -705,9 +712,17 @@ func (c *OperatorConfig) Postprocess() {
func (c *OperatorConfig) normalizeSectionClickHouseConfigurationFile() {
// Process ClickHouse configuration files section
// Apply default paths in case nothing specified
- util.PreparePath(&c.ClickHouse.Config.File.Path.Common, c.Runtime.ConfigFolderPath, CommonConfigDir)
- util.PreparePath(&c.ClickHouse.Config.File.Path.Host, c.Runtime.ConfigFolderPath, HostConfigDir)
- util.PreparePath(&c.ClickHouse.Config.File.Path.User, c.Runtime.ConfigFolderPath, UsersConfigDir)
+ util.PreparePath(&c.ClickHouse.Config.File.Path.Common, c.Runtime.ConfigFolderPath, CommonConfigDirClickHouse)
+ util.PreparePath(&c.ClickHouse.Config.File.Path.Host, c.Runtime.ConfigFolderPath, HostConfigDirClickHouse)
+ util.PreparePath(&c.ClickHouse.Config.File.Path.User, c.Runtime.ConfigFolderPath, UsersConfigDirClickHouse)
+}
+
+func (c *OperatorConfig) normalizeSectionKeeperConfigurationFile() {
+ // Process Keeper configuration files section
+ // Apply default paths in case nothing specified
+ util.PreparePath(&c.Keeper.Config.File.Path.Common, c.Runtime.ConfigFolderPath, CommonConfigDirKeeper)
+ util.PreparePath(&c.Keeper.Config.File.Path.Host, c.Runtime.ConfigFolderPath, HostConfigDirKeeper)
+ util.PreparePath(&c.Keeper.Config.File.Path.User, c.Runtime.ConfigFolderPath, UsersConfigDirKeeper)
}
func (c *OperatorConfig) normalizeSectionTemplate() {
@@ -722,7 +737,7 @@ func (c *OperatorConfig) normalizeSectionTemplate() {
}
// Process ClickHouseInstallation templates section
- util.PreparePath(&c.Template.CHI.Path, c.Runtime.ConfigFolderPath, TemplatesDir)
+ util.PreparePath(&c.Template.CHI.Path, c.Runtime.ConfigFolderPath, TemplatesDirClickHouse)
}
func (c *OperatorConfig) normalizeSectionReconcileStatefulSet() {
@@ -892,6 +907,7 @@ func (c *OperatorConfig) normalize() {
c.normalizeSectionClickHouseConfigurationUserDefault()
c.normalizeSectionClickHouseAccess()
c.normalizeSectionClickHouseMetrics()
+ c.normalizeSectionKeeperConfigurationFile()
c.normalizeSectionTemplate()
c.normalizeSectionReconcileStatefulSet()
c.normalizeSectionReconcileRuntime()
@@ -956,6 +972,13 @@ func (c *OperatorConfig) readClickHouseCustomConfigFiles() {
c.ClickHouse.Config.File.Runtime.UsersConfigFiles = util.ReadFilesIntoMap(c.ClickHouse.Config.File.Path.User, c.isCHConfigExt)
}
+// readKeeperCustomConfigFiles reads all extra user-specified Keeper config files
+func (c *OperatorConfig) readKeeperCustomConfigFiles() {
+ c.Keeper.Config.File.Runtime.CommonConfigFiles = util.ReadFilesIntoMap(c.Keeper.Config.File.Path.Common, c.isCHConfigExt)
+ c.Keeper.Config.File.Runtime.HostConfigFiles = util.ReadFilesIntoMap(c.Keeper.Config.File.Path.Host, c.isCHConfigExt)
+ c.Keeper.Config.File.Runtime.UsersConfigFiles = util.ReadFilesIntoMap(c.Keeper.Config.File.Path.User, c.isCHConfigExt)
+}
+
// isCHConfigExt returns true in case specified file has proper extension for a ClickHouse config file
func (c *OperatorConfig) isCHConfigExt(file string) bool {
switch util.ExtToLower(file) {
@@ -1026,7 +1049,7 @@ func (c *OperatorConfig) IsWatchedNamespace(namespace string) bool {
// TODO unify approaches to multiple namespaces support
func (c *OperatorConfig) GetInformerNamespace() string {
// Namespace where informers would watch notifications from
- namespace := metav1.NamespaceAll
+ namespace := meta.NamespaceAll
if len(c.Watch.Namespaces) == 1 {
// We have exactly one watch namespace specified
// This scenario is implemented in go-client
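
Illustrative note (not part of the patch): the new Keeper section mirrors the ClickHouse one — Keeper gets its own common-config directory while reusing the users.d/conf.d/templates.d names — which the parallel constants make easy to compare:

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	// Only the common dir differs between the ClickHouse and Keeper defaults.
	fmt.Println(api.CommonConfigDirClickHouse, api.CommonConfigDirKeeper) // config.d keeper_config.d
	fmt.Println(api.UsersConfigDirClickHouse, api.UsersConfigDirKeeper)   // users.d users.d
	fmt.Println(api.HostConfigDirClickHouse, api.HostConfigDirKeeper)     // conf.d conf.d
}
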
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
index e7d3f4a2a..8da292d7f 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_defaults.go
@@ -14,27 +14,29 @@
package v1
-// ChiDefaults defines defaults section of .spec
-type ChiDefaults struct {
- ReplicasUseFQDN *StringBool `json:"replicasUseFQDN,omitempty" yaml:"replicasUseFQDN,omitempty"`
- DistributedDDL *ChiDistributedDDL `json:"distributedDDL,omitempty" yaml:"distributedDDL,omitempty"`
+import "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+
+// Defaults defines defaults section of .spec
+type Defaults struct {
+ ReplicasUseFQDN *types.StringBool `json:"replicasUseFQDN,omitempty" yaml:"replicasUseFQDN,omitempty"`
+ DistributedDDL *DistributedDDL `json:"distributedDDL,omitempty" yaml:"distributedDDL,omitempty"`
StorageManagement *StorageManagement `json:"storageManagement,omitempty" yaml:"storageManagement,omitempty"`
- Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
+ Templates *TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
}
-// NewChiDefaults creates new ChiDefaults object
-func NewChiDefaults() *ChiDefaults {
- return new(ChiDefaults)
+// NewDefaults creates new Defaults object
+func NewDefaults() *Defaults {
+ return new(Defaults)
}
// MergeFrom merges from specified object
-func (defaults *ChiDefaults) MergeFrom(from *ChiDefaults, _type MergeType) *ChiDefaults {
+func (defaults *Defaults) MergeFrom(from *Defaults, _type MergeType) *Defaults {
if from == nil {
return defaults
}
if defaults == nil {
- defaults = NewChiDefaults()
+ defaults = NewDefaults()
}
switch _type {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_distributed_ddl.go b/pkg/apis/clickhouse.altinity.com/v1/type_distributed_ddl.go
index bbbd32c52..8e93a795e 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_distributed_ddl.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_distributed_ddl.go
@@ -14,13 +14,18 @@
package v1
-// NewChiDistributedDDL creates new ChiDistributedDDL
-func NewChiDistributedDDL() *ChiDistributedDDL {
- return new(ChiDistributedDDL)
+// DistributedDDL defines distributedDDL section of .spec.defaults
+type DistributedDDL struct {
+ Profile string `json:"profile,omitempty" yaml:"profile"`
+}
+
+// NewDistributedDDL creates new DistributedDDL
+func NewDistributedDDL() *DistributedDDL {
+ return new(DistributedDDL)
}
// HasProfile checks whether profile is present
-func (d *ChiDistributedDDL) HasProfile() bool {
+func (d *DistributedDDL) HasProfile() bool {
if d == nil {
return false
}
@@ -28,7 +33,7 @@ func (d *ChiDistributedDDL) HasProfile() bool {
}
// GetProfile gets profile
-func (d *ChiDistributedDDL) GetProfile() string {
+func (d *DistributedDDL) GetProfile() string {
if d == nil {
return ""
}
@@ -36,13 +41,13 @@ func (d *ChiDistributedDDL) GetProfile() string {
}
// MergeFrom merges from specified source
-func (d *ChiDistributedDDL) MergeFrom(from *ChiDistributedDDL, _type MergeType) *ChiDistributedDDL {
+func (d *DistributedDDL) MergeFrom(from *DistributedDDL, _type MergeType) *DistributedDDL {
if from == nil {
return d
}
if d == nil {
- d = NewChiDistributedDDL()
+ d = NewDistributedDDL()
}
switch _type {
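
Illustrative note (not part of the patch): the renamed DistributedDDL keeps nil-receiver-safe getters, so an absent distributedDDL section degrades to "no profile" instead of panicking:

package main

import (
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
)

func main() {
	var ddl *api.DistributedDDL // absent section
	fmt.Println(ddl.HasProfile(), ddl.GetProfile()) // false ""

	ddl = api.NewDistributedDDL()
	ddl.Profile = "default"
	fmt.Println(ddl.HasProfile(), ddl.GetProfile()) // true default
}
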
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host.go b/pkg/apis/clickhouse.altinity.com/v1/type_host.go
index 4b6e34f16..2af2add9e 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_host.go
@@ -15,161 +15,210 @@
package v1
import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+ "github.com/altinity/clickhouse-operator/pkg/util"
)
-// ChiHost defines host (a data replica within a shard) of .spec.configuration.clusters[n].shards[m]
-type ChiHost struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
+// Host defines host (a data replica within a shard) of .spec.configuration.clusters[n].shards[m]
+type Host struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ HostSecure `json:",inline" yaml:",inline"`
+ HostPorts `json:",inline" yaml:",inline"`
+ HostSettings `json:",inline" yaml:",inline"`
+ Templates *TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+
+ Runtime HostRuntime `json:"-" yaml:"-"`
+}
+
+type HostSecure struct {
+ Insecure *types.StringBool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
+ Secure *types.StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
+}
+
+type HostPorts struct {
// DEPRECATED - to be removed soon
- Port int32 `json:"port,omitempty" yaml:"port,omitempty"`
+ Port *types.Int32 `json:"port,omitempty" yaml:"port,omitempty"`
- Insecure *StringBool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
- Secure *StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
- TCPPort int32 `json:"tcpPort,omitempty" yaml:"tcpPort,omitempty"`
- TLSPort int32 `json:"tlsPort,omitempty" yaml:"tlsPort,omitempty"`
- HTTPPort int32 `json:"httpPort,omitempty" yaml:"httpPort,omitempty"`
- HTTPSPort int32 `json:"httpsPort,omitempty" yaml:"httpsPort,omitempty"`
- InterserverHTTPPort int32 `json:"interserverHTTPPort,omitempty" yaml:"interserverHTTPPort,omitempty"`
- Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
+ TCPPort *types.Int32 `json:"tcpPort,omitempty" yaml:"tcpPort,omitempty"`
+ TLSPort *types.Int32 `json:"tlsPort,omitempty" yaml:"tlsPort,omitempty"`
+ HTTPPort *types.Int32 `json:"httpPort,omitempty" yaml:"httpPort,omitempty"`
+ HTTPSPort *types.Int32 `json:"httpsPort,omitempty" yaml:"httpsPort,omitempty"`
+ InterserverHTTPPort *types.Int32 `json:"interserverHTTPPort,omitempty" yaml:"interserverHTTPPort,omitempty"`
+ ZKPort *types.Int32 `json:"zkPort,omitempty" yaml:"zkPort,omitempty"`
+ RaftPort *types.Int32 `json:"raftPort,omitempty" yaml:"raftPort,omitempty"`
+}
- Runtime ChiHostRuntime `json:"-" yaml:"-"`
+type HostSettings struct {
+ Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
}
-type ChiHostRuntime struct {
+type HostRuntime struct {
// Internal data
- Address ChiHostAddress `json:"-" yaml:"-"`
- Config ChiHostConfig `json:"-" yaml:"-"`
+ Address HostAddress `json:"-" yaml:"-"`
Version *swversion.SoftWareVersion `json:"-" yaml:"-"`
reconcileAttributes *HostReconcileAttributes `json:"-" yaml:"-" testdiff:"ignore"`
+ replicas *types.Int32 `json:"-" yaml:"-"`
+ hasData bool `json:"-" yaml:"-"`
+
// CurStatefulSet is a current stateful set, fetched from k8s
CurStatefulSet *apps.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
// DesiredStatefulSet is a desired stateful set - reconcile target
- DesiredStatefulSet *apps.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
- CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+ DesiredStatefulSet *apps.StatefulSet `json:"-" yaml:"-" testdiff:"ignore"`
+
+ cr ICustomResource `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r *HostRuntime) GetAddress() IHostAddress {
+ return &r.Address
+}
+
+func (r *HostRuntime) SetCR(cr ICustomResource) {
+ r.cr = cr
+}
+
+func (r *HostRuntime) GetCR() ICustomResource {
+ return r.cr.(ICustomResource)
+}
+
+func (host *Host) GetRuntime() IHostRuntime {
+ return &host.Runtime
}
// GetReconcileAttributes is an ensurer getter
-func (host *ChiHost) GetReconcileAttributes() *HostReconcileAttributes {
+func (host *Host) GetReconcileAttributes() *HostReconcileAttributes {
if host == nil {
return nil
}
if host.Runtime.reconcileAttributes == nil {
- host.Runtime.reconcileAttributes = NewChiHostReconcileAttributes()
+ host.Runtime.reconcileAttributes = NewHostReconcileAttributes()
}
return host.Runtime.reconcileAttributes
}
// InheritSettingsFrom inherits settings from specified shard and replica
-func (host *ChiHost) InheritSettingsFrom(shard *ChiShard, replica *ChiReplica) {
- if shard != nil {
- host.Settings = host.Settings.MergeFrom(shard.Settings)
+func (host *Host) InheritSettingsFrom(shard IShard, replica IReplica) {
+ if (shard != nil) && shard.HasSettings() {
+ host.Settings = host.Settings.MergeFrom(shard.GetSettings())
}
- if replica != nil {
- host.Settings = host.Settings.MergeFrom(replica.Settings)
+ if (replica != nil) && replica.HasSettings() {
+ host.Settings = host.Settings.MergeFrom(replica.GetSettings())
}
}
// InheritFilesFrom inherits files from specified shard and replica
-func (host *ChiHost) InheritFilesFrom(shard *ChiShard, replica *ChiReplica) {
- if shard != nil {
- host.Files = host.Files.MergeFrom(shard.Files)
- }
-
- if replica != nil {
- host.Files = host.Files.MergeFrom(replica.Files)
- }
-}
-
-// InheritTemplatesFrom inherits templates from specified shard and replica
-func (host *ChiHost) InheritTemplatesFrom(shard *ChiShard, replica *ChiReplica, template *HostTemplate) {
- if shard != nil {
- host.Templates = host.Templates.MergeFrom(shard.Templates, MergeTypeFillEmptyValues)
- }
-
- if replica != nil {
- host.Templates = host.Templates.MergeFrom(replica.Templates, MergeTypeFillEmptyValues)
- }
-
- if template != nil {
- host.Templates = host.Templates.MergeFrom(template.Spec.Templates, MergeTypeFillEmptyValues)
+func (host *Host) InheritFilesFrom(shard IShard, replica IReplica) {
+ if (shard != nil) && shard.HasFiles() {
+ host.Files = host.Files.MergeFrom(shard.GetFiles())
+ }
+
+ if (replica != nil) && replica.HasFiles() {
+ host.Files = host.Files.MergeFrom(replica.GetFiles())
+ }
+}
+
+// InheritTemplatesFrom inherits templates from specified shard, replica or template
+func (host *Host) InheritTemplatesFrom(sources ...any) {
+ for _, source := range sources {
+ switch typed := source.(type) {
+ case IShard:
+ shard := typed
+ if shard.HasTemplates() {
+ host.Templates = host.Templates.MergeFrom(shard.GetTemplates(), MergeTypeFillEmptyValues)
+ }
+ case IReplica:
+ replica := typed
+ if replica.HasTemplates() {
+ host.Templates = host.Templates.MergeFrom(replica.GetTemplates(), MergeTypeFillEmptyValues)
+ }
+ case *HostTemplate:
+ template := typed
+ if template != nil {
+ host.Templates = host.Templates.MergeFrom(template.Spec.Templates, MergeTypeFillEmptyValues)
+ }
+ }
}
host.Templates.HandleDeprecatedFields()
}
-func isUnassigned(port int32) bool {
- return port == PortMayBeAssignedLaterOrLeftUnused
-}
-
// MergeFrom merges from specified host
-func (host *ChiHost) MergeFrom(from *ChiHost) {
+func (host *Host) MergeFrom(from *Host) {
if (host == nil) || (from == nil) {
return
}
host.Insecure = host.Insecure.MergeFrom(from.Insecure)
host.Secure = host.Secure.MergeFrom(from.Secure)
- if isUnassigned(host.TCPPort) {
- host.TCPPort = from.TCPPort
+
+ if !host.TCPPort.HasValue() {
+ host.TCPPort.MergeFrom(from.TCPPort)
+ }
+ if !host.TLSPort.HasValue() {
+ host.TLSPort.MergeFrom(from.TLSPort)
+ }
+ if !host.HTTPPort.HasValue() {
+ host.HTTPPort.MergeFrom(from.HTTPPort)
}
- if isUnassigned(host.TLSPort) {
- host.TLSPort = from.TLSPort
+ if !host.HTTPSPort.HasValue() {
+ host.HTTPSPort.MergeFrom(from.HTTPSPort)
}
- if isUnassigned(host.HTTPPort) {
- host.HTTPPort = from.HTTPPort
+ if !host.InterserverHTTPPort.HasValue() {
+ host.InterserverHTTPPort.MergeFrom(from.InterserverHTTPPort)
}
- if isUnassigned(host.HTTPSPort) {
- host.HTTPSPort = from.HTTPSPort
+ if !host.ZKPort.HasValue() {
+ host.ZKPort.MergeFrom(from.ZKPort)
}
- if isUnassigned(host.InterserverHTTPPort) {
- host.InterserverHTTPPort = from.InterserverHTTPPort
+ if !host.RaftPort.HasValue() {
+ host.RaftPort.MergeFrom(from.RaftPort)
}
+
host.Templates = host.Templates.MergeFrom(from.Templates, MergeTypeFillEmptyValues)
host.Templates.HandleDeprecatedFields()
}
// GetHostTemplate gets host template
-func (host *ChiHost) GetHostTemplate() (*HostTemplate, bool) {
+func (host *Host) GetHostTemplate() (*HostTemplate, bool) {
if !host.Templates.HasHostTemplate() {
return nil, false
}
name := host.Templates.GetHostTemplate()
- return host.Runtime.CHI.GetHostTemplate(name)
+ return host.GetCR().GetHostTemplate(name)
}
// GetPodTemplate gets pod template
-func (host *ChiHost) GetPodTemplate() (*PodTemplate, bool) {
+func (host *Host) GetPodTemplate() (*PodTemplate, bool) {
if !host.Templates.HasPodTemplate() {
return nil, false
}
name := host.Templates.GetPodTemplate()
- return host.Runtime.CHI.GetPodTemplate(name)
+ return host.GetCR().GetPodTemplate(name)
}
// GetServiceTemplate gets service template
-func (host *ChiHost) GetServiceTemplate() (*ServiceTemplate, bool) {
+func (host *Host) GetServiceTemplate() (*ServiceTemplate, bool) {
if !host.Templates.HasReplicaServiceTemplate() {
return nil, false
}
name := host.Templates.GetReplicaServiceTemplate()
- return host.Runtime.CHI.GetServiceTemplate(name)
+ return host.GetCR().GetServiceTemplate(name)
}
// GetStatefulSetReplicasNum gets stateful set replica num
-func (host *ChiHost) GetStatefulSetReplicasNum(shutdown bool) *int32 {
+func (host *Host) GetStatefulSetReplicasNum(shutdown bool) *int32 {
var num int32 = 0
switch {
case shutdown:
num = 0
case host.IsStopped():
num = 0
+ case host.Runtime.replicas.HasValue():
+ num = host.Runtime.replicas.Value()
default:
num = 1
}
@@ -177,52 +226,56 @@ func (host *ChiHost) GetStatefulSetReplicasNum(shutdown bool) *int32 {
}
// GetSettings gets settings
-func (host *ChiHost) GetSettings() *Settings {
+func (host *Host) GetSettings() *Settings {
return host.Settings
}
// GetZookeeper gets zookeeper
-func (host *ChiHost) GetZookeeper() *ChiZookeeperConfig {
+func (host *Host) GetZookeeper() *ZookeeperConfig {
cluster := host.GetCluster()
- return cluster.Zookeeper
+ return cluster.GetZookeeper()
}
// GetName gets name
-func (host *ChiHost) GetName() string {
+func (host *Host) GetName() string {
if host == nil {
return "host-is-nil"
}
return host.Name
}
-// GetCHI gets CHI
-func (host *ChiHost) GetCHI() *ClickHouseInstallation {
- if host == nil {
- return nil
- }
- return host.Runtime.CHI
+// GetCR gets CHI
+func (host *Host) GetCR() ICustomResource {
+ return host.GetRuntime().GetCR()
+}
+
+// HasCR checks whether host has CHI
+func (host *Host) HasCR() bool {
+ return host.GetCR() != nil
}
-// HasCHI checks whether host has CHI
-func (host *ChiHost) HasCHI() bool {
- return host.GetCHI() != nil
+func (host *Host) SetCR(chi ICustomResource) {
+ host.GetRuntime().SetCR(chi)
}
// GetCluster gets cluster
-func (host *ChiHost) GetCluster() *Cluster {
+func (host *Host) GetCluster() ICluster {
// Host has to have filled Address
- return host.GetCHI().FindCluster(host.Runtime.Address.ClusterName)
+ return host.GetCR().FindCluster(host.Runtime.Address.ClusterName)
}
// GetShard gets shard
-func (host *ChiHost) GetShard() *ChiShard {
+func (host *Host) GetShard() IShard {
// Host has to have filled Address
- return host.GetCHI().FindShard(host.Runtime.Address.ClusterName, host.Runtime.Address.ShardName)
+ return host.GetCR().FindShard(host.Runtime.Address.ClusterName, host.Runtime.Address.ShardName)
}
// GetAncestor gets ancestor of a host
-func (host *ChiHost) GetAncestor() *ChiHost {
- return host.GetCHI().GetAncestor().FindHost(
+func (host *Host) GetAncestor() *Host {
+ if !host.HasAncestorCR() {
+ return nil
+ }
+ return host.GetAncestorCR().FindHost(
host.Runtime.Address.ClusterName,
host.Runtime.Address.ShardName,
host.Runtime.Address.HostName,
@@ -230,34 +283,34 @@ func (host *ChiHost) GetAncestor() *ChiHost {
}
// HasAncestor checks whether host has an ancestor
-func (host *ChiHost) HasAncestor() bool {
+func (host *Host) HasAncestor() bool {
return host.GetAncestor() != nil
}
-// GetAncestorCHI gets ancestor of a host
-func (host *ChiHost) GetAncestorCHI() *ClickHouseInstallation {
- return host.GetCHI().GetAncestor()
+// GetAncestorCR gets ancestor of a host
+func (host *Host) GetAncestorCR() ICustomResource {
+ return host.GetCR().GetAncestor()
}
-// HasAncestorCHI checks whether host has an ancestor
-func (host *ChiHost) HasAncestorCHI() bool {
- return host.GetAncestorCHI() != nil
+// HasAncestorCR checks whether host has an ancestor
+func (host *Host) HasAncestorCR() bool {
+ return host.GetAncestorCR().IsNonZero()
}
// WalkVolumeClaimTemplates walks VolumeClaimTemplate(s)
-func (host *ChiHost) WalkVolumeClaimTemplates(f func(template *VolumeClaimTemplate)) {
- host.GetCHI().WalkVolumeClaimTemplates(f)
+func (host *Host) WalkVolumeClaimTemplates(f func(template *VolumeClaimTemplate)) {
+ host.GetCR().WalkVolumeClaimTemplates(f)
}
// IsStopped checks whether host is stopped
-func (host *ChiHost) IsStopped() bool {
- return host.GetCHI().IsStopped()
+func (host *Host) IsStopped() bool {
+ return host.GetCR().IsStopped()
}
-// IsNewOne checks whether host is a new one
+// IsInNewCluster checks whether host is in a new cluster
// TODO unify with model HostIsNewOne
-func (host *ChiHost) IsNewOne() bool {
- return !host.HasAncestor()
+func (host *Host) IsInNewCluster() bool {
+ return !host.HasAncestor() && (host.GetCR().IEnsureStatus().GetHostsCount() == host.GetCR().IEnsureStatus().GetHostsAddedCount())
}
// WhichStatefulSet specifies which StatefulSet we are going to process in host functions
@@ -281,7 +334,7 @@ func (w WhichStatefulSet) DesiredStatefulSet() bool {
}
// WalkVolumeMounts walks VolumeMount(s)
-func (host *ChiHost) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount *core.VolumeMount)) {
+func (host *Host) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount *core.VolumeMount)) {
if host == nil {
return
}
@@ -314,7 +367,7 @@ func (host *ChiHost) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount
}
// GetVolumeMount gets VolumeMount by the name
-//func (host *ChiHost) GetVolumeMount(volumeMountName string) (vm *corev1.VolumeMount, ok bool) {
+//func (host *Host) GetVolumeMount(volumeMountName string) (vm *corev1.VolumeMount, ok bool) {
// host.WalkVolumeMounts(func(volumeMount *corev1.VolumeMount) {
// if volumeMount.Name == volumeMountName {
// vm = volumeMount
@@ -325,7 +378,7 @@ func (host *ChiHost) WalkVolumeMounts(which WhichStatefulSet, f func(volumeMount
//}
// IsSecure checks whether the host requires secure communication
-func (host *ChiHost) IsSecure() bool {
+func (host *Host) IsSecure() bool {
if host == nil {
return false
}
@@ -345,7 +398,7 @@ func (host *ChiHost) IsSecure() bool {
}
// IsInsecure checks whether the host requires insecure communication
-func (host *ChiHost) IsInsecure() bool {
+func (host *Host) IsInsecure() bool {
if host == nil {
return false
}
@@ -365,7 +418,7 @@ func (host *ChiHost) IsInsecure() bool {
}
// IsFirst checks whether the host is the first host of the whole CHI
-func (host *ChiHost) IsFirst() bool {
+func (host *Host) IsFirst() bool {
if host == nil {
return false
}
@@ -373,8 +426,17 @@ func (host *ChiHost) IsFirst() bool {
return host.Runtime.Address.CHIScopeIndex == 0
}
+// IsLast checks whether the host is the last host of the whole CHI
+func (host *Host) IsLast() bool {
+ if host == nil {
+ return false
+ }
+
+ return host.Runtime.Address.CHIScopeIndex == (host.GetCR().HostsCount() - 1)
+}
+
// HasCurStatefulSet checks whether host has CurStatefulSet
-func (host *ChiHost) HasCurStatefulSet() bool {
+func (host *Host) HasCurStatefulSet() bool {
if host == nil {
return false
}
@@ -383,10 +445,115 @@ func (host *ChiHost) HasCurStatefulSet() bool {
}
// HasDesiredStatefulSet checks whether host has DesiredStatefulSet
-func (host *ChiHost) HasDesiredStatefulSet() bool {
+func (host *Host) HasDesiredStatefulSet() bool {
if host == nil {
return false
}
return host.Runtime.DesiredStatefulSet != nil
}
+
+const (
+ ChDefaultPortName = "port"
+ ChDefaultPortNumber = int32(9000)
+
+ // ClickHouse open ports names and values
+ ChDefaultTCPPortName = "tcp"
+ ChDefaultTCPPortNumber = int32(9000)
+ ChDefaultTLSPortName = "secureclient"
+ ChDefaultTLSPortNumber = int32(9440)
+ ChDefaultHTTPPortName = "http"
+ ChDefaultHTTPPortNumber = int32(8123)
+ ChDefaultHTTPSPortName = "https"
+ ChDefaultHTTPSPortNumber = int32(8443)
+ ChDefaultInterserverHTTPPortName = "interserver"
+ ChDefaultInterserverHTTPPortNumber = int32(9009)
+
+ // Keeper open ports names and values
+ KpDefaultZKPortName = "zk"
+ KpDefaultZKPortNumber = int32(2181)
+ KpDefaultRaftPortName = "raft"
+ KpDefaultRaftPortNumber = int32(9444)
+)
+
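+// WalkPorts walks over all known ports of the host and calls f on each one; iteration stops as soon as f returns true.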
+func (host *Host) WalkPorts(f func(name string, port *types.Int32, protocol core.Protocol) bool) {
+ if host == nil {
+ return
+ }
+ if f(ChDefaultPortName, host.Port, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultTCPPortName, host.TCPPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultTLSPortName, host.TLSPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultHTTPPortName, host.HTTPPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultHTTPSPortName, host.HTTPSPort, core.ProtocolTCP) {
+ return
+ }
+ if f(ChDefaultInterserverHTTPPortName, host.InterserverHTTPPort, core.ProtocolTCP) {
+ return
+ }
+ if f(KpDefaultZKPortName, host.ZKPort, core.ProtocolTCP) {
+ return
+ }
+ if f(KpDefaultRaftPortName, host.RaftPort, core.ProtocolTCP) {
+ return
+ }
+}
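+
+// A minimal usage sketch (illustrative only): count the host ports that carry a value.
+//
+//	count := 0
+//	host.WalkPorts(func(name string, port *types.Int32, protocol core.Protocol) bool {
+//		if port.HasValue() {
+//			count++
+//		}
+//		return false // do not abort iteration
+//	})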
+
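+// WalkSpecifiedPorts walks over explicitly specified ports of the host only, skipping ports that carry no value.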
+func (host *Host) WalkSpecifiedPorts(f func(name string, port *types.Int32, protocol core.Protocol) bool) {
+ host.WalkPorts(
+ func(_name string, _port *types.Int32, _protocol core.Protocol) bool {
+ if _port.HasValue() {
+ // Port is explicitly specified - call provided function on it
+ return f(_name, _port, _protocol)
+ }
+ // Do not break, continue iterating
+ return false
+ },
+ )
+}
+
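+// AppendSpecifiedPortsToContainer appends all explicitly specified ports of the host to the container's ports list.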
+func (host *Host) AppendSpecifiedPortsToContainer(container *core.Container) {
+ // Walk over all assigned ports of the host and append each port to the list of container's ports
+ host.WalkSpecifiedPorts(
+ func(name string, port *types.Int32, protocol core.Protocol) bool {
+ // Append assigned port to the list of container's ports
+ container.Ports = append(container.Ports,
+ core.ContainerPort{
+ Name: name,
+ ContainerPort: port.Value(),
+ Protocol: protocol,
+ },
+ )
+ // Do not abort, continue iterating
+ return false
+ },
+ )
+}
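+
+// A minimal usage sketch (illustrative; the container name is an assumption):
+//
+//	container := &core.Container{Name: "clickhouse"}
+//	host.AppendSpecifiedPortsToContainer(container)
+//	// container.Ports now holds one entry per explicitly specified host port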
+
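+// HasListedTablesCreated checks whether the named host is listed among the hosts with created tables in the CR status.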
+func (host *Host) HasListedTablesCreated(name string) bool {
+ return util.InArray(
+ name,
+ host.GetCR().IEnsureStatus().GetHostsWithTablesCreated(),
+ )
+}
+
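+// HasData checks whether the host has data.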
+func (host *Host) HasData() bool {
+ if host == nil {
+ return false
+ }
+ return host.Runtime.hasData
+}
+
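+// SetHasData sets the has-data flag of the host.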
+func (host *Host) SetHasData(hasData bool) {
+ if host == nil {
+ return
+ }
+ host.Runtime.hasData = hasData
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go b/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
index 22bffe916..84f32d825 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_host_address.go
@@ -16,8 +16,8 @@ package v1
import "fmt"
-// ChiHostAddress defines address of a host within ClickHouseInstallation
-type ChiHostAddress struct {
+// HostAddress defines address of a host within ClickHouseInstallation
+type HostAddress struct {
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
StatefulSet string `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
FQDN string `json:"fqdn,omitempty" yaml:"fqdn,omitempty"`
@@ -41,23 +41,183 @@ type ChiHostAddress struct {
ClusterScopeCycleOffset int `json:"clusterScopeCycleOffset,omitempty" yaml:"clusterScopeCycleOffset,omitempty"`
}
+func (a *HostAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *HostAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *HostAddress) GetStatefulSet() string {
+ return a.StatefulSet
+}
+
+func (a *HostAddress) GetFQDN() string {
+ return a.FQDN
+}
+
+func (a *HostAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *HostAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *HostAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *HostAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *HostAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *HostAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (a *HostAddress) GetShardName() string {
+ return a.ShardName
+}
+
+func (a *HostAddress) SetShardName(name string) {
+ a.ShardName = name
+}
+
+func (a *HostAddress) GetShardIndex() int {
+ return a.ShardIndex
+}
+
+func (a *HostAddress) SetShardIndex(index int) {
+ a.ShardIndex = index
+}
+
+func (a *HostAddress) GetShardScopeIndex() int {
+ return a.ShardScopeIndex
+}
+
+func (a *HostAddress) SetShardScopeIndex(index int) {
+ a.ShardScopeIndex = index
+}
+
+func (a *HostAddress) GetReplicaName() string {
+ return a.ReplicaName
+}
+
+func (a *HostAddress) SetReplicaName(name string) {
+ a.ReplicaName = name
+}
+
+func (a *HostAddress) GetReplicaIndex() int {
+ return a.ReplicaIndex
+}
+
+func (a *HostAddress) SetReplicaIndex(index int) {
+ a.ReplicaIndex = index
+}
+
+func (a *HostAddress) GetReplicaScopeIndex() int {
+ return a.ReplicaScopeIndex
+}
+
+func (a *HostAddress) SetReplicaScopeIndex(index int) {
+ a.ReplicaScopeIndex = index
+}
+
+func (a *HostAddress) GetHostName() string {
+ return a.HostName
+}
+
+func (a *HostAddress) SetHostName(name string) {
+ a.HostName = name
+}
+
+func (a *HostAddress) GetCRScopeIndex() int {
+ return a.CHIScopeIndex
+}
+
+func (a *HostAddress) SetCRScopeIndex(index int) {
+ a.CHIScopeIndex = index
+}
+
+func (a *HostAddress) GetCRScopeCycleSize() int {
+ return a.CHIScopeCycleSize
+}
+
+func (a *HostAddress) SetCRScopeCycleSize(size int) {
+ a.CHIScopeCycleSize = size
+}
+
+func (a *HostAddress) GetCRScopeCycleIndex() int {
+ return a.CHIScopeCycleIndex
+}
+
+func (a *HostAddress) SetCRScopeCycleIndex(index int) {
+ a.CHIScopeCycleIndex = index
+}
+
+func (a *HostAddress) GetCRScopeCycleOffset() int {
+ return a.CHIScopeCycleOffset
+}
+
+func (a *HostAddress) SetCRScopeCycleOffset(offset int) {
+ a.CHIScopeCycleOffset = offset
+}
+
+func (a *HostAddress) GetClusterScopeIndex() int {
+ return a.ClusterScopeIndex
+}
+
+func (a *HostAddress) SetClusterScopeIndex(index int) {
+ a.ClusterScopeIndex = index
+}
+
+func (a *HostAddress) GetClusterScopeCycleSize() int {
+ return a.ClusterScopeCycleSize
+}
+
+func (a *HostAddress) SetClusterScopeCycleSize(size int) {
+ a.ClusterScopeCycleSize = size
+}
+
+func (a *HostAddress) GetClusterScopeCycleIndex() int {
+ return a.ClusterScopeCycleIndex
+}
+
+func (a *HostAddress) SetClusterScopeCycleIndex(index int) {
+ a.ClusterScopeCycleIndex = index
+}
+
+func (a *HostAddress) GetClusterScopeCycleOffset() int {
+ return a.ClusterScopeCycleOffset
+}
+
+func (a *HostAddress) SetClusterScopeCycleOffset(offset int) {
+ a.ClusterScopeCycleOffset = offset
+}
+
// CompactString creates compact string representation
-func (a ChiHostAddress) CompactString() string {
+func (a HostAddress) CompactString() string {
return fmt.Sprintf("ns:%s|chi:%s|clu:%s|sha:%s|rep:%s|host:%s",
- a.Namespace, a.CHIName, a.ClusterName, a.ShardName, a.ReplicaName, a.HostName)
+ a.GetNamespace(),
+ a.GetCRName(),
+ a.GetClusterName(),
+ a.GetShardName(),
+ a.GetReplicaName(),
+ a.GetHostName())
}
// ClusterNameString creates cluster+host pair
-func (a ChiHostAddress) ClusterNameString() string {
- return fmt.Sprintf("%s/%s", a.ClusterName, a.HostName)
+func (a HostAddress) ClusterNameString() string {
+ return fmt.Sprintf("%s/%s", a.GetClusterName(), a.GetHostName())
}
// NamespaceNameString creates namespace+name pair
-func (a ChiHostAddress) NamespaceNameString() string {
- return fmt.Sprintf("%s/%s", a.Namespace, a.HostName)
-}
-
-// NamespaceCHINameString creates namespace+CHI pair
-func (a ChiHostAddress) NamespaceCHINameString() string {
- return fmt.Sprintf("%s/%s", a.Namespace, a.CHIName)
+func (a HostAddress) NamespaceNameString() string {
+ return fmt.Sprintf("%s/%s", a.GetNamespace(), a.GetHostName())
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host_reconcile_attributes.go b/pkg/apis/clickhouse.altinity.com/v1/type_host_reconcile_attributes.go
index 57a097b2b..012c7b6b9 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host_reconcile_attributes.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_host_reconcile_attributes.go
@@ -43,8 +43,8 @@ type HostReconcileAttributes struct {
exclude bool
}
-// NewChiHostReconcileAttributes creates new reconcile attributes
-func NewChiHostReconcileAttributes() *HostReconcileAttributes {
+// NewHostReconcileAttributes creates new reconcile attributes
+func NewHostReconcileAttributes() *HostReconcileAttributes {
return &HostReconcileAttributes{}
}
@@ -214,8 +214,8 @@ func (s *HostReconcileAttributes) String() string {
)
}
-// ChiHostReconcileAttributesCounters defines host reconcile status and attributes counters
-type ChiHostReconcileAttributesCounters struct {
+// HostReconcileAttributesCounters defines host reconcile status and attributes counters
+type HostReconcileAttributesCounters struct {
status map[ObjectStatus]int
// Attributes are used by config generator
@@ -228,15 +228,15 @@ type ChiHostReconcileAttributesCounters struct {
exclude int
}
-// NewChiHostReconcileAttributesCounters creates new reconcile attributes
-func NewChiHostReconcileAttributesCounters() *ChiHostReconcileAttributesCounters {
- return &ChiHostReconcileAttributesCounters{
+// NewHostReconcileAttributesCounters creates new reconcile attributes counters
+func NewHostReconcileAttributesCounters() *HostReconcileAttributesCounters {
+ return &HostReconcileAttributesCounters{
status: make(map[ObjectStatus]int),
}
}
// Add adds to counters provided HostReconcileAttributes
-func (s *ChiHostReconcileAttributesCounters) Add(a *HostReconcileAttributes) {
+func (s *HostReconcileAttributesCounters) Add(a *HostReconcileAttributes) {
if s == nil {
return
}
@@ -267,7 +267,7 @@ func (s *ChiHostReconcileAttributesCounters) Add(a *HostReconcileAttributes) {
}
// GetAdd gets added
-func (s *ChiHostReconcileAttributesCounters) GetAdd() int {
+func (s *HostReconcileAttributesCounters) GetAdd() int {
if s == nil {
return 0
}
@@ -275,7 +275,7 @@ func (s *ChiHostReconcileAttributesCounters) GetAdd() int {
}
// GetRemove gets removed
-func (s *ChiHostReconcileAttributesCounters) GetRemove() int {
+func (s *HostReconcileAttributesCounters) GetRemove() int {
if s == nil {
return 0
}
@@ -283,7 +283,7 @@ func (s *ChiHostReconcileAttributesCounters) GetRemove() int {
}
// GetModify gets modified
-func (s *ChiHostReconcileAttributesCounters) GetModify() int {
+func (s *HostReconcileAttributesCounters) GetModify() int {
if s == nil {
return 0
}
@@ -291,7 +291,7 @@ func (s *ChiHostReconcileAttributesCounters) GetModify() int {
}
// GetFound gets found
-func (s *ChiHostReconcileAttributesCounters) GetFound() int {
+func (s *HostReconcileAttributesCounters) GetFound() int {
if s == nil {
return 0
}
@@ -299,9 +299,14 @@ func (s *ChiHostReconcileAttributesCounters) GetFound() int {
}
// GetExclude gets exclude
-func (s *ChiHostReconcileAttributesCounters) GetExclude() int {
+func (s *HostReconcileAttributesCounters) GetExclude() int {
if s == nil {
return 0
}
return s.exclude
}
+
+// AddOnly checks whether the counters contain added items only (nothing found, modified or removed)
+func (s *HostReconcileAttributesCounters) AddOnly() bool {
+ return s.GetAdd() > 0 && s.GetFound() == 0 && s.GetModify() == 0 && s.GetRemove() == 0
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go b/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
index 62114aeea..e7fd9c0e8 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
@@ -18,7 +18,7 @@ package v1
type HostsField struct {
ShardsCount int
ReplicasCount int
- Field [][]*ChiHost
+ Field [][]*Host
}
// NewHostsField creates new field of hosts
@@ -28,34 +28,34 @@ func NewHostsField(shards, replicas int) *HostsField {
hf.ShardsCount = shards
hf.ReplicasCount = replicas
- hf.Field = make([][]*ChiHost, hf.ShardsCount)
+ hf.Field = make([][]*Host, hf.ShardsCount)
for shard := 0; shard < hf.ShardsCount; shard++ {
- hf.Field[shard] = make([]*ChiHost, hf.ReplicasCount)
+ hf.Field[shard] = make([]*Host, hf.ReplicasCount)
}
return hf
}
// Set sets host on specified coordinates
-func (hf *HostsField) Set(shard, replica int, host *ChiHost) {
+func (hf *HostsField) Set(shard, replica int, host *Host) {
hf.Field[shard][replica] = host
}
// Get gets host from specified coordinates
-func (hf *HostsField) Get(shard, replica int) *ChiHost {
+func (hf *HostsField) Get(shard, replica int) *Host {
return hf.Field[shard][replica]
}
// GetOrCreate gets and creates if necessary
-func (hf *HostsField) GetOrCreate(shard, replica int) *ChiHost {
+func (hf *HostsField) GetOrCreate(shard, replica int) *Host {
if hf.Field[shard][replica] == nil {
- hf.Field[shard][replica] = new(ChiHost)
+ hf.Field[shard][replica] = new(Host)
}
return hf.Field[shard][replica]
}
// WalkHosts walks hosts with a function
-func (hf *HostsField) WalkHosts(f func(shard, replica int, host *ChiHost) error) []error {
+func (hf *HostsField) WalkHosts(f func(shard, replica int, host *Host) error) []error {
res := make([]error, 0)
for shardIndex := range hf.Field {
@@ -72,7 +72,7 @@ func (hf *HostsField) WalkHosts(f func(shard, replica int, host *ChiHost) error)
// HostsCount returns hosts number
func (hf *HostsField) HostsCount() int {
count := 0
- hf.WalkHosts(func(shard, replica int, host *ChiHost) error {
+ hf.WalkHosts(func(shard, replica int, host *Host) error {
count++
return nil
})
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go b/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go
new file mode 100644
index 000000000..e11e18707
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_reconciling.go
@@ -0,0 +1,157 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "strings"
+ "time"
+)
+
+// Reconciling defines reconciling specification
+type Reconciling struct {
+ // About to be DEPRECATED
+ Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
+ // ConfigMapPropagationTimeout specifies timeout for ConfigMap to propagate
+ ConfigMapPropagationTimeout int `json:"configMapPropagationTimeout,omitempty" yaml:"configMapPropagationTimeout,omitempty"`
+ // Cleanup specifies cleanup behavior
+ Cleanup *Cleanup `json:"cleanup,omitempty" yaml:"cleanup,omitempty"`
+}
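+
+// An illustrative (not normative) manifest fragment this struct maps to;
+// field values are examples only:
+//
+//	spec:
+//	  reconciling:
+//	    policy: nowait
+//	    configMapPropagationTimeout: 90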
+
+// NewReconciling creates new reconciling
+func NewReconciling() *Reconciling {
+ return new(Reconciling)
+}
+
+// MergeFrom merges from specified reconciling
+func (t *Reconciling) MergeFrom(from *Reconciling, _type MergeType) *Reconciling {
+ if from == nil {
+ return t
+ }
+
+ if t == nil {
+ t = NewReconciling()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if t.Policy == "" {
+ t.Policy = from.Policy
+ }
+ if t.ConfigMapPropagationTimeout == 0 {
+ t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.Policy != "" {
+ // Override by non-empty values only
+ t.Policy = from.Policy
+ }
+ if from.ConfigMapPropagationTimeout != 0 {
+ // Override by non-empty values only
+ t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
+ }
+ }
+
+ t.Cleanup = t.Cleanup.MergeFrom(from.Cleanup, _type)
+
+ return t
+}
+
+// SetDefaults set default values for reconciling
+func (t *Reconciling) SetDefaults() *Reconciling {
+ if t == nil {
+ return nil
+ }
+ t.Policy = ReconcilingPolicyUnspecified
+ t.ConfigMapPropagationTimeout = 10
+ t.Cleanup = NewCleanup().SetDefaults()
+ return t
+}
+
+// GetPolicy gets policy
+func (t *Reconciling) GetPolicy() string {
+ if t == nil {
+ return ""
+ }
+ return t.Policy
+}
+
+// SetPolicy sets policy
+func (t *Reconciling) SetPolicy(p string) {
+ if t == nil {
+ return
+ }
+ t.Policy = p
+}
+
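+// HasConfigMapPropagationTimeout checks whether config map propagation timeout is specified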
+func (t *Reconciling) HasConfigMapPropagationTimeout() bool {
+ return t.GetConfigMapPropagationTimeout() > 0
+}
+
+// GetConfigMapPropagationTimeout gets config map propagation timeout
+func (t *Reconciling) GetConfigMapPropagationTimeout() int {
+ if t == nil {
+ return 0
+ }
+ return t.ConfigMapPropagationTimeout
+}
+
+// SetConfigMapPropagationTimeout sets config map propagation timeout
+func (t *Reconciling) SetConfigMapPropagationTimeout(timeout int) {
+ if t == nil {
+ return
+ }
+ t.ConfigMapPropagationTimeout = timeout
+}
+
+// GetConfigMapPropagationTimeoutDuration gets config map propagation timeout duration
+func (t *Reconciling) GetConfigMapPropagationTimeoutDuration() time.Duration {
+ if t == nil {
+ return 0
+ }
+ return time.Duration(t.GetConfigMapPropagationTimeout()) * time.Second
+}
+
+// Possible reconcile policy values
+const (
+ ReconcilingPolicyUnspecified = "unspecified"
+ ReconcilingPolicyWait = "wait"
+ ReconcilingPolicyNoWait = "nowait"
+)
+
+// IsReconcilingPolicyWait checks whether reconcile policy is "wait"
+func (t *Reconciling) IsReconcilingPolicyWait() bool {
+ return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyWait
+}
+
+// IsReconcilingPolicyNoWait checks whether reconcile policy is "no wait"
+func (t *Reconciling) IsReconcilingPolicyNoWait() bool {
+ return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyNoWait
+}
+
+// GetCleanup gets cleanup
+func (t *Reconciling) GetCleanup() *Cleanup {
+ if t == nil {
+ return nil
+ }
+ return t.Cleanup
+}
+
+// SetCleanup sets cleanup
+func (t *Reconciling) SetCleanup(cleanup *Cleanup) {
+ if t == nil {
+ return
+ }
+ t.Cleanup = cleanup
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
index 426749fad..d358a0a2e 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_replica.go
@@ -14,6 +14,37 @@
package v1
+// ChiReplica defines item of a replica section of .spec.configuration.clusters[n].replicas
+// TODO unify with ChiShard based on HostsSet
+type ChiReplica struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
+ // TODO refactor into map[string]Host
+ Hosts []*Host `json:"shards,omitempty" yaml:"shards,omitempty"`
+
+ Runtime ChiReplicaRuntime `json:"-" yaml:"-"`
+}
+
+type ChiReplicaRuntime struct {
+ Address ChiReplicaAddress `json:"-" yaml:"-"`
+ CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r *ChiReplicaRuntime) GetAddress() IReplicaAddress {
+ return &r.Address
+}
+
+func (r *ChiReplicaRuntime) SetCR(cr ICustomResource) {
+ r.CHI = cr.(*ClickHouseInstallation)
+}
+
+func (replica *ChiReplica) GetName() string {
+ return replica.Name
+}
+
// InheritSettingsFrom inherits settings from specified cluster
func (replica *ChiReplica) InheritSettingsFrom(cluster *Cluster) {
replica.Settings = replica.Settings.MergeFrom(cluster.Settings)
@@ -49,7 +80,7 @@ func (replica *ChiReplica) HasShardsCount() bool {
}
// WalkHosts walks over hosts
-func (replica *ChiReplica) WalkHosts(f func(host *ChiHost) error) []error {
+func (replica *ChiReplica) WalkHosts(f func(host *Host) error) []error {
res := make([]error, 0)
for shardIndex := range replica.Hosts {
@@ -63,9 +94,107 @@ func (replica *ChiReplica) WalkHosts(f func(host *ChiHost) error) []error {
// HostsCount returns number of hosts
func (replica *ChiReplica) HostsCount() int {
count := 0
- replica.WalkHosts(func(host *ChiHost) error {
+ replica.WalkHosts(func(host *Host) error {
count++
return nil
})
return count
}
+
+func (replica *ChiReplica) HasSettings() bool {
+ return replica.GetSettings() != nil
+}
+
+func (replica *ChiReplica) GetSettings() *Settings {
+ if replica == nil {
+ return nil
+ }
+ return replica.Settings
+}
+
+func (replica *ChiReplica) HasFiles() bool {
+ return replica.GetFiles() != nil
+}
+
+func (replica *ChiReplica) GetFiles() *Settings {
+ if replica == nil {
+ return nil
+ }
+ return replica.Files
+}
+
+func (replica *ChiReplica) HasTemplates() bool {
+ return replica.GetTemplates() != nil
+}
+
+func (replica *ChiReplica) GetTemplates() *TemplatesList {
+ if replica == nil {
+ return nil
+ }
+ return replica.Templates
+}
+
+func (replica *ChiReplica) GetRuntime() IReplicaRuntime {
+ if replica == nil {
+ return (*ChiReplicaRuntime)(nil)
+ }
+ return &replica.Runtime
+}
+
+// ChiReplicaAddress defines address of a replica within ClickHouseInstallation
+type ChiReplicaAddress struct {
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+ ReplicaName string `json:"replicaName,omitempty" yaml:"replicaName,omitempty"`
+ ReplicaIndex int `json:"replicaIndex,omitempty" yaml:"replicaIndex,omitempty"`
+}
+
+func (a *ChiReplicaAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *ChiReplicaAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *ChiReplicaAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *ChiReplicaAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChiReplicaAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChiReplicaAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChiReplicaAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChiReplicaAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (a *ChiReplicaAddress) GetReplicaName() string {
+ return a.ReplicaName
+}
+
+func (a *ChiReplicaAddress) SetReplicaName(name string) {
+ a.ReplicaName = name
+}
+
+func (a *ChiReplicaAddress) GetReplicaIndex() int {
+ return a.ReplicaIndex
+}
+
+func (a *ChiReplicaAddress) SetReplicaIndex(index int) {
+ a.ReplicaIndex = index
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_setting.go b/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
index d175a936e..6491b93d4 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_setting.go
@@ -41,6 +41,7 @@ type Setting struct {
vector []string
src *SettingSource
attributes map[string]string
+ embed bool
}
type SettingType string
@@ -283,3 +284,18 @@ func (s *Setting) parseDataSourceAddress(dataSourceAddress, defaultNamespace str
return addr, nil
}
+
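+// SetEmbed marks the setting as embedded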
+func (s *Setting) SetEmbed() *Setting {
+ if s == nil {
+ return nil
+ }
+ s.embed = true
+ return s
+}
+
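+// IsEmbed checks whether the setting is marked as embedded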
+func (s *Setting) IsEmbed() bool {
+ if s == nil {
+ return false
+ }
+ return s.embed
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go b/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
index fe3d140b9..6da81856b 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_setting_data_source.go
@@ -16,7 +16,6 @@ package v1
import (
"encoding/json"
-
core "k8s.io/api/core/v1"
)
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_setting_scalar.go b/pkg/apis/clickhouse.altinity.com/v1/type_setting_scalar.go
index 61132f913..a5407049c 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_setting_scalar.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_setting_scalar.go
@@ -19,6 +19,8 @@ import (
"math"
"reflect"
"strconv"
+
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
)
// NewSettingScalar makes new scalar Setting
@@ -38,15 +40,21 @@ func NewSettingScalarFromAny(untyped any) (*Setting, bool) {
return nil, false
}
+// MustNewSettingScalarFromAny makes new scalar Setting from an untyped value, returns nil if the value cannot be parsed
+func MustNewSettingScalarFromAny(untyped any) *Setting {
+ if scalar, ok := parseSettingScalarValue(untyped); ok {
+ return NewSettingScalar(scalar)
+ }
+
+ return nil
+}
+
const (
	// Float with fractional part less than ignoreThreshold is considered to be int and is cast to int
ignoreThreshold = 0.001
)
func parseSettingScalarValue(untyped any) (string, bool) {
- var scalarValue string
- var isKnownType bool
-
typeOf := reflect.TypeOf(untyped)
if typeOf == nil {
// Unable to determine type of the value
@@ -54,6 +62,9 @@ func parseSettingScalarValue(untyped any) (string, bool) {
}
switch untyped.(type) {
+ case fmt.Stringer:
+ stringer := untyped.(fmt.Stringer)
+		return stringer.String(), true
case // scalar
int, uint,
int8, uint8,
@@ -62,8 +73,7 @@ func parseSettingScalarValue(untyped any) (string, bool) {
int64, uint64,
bool,
string:
- scalarValue = fmt.Sprintf("%v", untyped)
- isKnownType = true
+ return fmt.Sprintf("%v", untyped), true
case // scalar
float32:
floatVal := untyped.(float32)
@@ -72,13 +82,12 @@ func parseSettingScalarValue(untyped any) (string, bool) {
_, frac := math.Modf(float64(floatVal))
if frac > ignoreThreshold {
// Consider it float
- scalarValue = fmt.Sprintf("%f", untyped)
+ return fmt.Sprintf("%f", untyped), true
} else {
// Consider it int
intVal := int64(floatVal)
- scalarValue = fmt.Sprintf("%v", intVal)
+ return fmt.Sprintf("%v", intVal), true
}
- isKnownType = true
case // scalar
float64:
floatVal := untyped.(float64)
@@ -87,21 +96,21 @@ func parseSettingScalarValue(untyped any) (string, bool) {
_, frac := math.Modf(floatVal)
if frac > ignoreThreshold {
// Consider it float
- scalarValue = fmt.Sprintf("%f", untyped)
+ return fmt.Sprintf("%f", untyped), true
} else {
// Consider it int
intVal := int64(floatVal)
- scalarValue = fmt.Sprintf("%v", intVal)
+ return fmt.Sprintf("%v", intVal), true
}
- isKnownType = true
}
- if isKnownType {
- return scalarValue, true
- }
return "", false
}
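+// IsEmpty checks whether the setting is empty (nil)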
+func (s *Setting) IsEmpty() bool {
+ return s == nil
+}
+
// IsScalar checks whether setting is a scalar value
func (s *Setting) IsScalar() bool {
return s.Type() == SettingTypeScalar
@@ -127,6 +136,18 @@ func (s *Setting) ScalarInt() int {
return 0
}
+// ScalarInt32Ptr gets scalar value of a setting as a *types.Int32
+func (s *Setting) ScalarInt32Ptr() *types.Int32 {
+ if s == nil {
+ return nil
+ }
+ if value, err := strconv.Atoi(s.scalar); err == nil {
+ return types.NewInt32(int32(value))
+ }
+
+ return nil
+}
+
// scalarAsAny gets scalar value of a setting as any
func (s *Setting) scalarAsAny() any {
if s == nil {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
index f8fbaed58..8e7f2e6e4 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
@@ -15,6 +15,7 @@
package v1
import (
+ "bytes"
"encoding/json"
"fmt"
"regexp"
@@ -23,7 +24,9 @@ import (
"gopkg.in/d4l3k/messagediff.v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/util"
+ "github.com/altinity/clickhouse-operator/pkg/xml"
)
// Specify returned errors for being re-used
@@ -155,6 +158,14 @@ func (s *Settings) WalkSafe(f func(name string, setting *Setting)) {
})
}
+// WalkNames walks over settings with a function. Function receives name.
+// Storage key is used internally.
+func (s *Settings) WalkNames(f func(name string)) {
+ s.WalkKeys(func(key string, _setting *Setting) {
+ f(s.Key2Name(key))
+ })
+}
+
// HasKey checks whether key setting exists.
func (s *Settings) HasKey(key string) bool {
if s == nil {
@@ -192,6 +203,12 @@ func (s *Settings) Get(name string) *Setting {
return s.GetKey(s.Name2Key(name))
}
+// GetA gets named setting as an any-typed value.
+// Storage key is used internally.
+func (s *Settings) GetA(name string) any {
+ return s.GetKey(s.Name2Key(name))
+}
+
// SetKey sets key setting.
func (s *Settings) SetKey(key string, setting *Setting) *Settings {
if s == nil {
@@ -350,43 +367,53 @@ func (s *Settings) MarshalJSON() ([]byte, error) {
return json.Marshal(raw)
}
-// fetchPort is the base function to fetch int32 port value
-func (s *Settings) fetchPort(name string) int32 {
- return int32(s.Get(name).ScalarInt())
+// fetchPort is the base function to fetch *Int32 port value
+func (s *Settings) fetchPort(name string) *types.Int32 {
+ return s.Get(name).ScalarInt32Ptr()
}
// GetTCPPort gets TCP port from settings
-func (s *Settings) GetTCPPort() int32 {
+func (s *Settings) GetTCPPort() *types.Int32 {
return s.fetchPort("tcp_port")
}
// GetTCPPortSecure gets TCP port secure from settings
-func (s *Settings) GetTCPPortSecure() int32 {
+func (s *Settings) GetTCPPortSecure() *types.Int32 {
return s.fetchPort("tcp_port_secure")
}
// GetHTTPPort gets HTTP port from settings
-func (s *Settings) GetHTTPPort() int32 {
+func (s *Settings) GetHTTPPort() *types.Int32 {
return s.fetchPort("http_port")
}
// GetHTTPSPort gets HTTPS port from settings
-func (s *Settings) GetHTTPSPort() int32 {
+func (s *Settings) GetHTTPSPort() *types.Int32 {
return s.fetchPort("https_port")
}
// GetInterserverHTTPPort gets interserver HTTP port from settings
-func (s *Settings) GetInterserverHTTPPort() int32 {
+func (s *Settings) GetInterserverHTTPPort() *types.Int32 {
return s.fetchPort("interserver_http_port")
}
-// MergeFrom merges into `dst` non-empty new-key-values from `src` in case no such `key` already in `src`
-func (s *Settings) MergeFrom(src *Settings) *Settings {
- if src.Len() == 0 {
+// GetZKPort gets Zookeeper port from settings
+func (s *Settings) GetZKPort() *types.Int32 {
+ return s.fetchPort("keeper_server/tcp_port")
+}
+
+// GetRaftPort gets Raft port from settings
+func (s *Settings) GetRaftPort() *types.Int32 {
+ return s.fetchPort("keeper_server/raft_configuration/server/port")
+}
+
+// MergeFrom merges into `s` non-empty key-values from `from` in case no such key exists in `s` yet
+func (s *Settings) MergeFrom(from *Settings) *Settings {
+ if from.Len() == 0 {
return s
}
- src.Walk(func(name string, value *Setting) {
+ from.Walk(func(name string, value *Setting) {
s = s.Ensure().SetIfNotExists(name, value)
})
@@ -416,7 +443,7 @@ func (s *Settings) GetSection(section SettingsSection, includeSettingWithNoSecti
}
s.WalkKeys(func(key string, setting *Setting) {
- _section, err := getSectionFromPath(key)
+ _section, err := GetSectionFromPath(key)
switch {
case (err == nil) && !_section.Equal(section):
// Section is specified in this key.
@@ -467,7 +494,7 @@ func (s *Settings) Filter(
}
s.WalkKeys(func(key string, _ *Setting) {
- section, err := getSectionFromPath(key)
+ section, err := GetSectionFromPath(key)
if (err != nil) && (err != errorNoSectionSpecified) {
// We have a complex error, skip to the next
@@ -552,6 +579,30 @@ func (s *Settings) normalizeKeys() {
}
}
+const xmlTagClickHouse = "clickhouse"
+
+// ClickHouseConfig produces ClickHouse config
+func (s *Settings) ClickHouseConfig(_prefix ...string) string {
+ if s.Len() == 0 {
+ return ""
+ }
+
+ prefix := ""
+ if len(_prefix) > 0 {
+ prefix = _prefix[0]
+ }
+
+ b := &bytes.Buffer{}
+ //
+ // XML code
+ //
+ util.Iline(b, 0, "<"+xmlTagClickHouse+">")
+ xml.GenerateFromSettings(b, s, prefix)
+	util.Iline(b, 0, "</"+xmlTagClickHouse+">")
+
+ return b.String()
+}
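+
+// For illustration (assuming path-style setting names such as "logger/level"),
+// the produced config is nested XML wrapped into the <clickhouse> tag:
+//
+//	<clickhouse>
+//	    <logger>
+//	        <level>debug</level>
+//	    </logger>
+//	</clickhouse>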
+
// normalizeKeyAsPath normalizes key which is treated as a path
// Normalized key looks like 'a/b/c'
// Used in in .spec.configuration.{users, profiles, quotas, settings, files} sections
@@ -602,8 +653,8 @@ func getSuffixFromPath(path string) (string, error) {
return suffix, nil
}
-// getSectionFromPath
-func getSectionFromPath(path string) (SettingsSection, error) {
+// GetSectionFromPath gets settings section from the specified path
+func GetSectionFromPath(path string) (SettingsSection, error) {
// String representation of the section
section, err := getPrefixFromPath(path)
if err != nil {
@@ -614,11 +665,17 @@ func getSectionFromPath(path string) (SettingsSection, error) {
// Check dir names to determine which section path points to
configDir := section
switch {
- case strings.EqualFold(configDir, CommonConfigDir):
+ case strings.EqualFold(configDir, CommonConfigDirClickHouse):
+ return SectionCommon, nil
+ case strings.EqualFold(configDir, UsersConfigDirClickHouse):
+ return SectionUsers, nil
+ case strings.EqualFold(configDir, HostConfigDirClickHouse):
+ return SectionHost, nil
+ case strings.EqualFold(configDir, CommonConfigDirKeeper):
return SectionCommon, nil
- case strings.EqualFold(configDir, UsersConfigDir):
+ case strings.EqualFold(configDir, UsersConfigDirKeeper):
return SectionUsers, nil
- case strings.EqualFold(configDir, HostConfigDir):
+ case strings.EqualFold(configDir, HostConfigDirKeeper):
return SectionHost, nil
}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
index f714054f4..22c07ae79 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_shard.go
@@ -14,6 +14,54 @@
package v1
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// ChiShard defines item of a shard section of .spec.configuration.clusters[n].shards
+// TODO unify with ChiReplica based on HostsSet
+type ChiShard struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Weight *int `json:"weight,omitempty" yaml:"weight,omitempty"`
+ InternalReplication *types.StringBool `json:"internalReplication,omitempty" yaml:"internalReplication,omitempty"`
+ Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
+ Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
+ Templates *TemplatesList `json:"templates,omitempty" yaml:"templates,omitempty"`
+ ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
+ // TODO refactor into map[string]Host
+ Hosts []*Host `json:"replicas,omitempty" yaml:"replicas,omitempty"`
+
+ Runtime ChiShardRuntime `json:"-" yaml:"-"`
+
+ // DefinitionType is DEPRECATED - to be removed soon
+ DefinitionType string `json:"definitionType,omitempty" yaml:"definitionType,omitempty"`
+}
+
+type ChiShardRuntime struct {
+ Address ChiShardAddress `json:"-" yaml:"-"`
+ CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
+}
+
+func (r *ChiShardRuntime) GetAddress() IShardAddress {
+ return &r.Address
+}
+
+func (r *ChiShardRuntime) GetCR() ICustomResource {
+ return r.CHI
+}
+
+func (r *ChiShardRuntime) SetCR(cr ICustomResource) {
+ r.CHI = cr.(*ClickHouseInstallation)
+}
+
+func (shard *ChiShard) GetName() string {
+ return shard.Name
+}
+
+func (shard *ChiShard) GetInternalReplication() *types.StringBool {
+ return shard.InternalReplication
+}
+
// InheritSettingsFrom inherits settings from specified cluster
func (shard *ChiShard) InheritSettingsFrom(cluster *Cluster) {
shard.Settings = shard.Settings.MergeFrom(cluster.Settings)
@@ -49,7 +97,7 @@ func (shard *ChiShard) HasReplicasCount() bool {
}
// WalkHosts runs specified function on each host
-func (shard *ChiShard) WalkHosts(f func(host *ChiHost) error) []error {
+func (shard *ChiShard) WalkHosts(f func(host *Host) error) []error {
if shard == nil {
return nil
}
@@ -64,10 +112,26 @@ func (shard *ChiShard) WalkHosts(f func(host *ChiHost) error) []error {
return res
}
+// WalkHostsAbortOnError runs the specified function on each host and aborts on the first error
+func (shard *ChiShard) WalkHostsAbortOnError(f func(host *Host) error) error {
+ if shard == nil {
+ return nil
+ }
+
+ for replicaIndex := range shard.Hosts {
+ host := shard.Hosts[replicaIndex]
+ if err := f(host); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// FindHost finds host by name or index.
// Expectations: name is expected to be a string, index is expected to be an int.
-func (shard *ChiShard) FindHost(needle interface{}) (res *ChiHost) {
- shard.WalkHosts(func(host *ChiHost) error {
+func (shard *ChiShard) FindHost(needle interface{}) (res *Host) {
+ shard.WalkHosts(func(host *Host) error {
switch v := needle.(type) {
case string:
if host.Runtime.Address.HostName == v {
@@ -84,9 +148,9 @@ func (shard *ChiShard) FindHost(needle interface{}) (res *ChiHost) {
}
// FirstHost finds first host in the shard
-func (shard *ChiShard) FirstHost() *ChiHost {
- var result *ChiHost
- shard.WalkHosts(func(host *ChiHost) error {
+func (shard *ChiShard) FirstHost() *Host {
+ var result *Host
+ shard.WalkHosts(func(host *Host) error {
if result == nil {
result = host
}
@@ -98,21 +162,21 @@ func (shard *ChiShard) FirstHost() *ChiHost {
// HostsCount returns count of hosts in the shard
func (shard *ChiShard) HostsCount() int {
count := 0
- shard.WalkHosts(func(host *ChiHost) error {
+ shard.WalkHosts(func(host *Host) error {
count++
return nil
})
return count
}
-// GetCHI gets CHI of the shard
+// GetCHI gets Custom Resource of the shard
func (shard *ChiShard) GetCHI() *ClickHouseInstallation {
return shard.Runtime.CHI
}
// GetCluster gets cluster of the shard
func (shard *ChiShard) GetCluster() *Cluster {
- return shard.Runtime.CHI.Spec.Configuration.Clusters[shard.Runtime.Address.ClusterIndex]
+ return shard.Runtime.CHI.GetSpecT().Configuration.Clusters[shard.Runtime.Address.ClusterIndex]
}
// HasWeight checks whether shard has applicable weight value specified
@@ -133,3 +197,101 @@ func (shard *ChiShard) GetWeight() int {
}
return 0
}
+
+func (shard *ChiShard) GetRuntime() IShardRuntime {
+ if shard == nil {
+ return (*ChiShardRuntime)(nil)
+ }
+ return &shard.Runtime
+}
+
+func (shard *ChiShard) HasSettings() bool {
+ return shard.GetSettings() != nil
+}
+
+func (shard *ChiShard) GetSettings() *Settings {
+ if shard == nil {
+ return nil
+ }
+ return shard.Settings
+}
+
+func (shard *ChiShard) HasFiles() bool {
+ return shard.GetFiles() != nil
+}
+
+func (shard *ChiShard) GetFiles() *Settings {
+ if shard == nil {
+ return nil
+ }
+ return shard.Files
+}
+
+func (shard *ChiShard) HasTemplates() bool {
+ return shard.GetTemplates() != nil
+}
+
+func (shard *ChiShard) GetTemplates() *TemplatesList {
+ if shard == nil {
+ return nil
+ }
+ return shard.Templates
+}
+
+// ChiShardAddress defines address of a shard within ClickHouseInstallation
+type ChiShardAddress struct {
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
+ ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
+ ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
+ ShardName string `json:"shardName,omitempty" yaml:"shardName,omitempty"`
+ ShardIndex int `json:"shardIndex,omitempty" yaml:"shardIndex,omitempty"`
+}
+
+func (a *ChiShardAddress) GetNamespace() string {
+ return a.Namespace
+}
+
+func (a *ChiShardAddress) SetNamespace(namespace string) {
+ a.Namespace = namespace
+}
+
+func (a *ChiShardAddress) GetCRName() string {
+ return a.CHIName
+}
+
+func (a *ChiShardAddress) SetCRName(name string) {
+ a.CHIName = name
+}
+
+func (a *ChiShardAddress) GetClusterName() string {
+ return a.ClusterName
+}
+
+func (a *ChiShardAddress) SetClusterName(name string) {
+ a.ClusterName = name
+}
+
+func (a *ChiShardAddress) GetClusterIndex() int {
+ return a.ClusterIndex
+}
+
+func (a *ChiShardAddress) SetClusterIndex(index int) {
+ a.ClusterIndex = index
+}
+
+func (a *ChiShardAddress) GetShardName() string {
+ return a.ShardName
+}
+
+func (a *ChiShardAddress) SetShardName(name string) {
+ a.ShardName = name
+}
+
+func (a *ChiShardAddress) GetShardIndex() int {
+ return a.ShardIndex
+}
+
+func (a *ChiShardAddress) SetShardIndex(index int) {
+ a.ShardIndex = index
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_spec.go b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
new file mode 100644
index 000000000..11b50e4a5
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_spec.go
@@ -0,0 +1,129 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// ChiSpec defines spec section of ClickHouseInstallation resource
+type ChiSpec struct {
+ TaskID *types.String `json:"taskID,omitempty" yaml:"taskID,omitempty"`
+ Stop *types.StringBool `json:"stop,omitempty" yaml:"stop,omitempty"`
+ Restart *types.String `json:"restart,omitempty" yaml:"restart,omitempty"`
+ Troubleshoot *types.StringBool `json:"troubleshoot,omitempty" yaml:"troubleshoot,omitempty"`
+ NamespaceDomainPattern *types.String `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
+ Templating *ChiTemplating `json:"templating,omitempty" yaml:"templating,omitempty"`
+ Reconciling *Reconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
+ Defaults *Defaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
+ Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
+ Templates *Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
+ UseTemplates []*TemplateRef `json:"useTemplates,omitempty" yaml:"useTemplates,omitempty"`
+}
+
+// HasTaskID checks whether task id is specified
+func (spec *ChiSpec) HasTaskID() bool {
+ return len(spec.TaskID.Value()) > 0
+}
+
+// GetTaskID gets task id as a string
+func (spec *ChiSpec) GetTaskID() string {
+ return spec.TaskID.Value()
+}
+
+func (spec *ChiSpec) GetStop() *types.StringBool {
+ return spec.Stop
+}
+
+func (spec *ChiSpec) GetRestart() *types.String {
+ return spec.Restart
+}
+
+func (spec *ChiSpec) GetTroubleshoot() *types.StringBool {
+ return spec.Troubleshoot
+}
+
+func (spec *ChiSpec) GetNamespaceDomainPattern() *types.String {
+ return spec.NamespaceDomainPattern
+}
+
+func (spec *ChiSpec) GetTemplating() *ChiTemplating {
+ return spec.Templating
+}
+
+func (spec *ChiSpec) GetDefaults() *Defaults {
+ return spec.Defaults
+}
+
+func (spec *ChiSpec) GetConfiguration() IConfiguration {
+ return spec.Configuration
+}
+
+func (spec *ChiSpec) GetTemplates() *Templates {
+ return spec.Templates
+}
+
+// MergeFrom merges from spec
+func (spec *ChiSpec) MergeFrom(from *ChiSpec, _type MergeType) {
+ if from == nil {
+ return
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ if !spec.HasTaskID() {
+ spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
+ }
+ if !spec.Stop.HasValue() {
+ spec.Stop = spec.Stop.MergeFrom(from.Stop)
+ }
+ if !spec.Restart.HasValue() {
+ spec.Restart = spec.Restart.MergeFrom(from.Restart)
+ }
+ if !spec.Troubleshoot.HasValue() {
+ spec.Troubleshoot = spec.Troubleshoot.MergeFrom(from.Troubleshoot)
+ }
+ if !spec.NamespaceDomainPattern.HasValue() {
+ spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
+ }
+ case MergeTypeOverrideByNonEmptyValues:
+ if from.HasTaskID() {
+ spec.TaskID = spec.TaskID.MergeFrom(from.TaskID)
+ }
+ if from.Stop.HasValue() {
+ // Override by non-empty values only
+ spec.Stop = from.Stop
+ }
+ if from.Restart.HasValue() {
+ // Override by non-empty values only
+ spec.Restart = spec.Restart.MergeFrom(from.Restart)
+ }
+ if from.Troubleshoot.HasValue() {
+ // Override by non-empty values only
+ spec.Troubleshoot = from.Troubleshoot
+ }
+ if from.NamespaceDomainPattern.HasValue() {
+ spec.NamespaceDomainPattern = spec.NamespaceDomainPattern.MergeFrom(from.NamespaceDomainPattern)
+ }
+ }
+
+ spec.Templating = spec.Templating.MergeFrom(from.Templating, _type)
+ spec.Reconciling = spec.Reconciling.MergeFrom(from.Reconciling, _type)
+ spec.Defaults = spec.Defaults.MergeFrom(from.Defaults, _type)
+ spec.Configuration = spec.Configuration.MergeFrom(from.Configuration, _type)
+ spec.Templates = spec.Templates.MergeFrom(from.Templates, _type)
+	// TODO maybe it would be wiser to make a more intelligent merge
+ spec.UseTemplates = append(spec.UseTemplates, from.UseTemplates...)
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_status.go b/pkg/apis/clickhouse.altinity.com/v1/type_status.go
index 2e869dcf5..bb5be021d 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_status.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_status.go
@@ -18,6 +18,7 @@ import (
"sort"
"sync"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/util"
"github.com/altinity/clickhouse-operator/pkg/version"
)
@@ -28,7 +29,7 @@ const (
maxTaskIDs = 10
)
-// Possible CHI statuses
+// Possible CR statuses
const (
StatusInProgress = "InProgress"
StatusCompleted = "Completed"
@@ -36,13 +37,13 @@ const (
StatusTerminating = "Terminating"
)
-// ChiStatus defines status section of ClickHouseInstallation resource.
+// Status defines status section of the custom resource.
//
-// Note: application level reads and writes to ChiStatus fields should be done through synchronized getter/setter functions.
+// Note: application level reads and writes to Status fields should be done through synchronized getter/setter functions.
// While all of these fields need to be exported for JSON and YAML serialization/deserialization, we can at least audit
// that application logic sticks to the synchronized getter/setters by auditing whether all explicit Go field-level
// accesses are strictly within _this_ source file OR the generated deep copy source file.
-type ChiStatus struct {
+type Status struct {
CHOpVersion string `json:"chop-version,omitempty" yaml:"chop-version,omitempty"`
CHOpCommit string `json:"chop-commit,omitempty" yaml:"chop-commit,omitempty"`
CHOpDate string `json:"chop-date,omitempty" yaml:"chop-date,omitempty"`
@@ -70,24 +71,14 @@ type ChiStatus struct {
PodIPs []string `json:"pod-ips,omitempty" yaml:"pod-ips,omitempty"`
FQDNs []string `json:"fqdns,omitempty" yaml:"fqdns,omitempty"`
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
- NormalizedCHI *ClickHouseInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
- NormalizedCHICompleted *ClickHouseInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
+ NormalizedCR *ClickHouseInstallation `json:"normalized,omitempty" yaml:"normalized,omitempty"`
+ NormalizedCRCompleted *ClickHouseInstallation `json:"normalizedCompleted,omitempty" yaml:"normalizedCompleted,omitempty"`
HostsWithTablesCreated []string `json:"hostsWithTablesCreated,omitempty" yaml:"hostsWithTablesCreated,omitempty"`
UsedTemplates []*TemplateRef `json:"usedTemplates,omitempty" yaml:"usedTemplates,omitempty"`
mu sync.RWMutex `json:"-" yaml:"-"`
}
-// CopyCHIStatusOptions specifies what to copy in CHI status options
-type CopyCHIStatusOptions struct {
- Actions bool
- Errors bool
- Normalized bool
- MainFields bool
- WholeStatus bool
- InheritableFields bool
-}
-
// FillStatusParams is a struct used to fill status params
type FillStatusParams struct {
CHOpIP string
@@ -104,14 +95,14 @@ type FillStatusParams struct {
Pods []string
FQDNs []string
Endpoint string
- NormalizedCHI *ClickHouseInstallation
+ NormalizedCR *ClickHouseInstallation
}
// Fill is a synchronized setter for a fairly large number of fields. We take a struct type "params" argument to avoid
// confusion of similarly typed positional arguments, and to avoid defining a lot of separate synchronized setters
// for these fields that are typically all set together at once (during "fills").
-func (s *ChiStatus) Fill(params *FillStatusParams) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) Fill(params *FillStatusParams) {
+ doWithWriteLock(s, func(s *Status) {
// We always set these (build-hardcoded) version fields.
s.CHOpVersion = version.Version
s.CHOpCommit = version.GitSHA
@@ -132,20 +123,20 @@ func (s *ChiStatus) Fill(params *FillStatusParams) {
s.Pods = params.Pods
s.FQDNs = params.FQDNs
s.Endpoint = params.Endpoint
- s.NormalizedCHI = params.NormalizedCHI
+ s.NormalizedCR = params.NormalizedCR
})
}
// SetError sets status error
-func (s *ChiStatus) SetError(err string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) SetError(err string) {
+ doWithWriteLock(s, func(s *Status) {
s.Error = err
})
}
// SetAndPushError sets and pushes error into status
-func (s *ChiStatus) SetAndPushError(err string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) SetAndPushError(err string) {
+ doWithWriteLock(s, func(s *Status) {
s.Error = err
s.Errors = append([]string{err}, s.Errors...)
if len(s.Errors) > maxErrors {
@@ -155,8 +146,8 @@ func (s *ChiStatus) SetAndPushError(err string) {
}
// PushHostTablesCreated pushes host to the list of hosts with created tables
-func (s *ChiStatus) PushHostTablesCreated(host string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) PushHostTablesCreated(host string) {
+ doWithWriteLock(s, func(s *Status) {
if util.InArray(host, s.HostsWithTablesCreated) {
return
}
@@ -165,8 +156,8 @@ func (s *ChiStatus) PushHostTablesCreated(host string) {
}
// SyncHostTablesCreated syncs list of hosts with tables created with actual list of hosts
-func (s *ChiStatus) SyncHostTablesCreated() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) SyncHostTablesCreated() {
+ doWithWriteLock(s, func(s *Status) {
if s.FQDNs == nil {
return
}
@@ -175,47 +166,47 @@ func (s *ChiStatus) SyncHostTablesCreated() {
}
// PushUsedTemplate pushes used template to the list of used templates
-func (s *ChiStatus) PushUsedTemplate(templateRef *TemplateRef) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) PushUsedTemplate(templateRef *TemplateRef) {
+ doWithWriteLock(s, func(s *Status) {
s.UsedTemplates = append(s.UsedTemplates, templateRef)
})
}
// GetUsedTemplatesCount gets used templates count
-func (s *ChiStatus) GetUsedTemplatesCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetUsedTemplatesCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return len(s.UsedTemplates)
})
}
// SetAction action setter
-func (s *ChiStatus) SetAction(action string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) SetAction(action string) {
+ doWithWriteLock(s, func(s *Status) {
s.Action = action
})
}
-// HasNormalizedCHICompleted is a checker
-func (s *ChiStatus) HasNormalizedCHICompleted() bool {
- return s.GetNormalizedCHICompleted() != nil
+// HasNormalizedCRCompleted is a checker
+func (s *Status) HasNormalizedCRCompleted() bool {
+ return s.GetNormalizedCRCompleted() != nil
}
-// HasNormalizedCHI is a checker
-func (s *ChiStatus) HasNormalizedCHI() bool {
- return s.GetNormalizedCHI() != nil
+// HasNormalizedCR is a checker
+func (s *Status) HasNormalizedCR() bool {
+ return s.GetNormalizedCR() != nil
}
// PushAction pushes action into status
-func (s *ChiStatus) PushAction(action string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) PushAction(action string) {
+ doWithWriteLock(s, func(s *Status) {
s.Actions = append([]string{action}, s.Actions...)
trimActionsNoSync(s)
})
}
// PushError sets and pushes error into status
-func (s *ChiStatus) PushError(error string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) PushError(error string) {
+ doWithWriteLock(s, func(s *Status) {
s.Errors = append([]string{error}, s.Errors...)
if len(s.Errors) > maxErrors {
s.Errors = s.Errors[:maxErrors]
@@ -224,57 +215,57 @@ func (s *ChiStatus) PushError(error string) {
}
// SetPodIPs sets pod IPs
-func (s *ChiStatus) SetPodIPs(podIPs []string) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) SetPodIPs(podIPs []string) {
+ doWithWriteLock(s, func(s *Status) {
s.PodIPs = podIPs
})
}
// HostDeleted increments deleted hosts counter
-func (s *ChiStatus) HostDeleted() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostDeleted() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsDeletedCount++
})
}
// HostUpdated increments updated hosts counter
-func (s *ChiStatus) HostUpdated() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostUpdated() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsUpdatedCount++
})
}
// HostAdded increments added hosts counter
-func (s *ChiStatus) HostAdded() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostAdded() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsAddedCount++
})
}
// HostUnchanged increments unchanged hosts counter
-func (s *ChiStatus) HostUnchanged() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostUnchanged() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsUnchangedCount++
})
}
// HostFailed increments failed hosts counter
-func (s *ChiStatus) HostFailed() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostFailed() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsFailedCount++
})
}
// HostCompleted increments completed hosts counter
-func (s *ChiStatus) HostCompleted() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) HostCompleted() {
+ doWithWriteLock(s, func(s *Status) {
s.HostsCompletedCount++
})
}
// ReconcileStart marks reconcile start
-func (s *ChiStatus) ReconcileStart(deleteHostsCount int) {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) ReconcileStart(deleteHostsCount int) {
+ doWithWriteLock(s, func(s *Status) {
if s == nil {
return
}
@@ -290,8 +281,8 @@ func (s *ChiStatus) ReconcileStart(deleteHostsCount int) {
}
// ReconcileComplete marks reconcile completion
-func (s *ChiStatus) ReconcileComplete() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) ReconcileComplete() {
+ doWithWriteLock(s, func(s *Status) {
if s == nil {
return
}
@@ -302,8 +293,8 @@ func (s *ChiStatus) ReconcileComplete() {
}
// ReconcileAbort marks reconcile abortion
-func (s *ChiStatus) ReconcileAbort() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) ReconcileAbort() {
+ doWithWriteLock(s, func(s *Status) {
if s == nil {
return
}
@@ -314,8 +305,8 @@ func (s *ChiStatus) ReconcileAbort() {
}
// DeleteStart marks deletion start
-func (s *ChiStatus) DeleteStart() {
- doWithWriteLock(s, func(s *ChiStatus) {
+func (s *Status) DeleteStart() {
+ doWithWriteLock(s, func(s *Status) {
if s == nil {
return
}
@@ -330,10 +321,10 @@ func (s *ChiStatus) DeleteStart() {
})
}
-// CopyFrom copies the state of a given ChiStatus f into the receiver ChiStatus of the call.
-func (s *ChiStatus) CopyFrom(f *ChiStatus, opts CopyCHIStatusOptions) {
- doWithWriteLock(s, func(s *ChiStatus) {
- doWithReadLock(f, func(from *ChiStatus) {
+// CopyFrom copies the state of a given Status f into the receiver Status of the call.
+func (s *Status) CopyFrom(f *Status, opts types.CopyStatusOptions) {
+ doWithWriteLock(s, func(s *Status) {
+ doWithReadLock(f, func(from *Status) {
if s == nil || from == nil {
return
}
@@ -392,11 +383,11 @@ func (s *ChiStatus) CopyFrom(f *ChiStatus, opts CopyCHIStatusOptions) {
s.PodIPs = from.PodIPs
s.FQDNs = from.FQDNs
s.Endpoint = from.Endpoint
- s.NormalizedCHI = from.NormalizedCHI
+ s.NormalizedCR = from.NormalizedCR
}
if opts.Normalized {
- s.NormalizedCHI = from.NormalizedCHI
+ s.NormalizedCR = from.NormalizedCR
}
if opts.WholeStatus {
@@ -426,240 +417,240 @@ func (s *ChiStatus) CopyFrom(f *ChiStatus, opts CopyCHIStatusOptions) {
s.PodIPs = from.PodIPs
s.FQDNs = from.FQDNs
s.Endpoint = from.Endpoint
- s.NormalizedCHI = from.NormalizedCHI
- s.NormalizedCHICompleted = from.NormalizedCHICompleted
+ s.NormalizedCR = from.NormalizedCR
+ s.NormalizedCRCompleted = from.NormalizedCRCompleted
}
})
})
}
-// ClearNormalizedCHI clears normalized CHI in status
-func (s *ChiStatus) ClearNormalizedCHI() {
- doWithWriteLock(s, func(s *ChiStatus) {
- s.NormalizedCHI = nil
+// ClearNormalizedCR clears normalized CR in status
+func (s *Status) ClearNormalizedCR() {
+ doWithWriteLock(s, func(s *Status) {
+ s.NormalizedCR = nil
})
}
-// SetNormalizedCompletedFromCurrentNormalized sets completed CHI from current CHI
-func (s *ChiStatus) SetNormalizedCompletedFromCurrentNormalized() {
- doWithWriteLock(s, func(s *ChiStatus) {
- s.NormalizedCHICompleted = s.NormalizedCHI
+// SetNormalizedCompletedFromCurrentNormalized sets completed CR from current CR
+func (s *Status) SetNormalizedCompletedFromCurrentNormalized() {
+ doWithWriteLock(s, func(s *Status) {
+ s.NormalizedCRCompleted = s.NormalizedCR
})
}
// GetCHOpVersion gets operator version
-func (s *ChiStatus) GetCHOpVersion() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetCHOpVersion() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.CHOpVersion
})
}
// GetCHOpCommit gets operator build commit
-func (s *ChiStatus) GetCHOpCommit() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetCHOpCommit() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.CHOpCommit
})
}
// GetCHOpDate gets operator build date
-func (s *ChiStatus) GetCHOpDate() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetCHOpDate() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.CHOpDate
})
}
// GetCHOpIP gets operator pod's IP
-func (s *ChiStatus) GetCHOpIP() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetCHOpIP() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.CHOpIP
})
}
// GetClustersCount gets clusters count
-func (s *ChiStatus) GetClustersCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetClustersCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.ClustersCount
})
}
// GetShardsCount gets shards count
-func (s *ChiStatus) GetShardsCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetShardsCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.ShardsCount
})
}
// GetReplicasCount gets replicas count
-func (s *ChiStatus) GetReplicasCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetReplicasCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.ReplicasCount
})
}
// GetHostsCount gets hosts count
-func (s *ChiStatus) GetHostsCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsCount
})
}
// GetStatus gets status
-func (s *ChiStatus) GetStatus() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetStatus() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.Status
})
}
// GetTaskID gets task id
-func (s *ChiStatus) GetTaskID() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetTaskID() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.TaskID
})
}
// GetTaskIDsStarted gets started task ids
-func (s *ChiStatus) GetTaskIDsStarted() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetTaskIDsStarted() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.TaskIDsStarted
})
}
// GetTaskIDsCompleted gets completed task ids
-func (s *ChiStatus) GetTaskIDsCompleted() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetTaskIDsCompleted() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.TaskIDsCompleted
})
}
// GetAction gets last action
-func (s *ChiStatus) GetAction() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetAction() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.Action
})
}
// GetActions gets all actions
-func (s *ChiStatus) GetActions() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetActions() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.Actions
})
}
// GetError gets last error
-func (s *ChiStatus) GetError() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetError() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.Error
})
}
// GetErrors gets all errors
-func (s *ChiStatus) GetErrors() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetErrors() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.Errors
})
}
// GetHostsUpdatedCount gets updated hosts counter
-func (s *ChiStatus) GetHostsUpdatedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsUpdatedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsUpdatedCount
})
}
// GetHostsAddedCount gets added hosts counter
-func (s *ChiStatus) GetHostsAddedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsAddedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsAddedCount
})
}
// GetHostsUnchangedCount gets unchanged hosts counter
-func (s *ChiStatus) GetHostsUnchangedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsUnchangedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsUnchangedCount
})
}
// GetHostsFailedCount gets failed hosts counter
-func (s *ChiStatus) GetHostsFailedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsFailedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsFailedCount
})
}
// GetHostsCompletedCount gets completed hosts counter
-func (s *ChiStatus) GetHostsCompletedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsCompletedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsCompletedCount
})
}
// GetHostsDeletedCount gets deleted hosts counter
-func (s *ChiStatus) GetHostsDeletedCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsDeletedCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsDeletedCount
})
}
// GetHostsDeleteCount gets hosts to be deleted counter
-func (s *ChiStatus) GetHostsDeleteCount() int {
- return getIntWithReadLock(s, func(s *ChiStatus) int {
+func (s *Status) GetHostsDeleteCount() int {
+ return getIntWithReadLock(s, func(s *Status) int {
return s.HostsDeleteCount
})
}
// GetPods gets list of pods
-func (s *ChiStatus) GetPods() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetPods() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.Pods
})
}
// GetPodIPs gets list of pod ips
-func (s *ChiStatus) GetPodIPs() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetPodIPs() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.PodIPs
})
}
// GetFQDNs gets list of all FQDNs of hosts
-func (s *ChiStatus) GetFQDNs() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetFQDNs() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.FQDNs
})
}
// GetEndpoint gets API endpoint
-func (s *ChiStatus) GetEndpoint() string {
- return getStringWithReadLock(s, func(s *ChiStatus) string {
+func (s *Status) GetEndpoint() string {
+ return getStringWithReadLock(s, func(s *Status) string {
return s.Endpoint
})
}
-// GetNormalizedCHI gets target CHI
-func (s *ChiStatus) GetNormalizedCHI() *ClickHouseInstallation {
- return getInstallationWithReadLock(s, func(s *ChiStatus) *ClickHouseInstallation {
- return s.NormalizedCHI
+// GetNormalizedCR gets target CR
+func (s *Status) GetNormalizedCR() *ClickHouseInstallation {
+ return getInstallationWithReadLock(s, func(s *Status) *ClickHouseInstallation {
+ return s.NormalizedCR
})
}
-// GetNormalizedCHICompleted gets completed CHI
-func (s *ChiStatus) GetNormalizedCHICompleted() *ClickHouseInstallation {
- return getInstallationWithReadLock(s, func(s *ChiStatus) *ClickHouseInstallation {
- return s.NormalizedCHICompleted
+// GetNormalizedCRCompleted gets completed CR
+func (s *Status) GetNormalizedCRCompleted() *ClickHouseInstallation {
+ return getInstallationWithReadLock(s, func(s *Status) *ClickHouseInstallation {
+ return s.NormalizedCRCompleted
})
}
// GetHostsWithTablesCreated gets hosts with created tables
-func (s *ChiStatus) GetHostsWithTablesCreated() []string {
- return getStringArrWithReadLock(s, func(s *ChiStatus) []string {
+func (s *Status) GetHostsWithTablesCreated() []string {
+ return getStringArrWithReadLock(s, func(s *Status) []string {
return s.HostsWithTablesCreated
})
}
// Begin helpers
-func doWithWriteLock(s *ChiStatus, f func(s *ChiStatus)) {
+func doWithWriteLock(s *Status, f func(s *Status)) {
if s == nil {
return
}
@@ -669,7 +660,7 @@ func doWithWriteLock(s *ChiStatus, f func(s *ChiStatus)) {
f(s)
}
-func doWithReadLock(s *ChiStatus, f func(s *ChiStatus)) {
+func doWithReadLock(s *Status, f func(s *Status)) {
if s == nil {
return
}
@@ -679,7 +670,7 @@ func doWithReadLock(s *ChiStatus, f func(s *ChiStatus)) {
f(s)
}
-func getIntWithReadLock(s *ChiStatus, f func(s *ChiStatus) int) int {
+func getIntWithReadLock(s *Status, f func(s *Status) int) int {
var zeroVal int
if s == nil {
return zeroVal
@@ -690,7 +681,7 @@ func getIntWithReadLock(s *ChiStatus, f func(s *ChiStatus) int) int {
return f(s)
}
-func getStringWithReadLock(s *ChiStatus, f func(s *ChiStatus) string) string {
+func getStringWithReadLock(s *Status, f func(s *Status) string) string {
var zeroVal string
if s == nil {
return zeroVal
@@ -701,7 +692,7 @@ func getStringWithReadLock(s *ChiStatus, f func(s *ChiStatus) string) string {
return f(s)
}
-func getInstallationWithReadLock(s *ChiStatus, f func(s *ChiStatus) *ClickHouseInstallation) *ClickHouseInstallation {
+func getInstallationWithReadLock(s *Status, f func(s *Status) *ClickHouseInstallation) *ClickHouseInstallation {
var zeroVal *ClickHouseInstallation
if s == nil {
return zeroVal
@@ -712,7 +703,7 @@ func getInstallationWithReadLock(s *ChiStatus, f func(s *ChiStatus) *ClickHouseI
return f(s)
}
-func getStringArrWithReadLock(s *ChiStatus, f func(s *ChiStatus) []string) []string {
+func getStringArrWithReadLock(s *Status, f func(s *Status) []string) []string {
emptyArr := make([]string, 0, 0)
if s == nil {
return emptyArr
@@ -725,21 +716,21 @@ func getStringArrWithReadLock(s *ChiStatus, f func(s *ChiStatus) []string) []str
// mergeActionsNoSync merges the actions of from into those of s (without synchronization, because synchronized
// functions call into this).
-func mergeActionsNoSync(s *ChiStatus, from *ChiStatus) {
+func mergeActionsNoSync(s *Status, from *Status) {
s.Actions = util.MergeStringArrays(s.Actions, from.Actions)
sort.Sort(sort.Reverse(sort.StringSlice(s.Actions)))
trimActionsNoSync(s)
}
// trimActionsNoSync trims actions (without synchronization, because synchronized functions call into this).
-func trimActionsNoSync(s *ChiStatus) {
+func trimActionsNoSync(s *Status) {
if len(s.Actions) > maxActions {
s.Actions = s.Actions[:maxActions]
}
}
// pushTaskIDStartedNoSync pushes task id into status
-func pushTaskIDStartedNoSync(s *ChiStatus) {
+func pushTaskIDStartedNoSync(s *Status) {
s.TaskIDsStarted = append([]string{s.TaskID}, s.TaskIDsStarted...)
if len(s.TaskIDsStarted) > maxTaskIDs {
s.TaskIDsStarted = s.TaskIDsStarted[:maxTaskIDs]
@@ -747,7 +738,7 @@ func pushTaskIDStartedNoSync(s *ChiStatus) {
}
// pushTaskIDCompletedNoSync pushes task id into status w/o sync
-func pushTaskIDCompletedNoSync(s *ChiStatus) {
+func pushTaskIDCompletedNoSync(s *Status) {
s.TaskIDsCompleted = append([]string{s.TaskID}, s.TaskIDsCompleted...)
if len(s.TaskIDsCompleted) > maxTaskIDs {
s.TaskIDsCompleted = s.TaskIDsCompleted[:maxTaskIDs]
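// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): the helpers above
// implement a nil-safe, mutex-guarded accessor pattern: a nil *Status returns
// the zero value instead of panicking, and all reads/writes go through a lock.
// The self-contained analogue below uses a hypothetical counter type and
// assumes an RWMutex field, which this hunk does not show.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.RWMutex
	n  int
}

// doWithWriteLock mirrors the write path above: tolerate nil, then run f
// under a write lock.
func doWithWriteLock(c *counter, f func(c *counter)) {
	if c == nil {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	f(c)
}

// getIntWithReadLock mirrors the read path: a nil receiver yields the zero
// value, otherwise f runs under a read lock.
func getIntWithReadLock(c *counter, f func(c *counter) int) int {
	if c == nil {
		return 0
	}
	c.mu.RLock()
	defer c.mu.RUnlock()
	return f(c)
}

func main() {
	var nilCounter *counter
	fmt.Println(getIntWithReadLock(nilCounter, func(c *counter) int { return c.n })) // 0, no panic

	c := &counter{}
	doWithWriteLock(c, func(c *counter) { c.n++ })
	fmt.Println(getIntWithReadLock(c, func(c *counter) int { return c.n })) // 1
}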
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
index 6d4158840..1f1c3cc09 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_status_test.go
@@ -22,7 +22,7 @@ var fillParamsA = &FillStatusParams{
Pods: []string{"pod-a-1", "pod-a-2"},
FQDNs: []string{"fqdns-a-1", "fqdns-a-2"},
Endpoint: "endpoint-a",
- NormalizedCHI: normalizedChiA, // fields not recursively checked, this is only used as a pointer
+ NormalizedCR: normalizedChiA, // fields not recursively checked, this is only used as a pointer
}
var normalizedChiB = &ClickHouseInstallation{}
@@ -40,10 +40,10 @@ var fillParamsB = &FillStatusParams{
Pods: []string{"pod-b-1", "pod-b-2"},
FQDNs: []string{"fqdns-b-1", "fqdns-b-2"},
Endpoint: "endpoint-b",
- NormalizedCHI: normalizedChiB, // fields not recursively checked, this is only used as a pointer
+ NormalizedCR: normalizedChiB, // fields not recursively checked, this is only used as a pointer
}
-var copyTestStatusFrom = &ChiStatus{
+var copyTestStatusFrom = &Status{
CHOpVersion: "version-a",
CHOpCommit: "commit-a",
CHOpDate: "date-a",
@@ -71,8 +71,8 @@ var copyTestStatusFrom = &ChiStatus{
PodIPs: []string{"podIP-a-1", "podIP-a-2"},
FQDNs: []string{"fqdns-a-1", "fqdns-a-2"},
Endpoint: "endpt-a",
- NormalizedCHI: normalizedChiA,
- NormalizedCHICompleted: normalizedChiA,
+ NormalizedCR: normalizedChiA,
+ NormalizedCRCompleted: normalizedChiA,
HostsWithTablesCreated: []string{"host-a-1", "host-a-2"},
}
@@ -82,20 +82,20 @@ var copyTestStatusFrom = &ChiStatus{
func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
type testCase struct {
name string
- goRoutineA func(s *ChiStatus)
- goRoutineB func(s *ChiStatus)
- postConditionsVerification func(tt *testing.T, s *ChiStatus)
+ goRoutineA func(s *Status)
+ goRoutineB func(s *Status)
+ postConditionsVerification func(tt *testing.T, s *Status)
}
for _, tc := range []testCase{
{
name: "PushAction",
- goRoutineA: func(s *ChiStatus) {
+ goRoutineA: func(s *Status) {
s.PushAction("foo")
},
- goRoutineB: func(s *ChiStatus) {
+ goRoutineB: func(s *Status) {
s.PushAction("bar")
},
- postConditionsVerification: func(tt *testing.T, s *ChiStatus) {
+ postConditionsVerification: func(tt *testing.T, s *Status) {
actual := s.GetActions()
require.Len(tt, actual, 2)
require.Contains(tt, actual, "foo")
@@ -104,14 +104,14 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
},
{
name: "PushError",
- goRoutineA: func(s *ChiStatus) {
+ goRoutineA: func(s *Status) {
s.PushError("errA")
s.PushError("errB")
},
- goRoutineB: func(s *ChiStatus) {
+ goRoutineB: func(s *Status) {
s.PushError("errC")
},
- postConditionsVerification: func(tt *testing.T, s *ChiStatus) {
+ postConditionsVerification: func(tt *testing.T, s *Status) {
actual := s.GetErrors()
require.Len(tt, actual, 3)
require.Contains(tt, actual, "errA")
@@ -121,13 +121,13 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
},
{
name: "Fill",
- goRoutineA: func(s *ChiStatus) {
+ goRoutineA: func(s *Status) {
s.Fill(fillParamsA)
},
- goRoutineB: func(s *ChiStatus) {
+ goRoutineB: func(s *Status) {
s.Fill(fillParamsB)
},
- postConditionsVerification: func(tt *testing.T, s *ChiStatus) {
+ postConditionsVerification: func(tt *testing.T, s *Status) {
// Fill performs hard updates (overwrites), not pushing/adding extra data.
// The winning goroutine determines the resultant post-condition for every "filled" field.
var expectedParams *FillStatusParams
@@ -151,14 +151,14 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
require.Equal(tt, expectedParams.Pods, s.Pods)
require.Equal(tt, expectedParams.FQDNs, s.FQDNs)
require.Equal(tt, expectedParams.Endpoint, s.Endpoint)
- require.Equal(tt, expectedParams.NormalizedCHI, s.NormalizedCHI)
+ require.Equal(tt, expectedParams.NormalizedCR, s.NormalizedCR)
},
},
{
name: "CopyFrom",
- goRoutineA: func(s *ChiStatus) {
+ goRoutineA: func(s *Status) {
s.PushAction("always-present-action") // CopyFrom preserves existing actions (does not clobber)
- s.CopyFrom(copyTestStatusFrom, CopyCHIStatusOptions{
+ s.CopyFrom(copyTestStatusFrom, CopyStatusOptions{
Actions: true,
Errors: true,
MainFields: true,
@@ -166,10 +166,10 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
InheritableFields: true,
})
},
- goRoutineB: func(s *ChiStatus) {
+ goRoutineB: func(s *Status) {
s.PushAction("additional-action") // this may or may not win the race, but the race will be sync
},
- postConditionsVerification: func(tt *testing.T, s *ChiStatus) {
+ postConditionsVerification: func(tt *testing.T, s *Status) {
if len(s.GetActions()) == len(copyTestStatusFrom.GetActions())+2 {
require.Equal(tt, copyTestStatusFrom.GetActions(), s.GetActions())
require.Contains(tt, s.GetActions(), "always-present-action")
@@ -204,8 +204,8 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
require.Equal(tt, copyTestStatusFrom.GetHostsWithTablesCreated(), s.GetHostsWithTablesCreated())
- require.Equal(tt, copyTestStatusFrom.GetNormalizedCHI(), s.GetNormalizedCHI())
- require.Equal(tt, copyTestStatusFrom.GetNormalizedCHICompleted(), s.GetNormalizedCHICompleted())
+ require.Equal(tt, copyTestStatusFrom.GetNormalizedCR(), s.GetNormalizedCR())
+ require.Equal(tt, copyTestStatusFrom.GetNormalizedCRCompleted(), s.GetNormalizedCRCompleted())
require.Equal(tt, copyTestStatusFrom.GetPodIPs(), s.GetPodIPs())
require.Equal(tt, copyTestStatusFrom.GetPods(), s.GetPods())
require.Equal(tt, copyTestStatusFrom.GetReplicasCount(), s.GetReplicasCount())
@@ -218,7 +218,7 @@ func Test_ChiStatus_BasicOperations_SingleStatus_ConcurrencyTest(t *testing.T) {
},
} {
t.Run(tc.name, func(tt *testing.T) {
- status := &ChiStatus{}
+ status := &Status{}
startWg := sync.WaitGroup{}
doneWg := sync.WaitGroup{}
startWg.Add(2) // We will make sure both goroutines begin execution, i.e., that they don't execute sequentially.
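// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): the test above uses
// a pair of WaitGroups: startWg as a barrier so both goroutines demonstrably
// overlap, doneWg to join them before verifying post-conditions. Minimal
// standalone version of that pattern:
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	var log []string

	startWg := sync.WaitGroup{}
	doneWg := sync.WaitGroup{}
	startWg.Add(2) // both goroutines must check in before either proceeds
	doneWg.Add(2)

	run := func(name string) {
		defer doneWg.Done()
		startWg.Done()
		startWg.Wait() // barrier: guarantees the two bodies run concurrently
		mu.Lock()
		log = append(log, name)
		mu.Unlock()
	}

	go run("goRoutineA")
	go run("goRoutineB")
	doneWg.Wait()

	fmt.Println(log) // order is nondeterministic; assertions must allow either
}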
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_target_selector.go b/pkg/apis/clickhouse.altinity.com/v1/type_target_selector.go
new file mode 100644
index 000000000..0b82faba8
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_target_selector.go
@@ -0,0 +1,48 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// TargetSelector specifies target selector based on labels
+type TargetSelector map[string]string
+
+// Matches checks whether TargetSelector matches provided set of labels
+func (s TargetSelector) Matches(labels map[string]string) bool {
+ if s == nil {
+ // Empty selector matches all labels
+ return true
+ }
+
+ // Walk over selector keys
+ for key, selectorValue := range s {
+ if labelValue, ok := labels[key]; !ok {
+ // Labels have no key specified in selector.
+ // Selector does not match the labels
+ return false
+ } else if selectorValue != labelValue {
+ // Labels have the key specified in selector, but selector value is not the same as labels value
+ // Selector does not match the labels
+ return false
+ } else {
+ // Selector value and label value are equal
+ // So far label matches selector
+ // Continue iteration to next value
+ }
+ }
+
+ // All keys are in place with the same values
+ // Selector matches the labels
+
+ return true
+}
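// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): Matches treats a nil
// selector as match-all and otherwise requires every selector key to be
// present in the labels with an equal value. Standalone demo of those
// semantics; the label values are invented for illustration.
// ---------------------------------------------------------------------------
package main

import "fmt"

type TargetSelector map[string]string

func (s TargetSelector) Matches(labels map[string]string) bool {
	if s == nil {
		return true // nil selector matches all labels
	}
	for key, want := range s {
		if got, ok := labels[key]; !ok || got != want {
			return false // key missing or value differs
		}
	}
	return true
}

func main() {
	labels := map[string]string{"app": "clickhouse", "tier": "db"}

	fmt.Println(TargetSelector(nil).Matches(labels))                              // true: nil matches all
	fmt.Println(TargetSelector{"app": "clickhouse"}.Matches(labels))              // true: subset of labels
	fmt.Println(TargetSelector{"app": "clickhouse", "env": "ci"}.Matches(labels)) // false: "env" missing
}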
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_template_names.go b/pkg/apis/clickhouse.altinity.com/v1/type_template_names.go
deleted file mode 100644
index dadfe6f0c..000000000
--- a/pkg/apis/clickhouse.altinity.com/v1/type_template_names.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-// NewChiTemplateNames creates new ChiTemplateNames object
-func NewChiTemplateNames() *ChiTemplateNames {
- return new(ChiTemplateNames)
-}
-
-// HasHostTemplate checks whether host template is specified
-func (templateNames *ChiTemplateNames) HasHostTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.HostTemplate) > 0
-}
-
-// GetHostTemplate gets host template
-func (templateNames *ChiTemplateNames) GetHostTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.HostTemplate
-}
-
-// HasPodTemplate checks whether pod template is specified
-func (templateNames *ChiTemplateNames) HasPodTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.PodTemplate) > 0
-}
-
-// GetPodTemplate gets pod template
-func (templateNames *ChiTemplateNames) GetPodTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.PodTemplate
-}
-
-// HasDataVolumeClaimTemplate checks whether data volume claim template is specified
-func (templateNames *ChiTemplateNames) HasDataVolumeClaimTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.DataVolumeClaimTemplate) > 0
-}
-
-// GetDataVolumeClaimTemplate gets data volume claim template
-func (templateNames *ChiTemplateNames) GetDataVolumeClaimTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.DataVolumeClaimTemplate
-}
-
-// HasLogVolumeClaimTemplate checks whether log volume claim template is specified
-func (templateNames *ChiTemplateNames) HasLogVolumeClaimTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.LogVolumeClaimTemplate) > 0
-}
-
-// GetLogVolumeClaimTemplate gets log volume claim template
-func (templateNames *ChiTemplateNames) GetLogVolumeClaimTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.LogVolumeClaimTemplate
-}
-
-// HasServiceTemplate checks whether service template is specified
-func (templateNames *ChiTemplateNames) HasServiceTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.ServiceTemplate) > 0
-}
-
-// GetServiceTemplate gets service template
-func (templateNames *ChiTemplateNames) GetServiceTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.ServiceTemplate
-}
-
-// HasClusterServiceTemplate checks whether cluster service template is specified
-func (templateNames *ChiTemplateNames) HasClusterServiceTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.ClusterServiceTemplate) > 0
-}
-
-// GetClusterServiceTemplate gets cluster service template
-func (templateNames *ChiTemplateNames) GetClusterServiceTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.ClusterServiceTemplate
-}
-
-// HasShardServiceTemplate checks whether shard service template is specified
-func (templateNames *ChiTemplateNames) HasShardServiceTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.ShardServiceTemplate) > 0
-}
-
-// GetShardServiceTemplate gets shard service template
-func (templateNames *ChiTemplateNames) GetShardServiceTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.ShardServiceTemplate
-}
-
-// HasReplicaServiceTemplate checks whether replica service template is specified
-func (templateNames *ChiTemplateNames) HasReplicaServiceTemplate() bool {
- if templateNames == nil {
- return false
- }
- return len(templateNames.ReplicaServiceTemplate) > 0
-}
-
-// GetReplicaServiceTemplate gets replica service template
-func (templateNames *ChiTemplateNames) GetReplicaServiceTemplate() string {
- if templateNames == nil {
- return ""
- }
- return templateNames.ReplicaServiceTemplate
-}
-
-// HandleDeprecatedFields helps to deal with deprecated fields
-func (templateNames *ChiTemplateNames) HandleDeprecatedFields() {
- if templateNames == nil {
- return
- }
- if templateNames.DataVolumeClaimTemplate == "" {
- templateNames.DataVolumeClaimTemplate = templateNames.VolumeClaimTemplate
- }
-}
-
-// MergeFrom merges from specified object
-func (templateNames *ChiTemplateNames) MergeFrom(from *ChiTemplateNames, _type MergeType) *ChiTemplateNames {
- if from == nil {
- return templateNames
- }
-
- if templateNames == nil {
- templateNames = NewChiTemplateNames()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- return templateNames.mergeFromFillEmptyValues(from)
- case MergeTypeOverrideByNonEmptyValues:
- return templateNames.mergeFromOverwriteByNonEmptyValues(from)
- }
-
- return templateNames
-}
-
-// mergeFromFillEmptyValues fills empty values
-func (templateNames *ChiTemplateNames) mergeFromFillEmptyValues(from *ChiTemplateNames) *ChiTemplateNames {
- if templateNames.HostTemplate == "" {
- templateNames.HostTemplate = from.HostTemplate
- }
- if templateNames.PodTemplate == "" {
- templateNames.PodTemplate = from.PodTemplate
- }
- if templateNames.DataVolumeClaimTemplate == "" {
- templateNames.DataVolumeClaimTemplate = from.DataVolumeClaimTemplate
- }
- if templateNames.LogVolumeClaimTemplate == "" {
- templateNames.LogVolumeClaimTemplate = from.LogVolumeClaimTemplate
- }
- if templateNames.VolumeClaimTemplate == "" {
- templateNames.VolumeClaimTemplate = from.VolumeClaimTemplate
- }
- if templateNames.ServiceTemplate == "" {
- templateNames.ServiceTemplate = from.ServiceTemplate
- }
- if templateNames.ClusterServiceTemplate == "" {
- templateNames.ClusterServiceTemplate = from.ClusterServiceTemplate
- }
- if templateNames.ShardServiceTemplate == "" {
- templateNames.ShardServiceTemplate = from.ShardServiceTemplate
- }
- if templateNames.ReplicaServiceTemplate == "" {
- templateNames.ReplicaServiceTemplate = from.ReplicaServiceTemplate
- }
- return templateNames
-}
-
-// mergeFromOverwriteByNonEmptyValues overwrites by non-empty values
-func (templateNames *ChiTemplateNames) mergeFromOverwriteByNonEmptyValues(from *ChiTemplateNames) *ChiTemplateNames {
- if from.HostTemplate != "" {
- templateNames.HostTemplate = from.HostTemplate
- }
- if from.PodTemplate != "" {
- templateNames.PodTemplate = from.PodTemplate
- }
- if from.DataVolumeClaimTemplate != "" {
- templateNames.DataVolumeClaimTemplate = from.DataVolumeClaimTemplate
- }
- if from.LogVolumeClaimTemplate != "" {
- templateNames.LogVolumeClaimTemplate = from.LogVolumeClaimTemplate
- }
- if from.VolumeClaimTemplate != "" {
- templateNames.VolumeClaimTemplate = from.VolumeClaimTemplate
- }
- if from.ServiceTemplate != "" {
- templateNames.ServiceTemplate = from.ServiceTemplate
- }
- if from.ClusterServiceTemplate != "" {
- templateNames.ClusterServiceTemplate = from.ClusterServiceTemplate
- }
- if from.ShardServiceTemplate != "" {
- templateNames.ShardServiceTemplate = from.ShardServiceTemplate
- }
- if from.ReplicaServiceTemplate != "" {
- templateNames.ReplicaServiceTemplate = from.ReplicaServiceTemplate
- }
- return templateNames
-}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_template_ref.go b/pkg/apis/clickhouse.altinity.com/v1/type_template_ref.go
new file mode 100644
index 000000000..eb242e4f9
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_template_ref.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// TemplateRef defines UseTemplate section of ClickHouseInstallation resource
+type TemplateRef struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+ UseType string `json:"useType,omitempty" yaml:"useType,omitempty"`
+}
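// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): the omitempty json
// tags on TemplateRef keep unset fields out of serialized output. Standalone
// demo with an invented template name:
// ---------------------------------------------------------------------------
package main

import (
	"encoding/json"
	"fmt"
)

type TemplateRef struct {
	Name      string `json:"name,omitempty"`
	Namespace string `json:"namespace,omitempty"`
	UseType   string `json:"useType,omitempty"`
}

func main() {
	ref := TemplateRef{Name: "clickhouse-stable"} // hypothetical template name
	b, _ := json.Marshal(ref)
	fmt.Println(string(b)) // {"name":"clickhouse-stable"}; empty fields omitted
}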
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
index 7d72653ba..97a98ba59 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates.go
@@ -16,10 +16,72 @@ package v1
import (
"github.com/imdario/mergo"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// NewChiTemplates creates new Templates object
-func NewChiTemplates() *Templates {
+// Templates defines templates section of .spec
+type Templates struct {
+ // Templates
+ HostTemplates []HostTemplate `json:"hostTemplates,omitempty" yaml:"hostTemplates,omitempty"`
+ PodTemplates []PodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates,omitempty"`
+ VolumeClaimTemplates []VolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates,omitempty"`
+ ServiceTemplates []ServiceTemplate `json:"serviceTemplates,omitempty" yaml:"serviceTemplates,omitempty"`
+
+ // Index maps template name to template itself
+ HostTemplatesIndex *HostTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
+ PodTemplatesIndex *PodTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
+ VolumeClaimTemplatesIndex *VolumeClaimTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
+ ServiceTemplatesIndex *ServiceTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
+}
+
+// HostTemplate defines full Host Template
+type HostTemplate struct {
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ PortDistribution []PortDistribution `json:"portDistribution,omitempty" yaml:"portDistribution,omitempty"`
+ Spec Host `json:"spec,omitempty" yaml:"spec,omitempty"`
+}
+
+// PortDistribution defines port distribution
+type PortDistribution struct {
+ Type string `json:"type,omitempty" yaml:"type,omitempty"`
+}
+
+// PodTemplate defines full Pod Template, directly used by StatefulSet
+type PodTemplate struct {
+ Name string `json:"name" yaml:"name"`
+ GenerateName string `json:"generateName,omitempty" yaml:"generateName,omitempty"`
+ Zone PodTemplateZone `json:"zone,omitempty" yaml:"zone,omitempty"`
+ PodDistribution []PodDistribution `json:"podDistribution,omitempty" yaml:"podDistribution,omitempty"`
+ ObjectMeta meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+ Spec core.PodSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
+}
+
+// PodTemplateZone defines pod template zone
+type PodTemplateZone struct {
+ Key string `json:"key,omitempty" yaml:"key,omitempty"`
+ Values []string `json:"values,omitempty" yaml:"values,omitempty"`
+}
+
+// PodDistribution defines pod distribution
+type PodDistribution struct {
+ Type string `json:"type,omitempty" yaml:"type,omitempty"`
+ Scope string `json:"scope,omitempty" yaml:"scope,omitempty"`
+ Number int `json:"number,omitempty" yaml:"number,omitempty"`
+ TopologyKey string `json:"topologyKey,omitempty" yaml:"topologyKey,omitempty"`
+}
+
+// ServiceTemplate defines CHI service template
+type ServiceTemplate struct {
+ Name string `json:"name" yaml:"name"`
+ GenerateName string `json:"generateName,omitempty" yaml:"generateName,omitempty"`
+ ObjectMeta meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+ Spec core.ServiceSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
+}
+
+// NewTemplates creates new Templates object
+func NewTemplates() *Templates {
return new(Templates)
}
@@ -84,7 +146,7 @@ func (templates *Templates) MergeFrom(_from any, _type MergeType) *Templates {
}
if templates == nil {
- templates = NewChiTemplates()
+ templates = NewTemplates()
}
// Merge sections
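// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): the *TemplatesIndex
// fields above are described as mapping template name to template. A likely
// shape for such an index, sketched with a trimmed stand-in type:
// ---------------------------------------------------------------------------
package main

import "fmt"

// podTemplate is a trimmed stand-in for PodTemplate.
type podTemplate struct {
	Name string
}

// buildIndex turns the declared slice into a name-to-template lookup so later
// resolution is O(1) instead of a linear scan.
func buildIndex(templates []podTemplate) map[string]*podTemplate {
	index := make(map[string]*podTemplate, len(templates))
	for i := range templates {
		index[templates[i].Name] = &templates[i]
	}
	return index
}

func main() {
	templates := []podTemplate{{Name: "clickhouse"}, {Name: "keeper"}}
	index := buildIndex(templates)
	fmt.Println(index["keeper"].Name) // keeper
}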
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_templates_list.go b/pkg/apis/clickhouse.altinity.com/v1/type_templates_list.go
new file mode 100644
index 000000000..59ad1c258
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_templates_list.go
@@ -0,0 +1,258 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// TemplatesList defines references to .spec.templates to be used
+type TemplatesList struct {
+ HostTemplate string `json:"hostTemplate,omitempty" yaml:"hostTemplate,omitempty"`
+ PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate,omitempty"`
+ DataVolumeClaimTemplate string `json:"dataVolumeClaimTemplate,omitempty" yaml:"dataVolumeClaimTemplate,omitempty"`
+ LogVolumeClaimTemplate string `json:"logVolumeClaimTemplate,omitempty" yaml:"logVolumeClaimTemplate,omitempty"`
+ ServiceTemplate string `json:"serviceTemplate,omitempty" yaml:"serviceTemplate,omitempty"`
+ ClusterServiceTemplate string `json:"clusterServiceTemplate,omitempty" yaml:"clusterServiceTemplate,omitempty"`
+ ShardServiceTemplate string `json:"shardServiceTemplate,omitempty" yaml:"shardServiceTemplate,omitempty"`
+ ReplicaServiceTemplate string `json:"replicaServiceTemplate,omitempty" yaml:"replicaServiceTemplate,omitempty"`
+
+ // VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ // !!! DEPRECATED !!!
+ VolumeClaimTemplate string `json:"volumeClaimTemplate,omitempty" yaml:"volumeClaimTemplate,omitempty"`
+}
+
+// NewTemplatesList creates new TemplatesList object
+func NewTemplatesList() *TemplatesList {
+ return new(TemplatesList)
+}
+
+// HasHostTemplate checks whether host template is specified
+func (tl *TemplatesList) HasHostTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.HostTemplate) > 0
+}
+
+// GetHostTemplate gets host template
+func (tl *TemplatesList) GetHostTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.HostTemplate
+}
+
+// HasPodTemplate checks whether pod template is specified
+func (tl *TemplatesList) HasPodTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.PodTemplate) > 0
+}
+
+// GetPodTemplate gets pod template
+func (tl *TemplatesList) GetPodTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.PodTemplate
+}
+
+// HasDataVolumeClaimTemplate checks whether data volume claim template is specified
+func (tl *TemplatesList) HasDataVolumeClaimTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.DataVolumeClaimTemplate) > 0
+}
+
+// GetDataVolumeClaimTemplate gets data volume claim template
+func (tl *TemplatesList) GetDataVolumeClaimTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.DataVolumeClaimTemplate
+}
+
+// HasLogVolumeClaimTemplate checks whether log volume claim template is specified
+func (tl *TemplatesList) HasLogVolumeClaimTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.LogVolumeClaimTemplate) > 0
+}
+
+// GetLogVolumeClaimTemplate gets log volume claim template
+func (tl *TemplatesList) GetLogVolumeClaimTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.LogVolumeClaimTemplate
+}
+
+// HasServiceTemplate checks whether service template is specified
+func (tl *TemplatesList) HasServiceTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.ServiceTemplate) > 0
+}
+
+// GetServiceTemplate gets service template
+func (tl *TemplatesList) GetServiceTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.ServiceTemplate
+}
+
+// HasClusterServiceTemplate checks whether cluster service template is specified
+func (tl *TemplatesList) HasClusterServiceTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.ClusterServiceTemplate) > 0
+}
+
+// GetClusterServiceTemplate gets cluster service template
+func (tl *TemplatesList) GetClusterServiceTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.ClusterServiceTemplate
+}
+
+// HasShardServiceTemplate checks whether shard service template is specified
+func (tl *TemplatesList) HasShardServiceTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.ShardServiceTemplate) > 0
+}
+
+// GetShardServiceTemplate gets shard service template
+func (tl *TemplatesList) GetShardServiceTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.ShardServiceTemplate
+}
+
+// HasReplicaServiceTemplate checks whether replica service template is specified
+func (tl *TemplatesList) HasReplicaServiceTemplate() bool {
+ if tl == nil {
+ return false
+ }
+ return len(tl.ReplicaServiceTemplate) > 0
+}
+
+// GetReplicaServiceTemplate gets replica service template
+func (tl *TemplatesList) GetReplicaServiceTemplate() string {
+ if tl == nil {
+ return ""
+ }
+ return tl.ReplicaServiceTemplate
+}
+
+// HandleDeprecatedFields helps to deal with deprecated fields
+func (tl *TemplatesList) HandleDeprecatedFields() {
+ if tl == nil {
+ return
+ }
+ if tl.DataVolumeClaimTemplate == "" {
+ tl.DataVolumeClaimTemplate = tl.VolumeClaimTemplate
+ }
+}
+
+// MergeFrom merges from specified object
+func (tl *TemplatesList) MergeFrom(from *TemplatesList, _type MergeType) *TemplatesList {
+ if from == nil {
+ return tl
+ }
+
+ if tl == nil {
+ tl = NewTemplatesList()
+ }
+
+ switch _type {
+ case MergeTypeFillEmptyValues:
+ return tl.mergeFromFillEmptyValues(from)
+ case MergeTypeOverrideByNonEmptyValues:
+ return tl.mergeFromOverwriteByNonEmptyValues(from)
+ }
+
+ return tl
+}
+
+// mergeFromFillEmptyValues fills empty values
+func (tl *TemplatesList) mergeFromFillEmptyValues(from *TemplatesList) *TemplatesList {
+ if tl.HostTemplate == "" {
+ tl.HostTemplate = from.HostTemplate
+ }
+ if tl.PodTemplate == "" {
+ tl.PodTemplate = from.PodTemplate
+ }
+ if tl.DataVolumeClaimTemplate == "" {
+ tl.DataVolumeClaimTemplate = from.DataVolumeClaimTemplate
+ }
+ if tl.LogVolumeClaimTemplate == "" {
+ tl.LogVolumeClaimTemplate = from.LogVolumeClaimTemplate
+ }
+ if tl.VolumeClaimTemplate == "" {
+ tl.VolumeClaimTemplate = from.VolumeClaimTemplate
+ }
+ if tl.ServiceTemplate == "" {
+ tl.ServiceTemplate = from.ServiceTemplate
+ }
+ if tl.ClusterServiceTemplate == "" {
+ tl.ClusterServiceTemplate = from.ClusterServiceTemplate
+ }
+ if tl.ShardServiceTemplate == "" {
+ tl.ShardServiceTemplate = from.ShardServiceTemplate
+ }
+ if tl.ReplicaServiceTemplate == "" {
+ tl.ReplicaServiceTemplate = from.ReplicaServiceTemplate
+ }
+ return tl
+}
+
+// mergeFromOverwriteByNonEmptyValues overwrites by non-empty values
+func (tl *TemplatesList) mergeFromOverwriteByNonEmptyValues(from *TemplatesList) *TemplatesList {
+ if from.HostTemplate != "" {
+ tl.HostTemplate = from.HostTemplate
+ }
+ if from.PodTemplate != "" {
+ tl.PodTemplate = from.PodTemplate
+ }
+ if from.DataVolumeClaimTemplate != "" {
+ tl.DataVolumeClaimTemplate = from.DataVolumeClaimTemplate
+ }
+ if from.LogVolumeClaimTemplate != "" {
+ tl.LogVolumeClaimTemplate = from.LogVolumeClaimTemplate
+ }
+ if from.VolumeClaimTemplate != "" {
+ tl.VolumeClaimTemplate = from.VolumeClaimTemplate
+ }
+ if from.ServiceTemplate != "" {
+ tl.ServiceTemplate = from.ServiceTemplate
+ }
+ if from.ClusterServiceTemplate != "" {
+ tl.ClusterServiceTemplate = from.ClusterServiceTemplate
+ }
+ if from.ShardServiceTemplate != "" {
+ tl.ShardServiceTemplate = from.ShardServiceTemplate
+ }
+ if from.ReplicaServiceTemplate != "" {
+ tl.ReplicaServiceTemplate = from.ReplicaServiceTemplate
+ }
+ return tl
+}
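// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): MergeFrom above has
// two modes. FillEmptyValues only fills fields that are still blank;
// OverrideByNonEmptyValues clobbers with any non-empty incoming value.
// Two-field stand-in demonstrating the difference:
// ---------------------------------------------------------------------------
package main

import "fmt"

type mergeType int

const (
	mergeTypeFillEmptyValues mergeType = iota
	mergeTypeOverrideByNonEmptyValues
)

type refs struct {
	PodTemplate     string
	ServiceTemplate string
}

func (r *refs) mergeFrom(from *refs, t mergeType) *refs {
	if from == nil {
		return r
	}
	if r == nil {
		r = &refs{}
	}
	switch t {
	case mergeTypeFillEmptyValues: // keep existing values, fill blanks
		if r.PodTemplate == "" {
			r.PodTemplate = from.PodTemplate
		}
		if r.ServiceTemplate == "" {
			r.ServiceTemplate = from.ServiceTemplate
		}
	case mergeTypeOverrideByNonEmptyValues: // non-empty incoming values win
		if from.PodTemplate != "" {
			r.PodTemplate = from.PodTemplate
		}
		if from.ServiceTemplate != "" {
			r.ServiceTemplate = from.ServiceTemplate
		}
	}
	return r
}

func main() {
	from := &refs{PodTemplate: "pod-b", ServiceTemplate: "svc-b"}

	fill := (&refs{PodTemplate: "pod-a"}).mergeFrom(from, mergeTypeFillEmptyValues)
	fmt.Println(*fill) // {pod-a svc-b}: existing value kept, blank filled

	over := (&refs{PodTemplate: "pod-a"}).mergeFrom(from, mergeTypeOverrideByNonEmptyValues)
	fmt.Println(*over) // {pod-b svc-b}: incoming non-empty values win
}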
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
index aeaa7a472..7a3e5f8c2 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper.go
@@ -14,26 +14,51 @@
package v1
-import "gopkg.in/d4l3k/messagediff.v1"
+import (
+ "gopkg.in/d4l3k/messagediff.v1"
+ "strings"
+)
-// ChiZookeeperConfig defines zookeeper section of .spec.configuration
+// ZookeeperConfig defines zookeeper section of .spec.configuration
// Refers to
// https://clickhouse.yandex/docs/en/single/index.html?#server-settings_zookeeper
-type ChiZookeeperConfig struct {
- Nodes []ChiZookeeperNode `json:"nodes,omitempty" yaml:"nodes,omitempty"`
- SessionTimeoutMs int `json:"session_timeout_ms,omitempty" yaml:"session_timeout_ms,omitempty"`
- OperationTimeoutMs int `json:"operation_timeout_ms,omitempty" yaml:"operation_timeout_ms,omitempty"`
- Root string `json:"root,omitempty" yaml:"root,omitempty"`
- Identity string `json:"identity,omitempty" yaml:"identity,omitempty"`
+type ZookeeperConfig struct {
+ Nodes ZookeeperNodes `json:"nodes,omitempty" yaml:"nodes,omitempty"`
+ SessionTimeoutMs int `json:"session_timeout_ms,omitempty" yaml:"session_timeout_ms,omitempty"`
+ OperationTimeoutMs int `json:"operation_timeout_ms,omitempty" yaml:"operation_timeout_ms,omitempty"`
+ Root string `json:"root,omitempty" yaml:"root,omitempty"`
+ Identity string `json:"identity,omitempty" yaml:"identity,omitempty"`
}
-// NewChiZookeeperConfig creates new ChiZookeeperConfig object
-func NewChiZookeeperConfig() *ChiZookeeperConfig {
- return new(ChiZookeeperConfig)
+// ZookeeperNodes defines a list of zookeeper nodes
+type ZookeeperNodes []ZookeeperNode
+
+// Len returns the number of nodes in the list
+func (n ZookeeperNodes) Len() int {
+ return len(n)
+}
+
+// First returns the first node in the list. Callers must ensure the list is not empty.
+func (n ZookeeperNodes) First() ZookeeperNode {
+ return n[0]
+}
+
+// Servers returns the nodes rendered as a list of host[:port] strings
+func (n ZookeeperNodes) Servers() []string {
+ var servers []string
+ for _, node := range n {
+ servers = append(servers, node.String())
+ }
+ return servers
+}
+
+// String implements the Stringer interface, joining servers with commas
+func (n ZookeeperNodes) String() string {
+ return strings.Join(n.Servers(), ",")
+}
+
+// NewZookeeperConfig creates new ZookeeperConfig object
+func NewZookeeperConfig() *ZookeeperConfig {
+ return new(ZookeeperConfig)
}
// IsEmpty checks whether config is empty
-func (zkc *ChiZookeeperConfig) IsEmpty() bool {
+func (zkc *ZookeeperConfig) IsEmpty() bool {
if zkc == nil {
return true
}
@@ -42,19 +67,19 @@ func (zkc *ChiZookeeperConfig) IsEmpty() bool {
}
// MergeFrom merges from provided object
-func (zkc *ChiZookeeperConfig) MergeFrom(from *ChiZookeeperConfig, _type MergeType) *ChiZookeeperConfig {
+func (zkc *ZookeeperConfig) MergeFrom(from *ZookeeperConfig, _type MergeType) *ZookeeperConfig {
if from == nil {
return zkc
}
if zkc == nil {
- zkc = NewChiZookeeperConfig()
+ zkc = NewZookeeperConfig()
}
if !from.IsEmpty() {
// Append Nodes from `from`
if zkc.Nodes == nil {
- zkc.Nodes = make([]ChiZookeeperNode, 0)
+ zkc.Nodes = make([]ZookeeperNode, 0)
}
for fromIndex := range from.Nodes {
fromNode := &from.Nodes[fromIndex]
@@ -94,7 +119,7 @@ func (zkc *ChiZookeeperConfig) MergeFrom(from *ChiZookeeperConfig, _type MergeTy
}
// Equals checks whether config is equal to another one
-func (zkc *ChiZookeeperConfig) Equals(b *ChiZookeeperConfig) bool {
+func (zkc *ZookeeperConfig) Equals(b *ZookeeperConfig) bool {
_, equals := messagediff.DeepDiff(zkc, b)
return equals
}
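// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): ZookeeperNodes
// renders each node as host[:port] and joins them with commas. Standalone
// analogue; the patch stores the port as an optional *types.Int32, so a plain
// *int32 stands in for it here.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"strings"
)

type node struct {
	Host string
	Port *int32
}

// String renders the host, plus ":port" when a port is set.
func (n node) String() string {
	if n.Port == nil {
		return n.Host
	}
	return fmt.Sprintf("%s:%d", n.Host, *n.Port)
}

type nodes []node

// Servers mirrors ZookeeperNodes.Servers: one string per node.
func (ns nodes) Servers() []string {
	var servers []string
	for _, n := range ns {
		servers = append(servers, n.String())
	}
	return servers
}

// String mirrors ZookeeperNodes.String: comma-joined server list.
func (ns nodes) String() string { return strings.Join(ns.Servers(), ",") }

func main() {
	port := int32(2181)
	zk := nodes{{Host: "zk-0", Port: &port}, {Host: "zk-1"}}
	fmt.Println(zk) // zk-0:2181,zk-1
}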
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper_node.go b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper_node.go
index 5968f3c85..583359561 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper_node.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_zookeeper_node.go
@@ -14,24 +14,51 @@
package v1
-// ChiZookeeperNode defines item of nodes section of .spec.configuration.zookeeper
-type ChiZookeeperNode struct {
- Host string `json:"host,omitempty" yaml:"host,omitempty"`
- Port int32 `json:"port,omitempty" yaml:"port,omitempty"`
- Secure *StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
+import (
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// ZookeeperNode defines item of nodes section of .spec.configuration.zookeeper
+type ZookeeperNode struct {
+ Host string `json:"host,omitempty" yaml:"host,omitempty"`
+ Port *types.Int32 `json:"port,omitempty" yaml:"port,omitempty"`
+ Secure *types.StringBool `json:"secure,omitempty" yaml:"secure,omitempty"`
+}
+
+// String renders the node as a host[:port] string
+func (zkNode *ZookeeperNode) String() string {
+ if zkNode == nil {
+ return ""
+ }
+ str := zkNode.Host
+ if zkNode.Port.HasValue() {
+ str += ":" + zkNode.Port.String()
+ }
+ return str
}
// Equal checks whether zookeeper node is equal to another
-func (zkNode *ChiZookeeperNode) Equal(to *ChiZookeeperNode) bool {
+func (zkNode *ZookeeperNode) Equal(to *ZookeeperNode) bool {
if to == nil {
return false
}
- return (zkNode.Host == to.Host) && (zkNode.Port == to.Port) && (zkNode.Secure.Value() == zkNode.Secure.Value())
+ return zkNode.hostEqual(to) && zkNode.portEqual(to) && zkNode.secureEqual(to)
+}
+
+func (zkNode *ZookeeperNode) hostEqual(to *ZookeeperNode) bool {
+ return zkNode.Host == to.Host
+}
+
+func (zkNode *ZookeeperNode) portEqual(to *ZookeeperNode) bool {
+ return zkNode.Port.Equal(to.Port)
+}
+
+func (zkNode *ZookeeperNode) secureEqual(to *ZookeeperNode) bool {
+ return zkNode.Secure.Value() == to.Secure.Value()
}
// IsSecure checks whether zookeeper node is secure
-func (zkNode *ChiZookeeperNode) IsSecure() bool {
+func (zkNode *ZookeeperNode) IsSecure() bool {
if zkNode == nil {
return false
}
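// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the patch): the refactored Equal
// above compares field by field against `to`; notably, the removed code
// compared zkNode.Secure.Value() with itself, which always passed. Standalone
// demo of the corrected, nil-tolerant comparison (a plain *bool stands in for
// *types.StringBool):
// ---------------------------------------------------------------------------
package main

import "fmt"

type node struct {
	Host   string
	Secure *bool
}

// secureValue reads the flag nil-tolerantly: an unset flag counts as false.
func (n *node) secureValue() bool {
	return n.Secure != nil && *n.Secure
}

// equal compares against the other node, not against the receiver itself.
func (n *node) equal(to *node) bool {
	if to == nil {
		return false
	}
	return n.Host == to.Host && n.secureValue() == to.secureValue()
}

func main() {
	secure := true
	a := &node{Host: "zk-0", Secure: &secure}
	b := &node{Host: "zk-0"} // Secure unset reads as false

	fmt.Println(a.equal(b)) // false: secure flags differ
	fmt.Println(a.equal(a)) // true
}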
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index c4ef772bd..824077f84 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -15,11 +15,8 @@
package v1
import (
- "strings"
"sync"
- "time"
- core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -40,8 +37,8 @@ type ClickHouseInstallation struct {
meta.TypeMeta `json:",inline" yaml:",inline"`
meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec ChiSpec `json:"spec" yaml:"spec"`
- Status *ChiStatus `json:"status,omitempty" yaml:"status,omitempty"`
+ Spec ChiSpec `json:"spec" yaml:"spec"`
+ Status *Status `json:"status,omitempty" yaml:"status,omitempty"`
runtime *ClickHouseInstallationRuntime `json:"-" yaml:"-"`
statusCreatorMutex sync.Mutex `json:"-" yaml:"-"`
@@ -71,14 +68,6 @@ func (runtime *ClickHouseInstallationRuntime) UnlockCommonConfig() {
runtime.commonConfigMutex.Unlock()
}
-// ComparableAttributes specifies CHI attributes that are comparable
-type ComparableAttributes struct {
- AdditionalEnvVars []core.EnvVar `json:"-" yaml:"-"`
- AdditionalVolumes []core.Volume `json:"-" yaml:"-"`
- AdditionalVolumeMounts []core.VolumeMount `json:"-" yaml:"-"`
- SkipOwnerRef bool `json:"-" yaml:"-"`
-}
-
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -98,640 +87,6 @@ type ClickHouseOperatorConfiguration struct {
Status string `json:"status" yaml:"status"`
}
-// ChiSpec defines spec section of ClickHouseInstallation resource
-type ChiSpec struct {
- TaskID *string `json:"taskID,omitempty" yaml:"taskID,omitempty"`
- Stop *StringBool `json:"stop,omitempty" yaml:"stop,omitempty"`
- Restart string `json:"restart,omitempty" yaml:"restart,omitempty"`
- Troubleshoot *StringBool `json:"troubleshoot,omitempty" yaml:"troubleshoot,omitempty"`
- NamespaceDomainPattern string `json:"namespaceDomainPattern,omitempty" yaml:"namespaceDomainPattern,omitempty"`
- Templating *ChiTemplating `json:"templating,omitempty" yaml:"templating,omitempty"`
- Reconciling *ChiReconciling `json:"reconciling,omitempty" yaml:"reconciling,omitempty"`
- Defaults *ChiDefaults `json:"defaults,omitempty" yaml:"defaults,omitempty"`
- Configuration *Configuration `json:"configuration,omitempty" yaml:"configuration,omitempty"`
- Templates *Templates `json:"templates,omitempty" yaml:"templates,omitempty"`
- UseTemplates []*TemplateRef `json:"useTemplates,omitempty" yaml:"useTemplates,omitempty"`
-}
-
-// TemplateRef defines UseTemplate section of ClickHouseInstallation resource
-type TemplateRef struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
- UseType string `json:"useType,omitempty" yaml:"useType,omitempty"`
-}
-
-// ChiTemplating defines templating policy struct
-type ChiTemplating struct {
- Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
- CHISelector CHISelector `json:"chiSelector,omitempty" yaml:"chiSelector,omitempty"`
-}
-
-// CHISelector specifies CHI label selector
-type CHISelector map[string]string
-
-// Matches checks whether CHISelector matches provided set of labels
-func (s CHISelector) Matches(labels map[string]string) bool {
- if s == nil {
- // Empty selector matches all labels
- return true
- }
-
- // Walk over selector keys
- for key, selectorValue := range s {
- if labelValue, ok := labels[key]; !ok {
- // Labels have no key specified in selector.
- // Selector does not match the labels
- return false
- } else if selectorValue != labelValue {
- // Labels have the key specified in selector, but selector value is not the same as labels value
- // Selector does not match the labels
- return false
- } else {
- // Selector value and label value are equal
- // So far label matches selector
- // Continue iteration to next value
- }
- }
-
- // All keys are in place with the same values
- // Selector matches the labels
-
- return true
-}
-
-// NewChiTemplating creates new templating
-func NewChiTemplating() *ChiTemplating {
- return new(ChiTemplating)
-}
-
-// GetPolicy gets policy
-func (t *ChiTemplating) GetPolicy() string {
- if t == nil {
- return ""
- }
- return t.Policy
-}
-
-// SetPolicy sets policy
-func (t *ChiTemplating) SetPolicy(p string) {
- if t == nil {
- return
- }
- t.Policy = p
-}
-
-// GetSelector gets CHI selector
-func (t *ChiTemplating) GetSelector() CHISelector {
- if t == nil {
- return nil
- }
- return t.CHISelector
-}
-
-// MergeFrom merges from specified templating
-func (t *ChiTemplating) MergeFrom(from *ChiTemplating, _type MergeType) *ChiTemplating {
- if from == nil {
- return t
- }
-
- if t == nil {
- t = NewChiTemplating()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- if t.Policy == "" {
- t.Policy = from.Policy
- }
- if t.CHISelector == nil {
- t.CHISelector = from.CHISelector
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.Policy != "" {
- // Override by non-empty values only
- t.Policy = from.Policy
- }
- if from.CHISelector != nil {
- // Override by non-empty values only
- t.CHISelector = from.CHISelector
- }
- }
-
- return t
-}
-
-// Possible objects cleanup options
-const (
- ObjectsCleanupUnspecified = "Unspecified"
- ObjectsCleanupRetain = "Retain"
- ObjectsCleanupDelete = "Delete"
-)
-
-// ChiObjectsCleanup specifies object cleanup struct
-type ChiObjectsCleanup struct {
- StatefulSet string `json:"statefulSet,omitempty" yaml:"statefulSet,omitempty"`
- PVC string `json:"pvc,omitempty" yaml:"pvc,omitempty"`
- ConfigMap string `json:"configMap,omitempty" yaml:"configMap,omitempty"`
- Service string `json:"service,omitempty" yaml:"service,omitempty"`
- Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`
-}
-
-// NewChiObjectsCleanup creates new object cleanup
-func NewChiObjectsCleanup() *ChiObjectsCleanup {
- return new(ChiObjectsCleanup)
-}
-
-// MergeFrom merges from specified cleanup
-func (c *ChiObjectsCleanup) MergeFrom(from *ChiObjectsCleanup, _type MergeType) *ChiObjectsCleanup {
- if from == nil {
- return c
- }
-
- if c == nil {
- c = NewChiObjectsCleanup()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- if c.StatefulSet == "" {
- c.StatefulSet = from.StatefulSet
- }
- if c.PVC == "" {
- c.PVC = from.PVC
- }
- if c.ConfigMap == "" {
- c.ConfigMap = from.ConfigMap
- }
- if c.Service == "" {
- c.Service = from.Service
- }
- if c.Secret == "" {
- c.Secret = from.Secret
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.StatefulSet != "" {
- // Override by non-empty values only
- c.StatefulSet = from.StatefulSet
- }
- if from.PVC != "" {
- // Override by non-empty values only
- c.PVC = from.PVC
- }
- if from.ConfigMap != "" {
- // Override by non-empty values only
- c.ConfigMap = from.ConfigMap
- }
- if from.Service != "" {
- // Override by non-empty values only
- c.Service = from.Service
- }
- if from.Secret != "" {
- // Override by non-empty values only
- c.Secret = from.Secret
- }
- }
-
- return c
-}
-
-// GetStatefulSet gets stateful set
-func (c *ChiObjectsCleanup) GetStatefulSet() string {
- if c == nil {
- return ""
- }
- return c.StatefulSet
-}
-
-// SetStatefulSet sets stateful set
-func (c *ChiObjectsCleanup) SetStatefulSet(v string) *ChiObjectsCleanup {
- if c == nil {
- return nil
- }
- c.StatefulSet = v
- return c
-}
-
-// GetPVC gets PVC
-func (c *ChiObjectsCleanup) GetPVC() string {
- if c == nil {
- return ""
- }
- return c.PVC
-}
-
-// SetPVC sets PVC
-func (c *ChiObjectsCleanup) SetPVC(v string) *ChiObjectsCleanup {
- if c == nil {
- return nil
- }
- c.PVC = v
- return c
-}
-
-// GetConfigMap gets config map
-func (c *ChiObjectsCleanup) GetConfigMap() string {
- if c == nil {
- return ""
- }
- return c.ConfigMap
-}
-
-// SetConfigMap sets config map
-func (c *ChiObjectsCleanup) SetConfigMap(v string) *ChiObjectsCleanup {
- if c == nil {
- return nil
- }
- c.ConfigMap = v
- return c
-}
-
-// GetService gets service
-func (c *ChiObjectsCleanup) GetService() string {
- if c == nil {
- return ""
- }
- return c.Service
-}
-
-// SetService sets service
-func (c *ChiObjectsCleanup) SetService(v string) *ChiObjectsCleanup {
- if c == nil {
- return nil
- }
- c.Service = v
- return c
-}
-
-// GetSecret gets secret
-func (c *ChiObjectsCleanup) GetSecret() string {
- if c == nil {
- return ""
- }
- return c.Secret
-}
-
-// SetSecret sets secret
-func (c *ChiObjectsCleanup) SetSecret(v string) *ChiObjectsCleanup {
- if c == nil {
- return nil
- }
- c.Secret = v
- return c
-}
-
-// ChiCleanup defines cleanup
-type ChiCleanup struct {
- // UnknownObjects specifies cleanup of unknown objects
- UnknownObjects *ChiObjectsCleanup `json:"unknownObjects,omitempty" yaml:"unknownObjects,omitempty"`
- // ReconcileFailedObjects specifies cleanup of failed objects
- ReconcileFailedObjects *ChiObjectsCleanup `json:"reconcileFailedObjects,omitempty" yaml:"reconcileFailedObjects,omitempty"`
-}
-
-// NewChiCleanup creates new cleanup
-func NewChiCleanup() *ChiCleanup {
- return new(ChiCleanup)
-}
-
-// MergeFrom merges from specified cleanup
-func (t *ChiCleanup) MergeFrom(from *ChiCleanup, _type MergeType) *ChiCleanup {
- if from == nil {
- return t
- }
-
- if t == nil {
- t = NewChiCleanup()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- case MergeTypeOverrideByNonEmptyValues:
- }
-
- t.UnknownObjects = t.UnknownObjects.MergeFrom(from.UnknownObjects, _type)
- t.ReconcileFailedObjects = t.ReconcileFailedObjects.MergeFrom(from.ReconcileFailedObjects, _type)
-
- return t
-}
-
-// GetUnknownObjects gets unknown objects cleanup
-func (t *ChiCleanup) GetUnknownObjects() *ChiObjectsCleanup {
- if t == nil {
- return nil
- }
- return t.UnknownObjects
-}
-
-// DefaultUnknownObjects makes default cleanup for unknown objects
-func (t *ChiCleanup) DefaultUnknownObjects() *ChiObjectsCleanup {
- return NewChiObjectsCleanup().
- SetStatefulSet(ObjectsCleanupDelete).
- SetPVC(ObjectsCleanupDelete).
- SetConfigMap(ObjectsCleanupDelete).
- SetService(ObjectsCleanupDelete)
-}
-
-// GetReconcileFailedObjects gets failed objects cleanup
-func (t *ChiCleanup) GetReconcileFailedObjects() *ChiObjectsCleanup {
- if t == nil {
- return nil
- }
- return t.ReconcileFailedObjects
-}
-
-// DefaultReconcileFailedObjects makes default cleanup for failed objects
-func (t *ChiCleanup) DefaultReconcileFailedObjects() *ChiObjectsCleanup {
- return NewChiObjectsCleanup().
- SetStatefulSet(ObjectsCleanupRetain).
- SetPVC(ObjectsCleanupRetain).
- SetConfigMap(ObjectsCleanupRetain).
- SetService(ObjectsCleanupRetain)
-}
-
-// SetDefaults sets defaults for cleanup
-func (t *ChiCleanup) SetDefaults() *ChiCleanup {
- if t == nil {
- return nil
- }
- t.UnknownObjects = t.DefaultUnknownObjects()
- t.ReconcileFailedObjects = t.DefaultReconcileFailedObjects()
- return t
-}
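Taken together, the defaults wired up above mean leftover objects of unknown origin are deleted while objects from failed reconciles are retained for inspection. A sketch, not part of the patch, assuming the same package scope with fmt imported:

cleanup := NewChiCleanup().SetDefaults()
fmt.Println(cleanup.GetUnknownObjects().GetStatefulSet())         // Delete
fmt.Println(cleanup.GetReconcileFailedObjects().GetStatefulSet()) // Retain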
-
-// ChiReconciling defines CHI reconciling struct
-type ChiReconciling struct {
- // About to be DEPRECATED
- Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
- // ConfigMapPropagationTimeout specifies timeout for ConfigMap to propagate
- ConfigMapPropagationTimeout int `json:"configMapPropagationTimeout,omitempty" yaml:"configMapPropagationTimeout,omitempty"`
- // Cleanup specifies cleanup behavior
- Cleanup *ChiCleanup `json:"cleanup,omitempty" yaml:"cleanup,omitempty"`
-}
-
-// NewChiReconciling creates new reconciling
-func NewChiReconciling() *ChiReconciling {
- return new(ChiReconciling)
-}
-
-// MergeFrom merges from specified reconciling
-func (t *ChiReconciling) MergeFrom(from *ChiReconciling, _type MergeType) *ChiReconciling {
- if from == nil {
- return t
- }
-
- if t == nil {
- t = NewChiReconciling()
- }
-
- switch _type {
- case MergeTypeFillEmptyValues:
- if t.Policy == "" {
- t.Policy = from.Policy
- }
- if t.ConfigMapPropagationTimeout == 0 {
- t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
- }
- case MergeTypeOverrideByNonEmptyValues:
- if from.Policy != "" {
- // Override by non-empty values only
- t.Policy = from.Policy
- }
- if from.ConfigMapPropagationTimeout != 0 {
- // Override by non-empty values only
- t.ConfigMapPropagationTimeout = from.ConfigMapPropagationTimeout
- }
- }
-
- t.Cleanup = t.Cleanup.MergeFrom(from.Cleanup, _type)
-
- return t
-}
-
-// SetDefaults sets default values for reconciling
-func (t *ChiReconciling) SetDefaults() *ChiReconciling {
- if t == nil {
- return nil
- }
- t.Policy = ReconcilingPolicyUnspecified
- t.ConfigMapPropagationTimeout = 10
- t.Cleanup = NewChiCleanup().SetDefaults()
- return t
-}
-
-// GetPolicy gets policy
-func (t *ChiReconciling) GetPolicy() string {
- if t == nil {
- return ""
- }
- return t.Policy
-}
-
-// SetPolicy sets policy
-func (t *ChiReconciling) SetPolicy(p string) {
- if t == nil {
- return
- }
- t.Policy = p
-}
-
-// GetConfigMapPropagationTimeout gets config map propagation timeout
-func (t *ChiReconciling) GetConfigMapPropagationTimeout() int {
- if t == nil {
- return 0
- }
- return t.ConfigMapPropagationTimeout
-}
-
-// SetConfigMapPropagationTimeout sets config map propagation timeout
-func (t *ChiReconciling) SetConfigMapPropagationTimeout(timeout int) {
- if t == nil {
- return
- }
- t.ConfigMapPropagationTimeout = timeout
-}
-
-// GetConfigMapPropagationTimeoutDuration gets config map propagation timeout duration
-func (t *ChiReconciling) GetConfigMapPropagationTimeoutDuration() time.Duration {
- if t == nil {
- return 0
- }
- return time.Duration(t.GetConfigMapPropagationTimeout()) * time.Second
-}
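Since the timeout is stored as whole seconds and converted on read, the defaults above yield a 10-second wait. A sketch, not part of the patch, assuming the same package scope with fmt imported:

r := NewChiReconciling().SetDefaults() // ConfigMapPropagationTimeout = 10
fmt.Println(r.GetConfigMapPropagationTimeoutDuration()) // 10s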
-
-// Possible reconcile policy values
-const (
- ReconcilingPolicyUnspecified = "unspecified"
- ReconcilingPolicyWait = "wait"
- ReconcilingPolicyNoWait = "nowait"
-)
-
-// IsReconcilingPolicyWait checks whether reconcile policy is "wait"
-func (t *ChiReconciling) IsReconcilingPolicyWait() bool {
- return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyWait
-}
-
-// IsReconcilingPolicyNoWait checks whether reconcile policy is "nowait"
-func (t *ChiReconciling) IsReconcilingPolicyNoWait() bool {
- return strings.ToLower(t.GetPolicy()) == ReconcilingPolicyNoWait
-}
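Both predicates lower-case the stored policy before comparing, so the match is case-insensitive. A sketch, not part of the patch:

r := NewChiReconciling()
r.SetPolicy("WAIT")
fmt.Println(r.IsReconcilingPolicyWait())   // true
fmt.Println(r.IsReconcilingPolicyNoWait()) // false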
-
-// GetCleanup gets cleanup
-func (t *ChiReconciling) GetCleanup() *ChiCleanup {
- if t == nil {
- return nil
- }
- return t.Cleanup
-}
-
-// ChiTemplateNames defines references to .spec.templates to be used on current level of cluster
-type ChiTemplateNames struct {
- HostTemplate string `json:"hostTemplate,omitempty" yaml:"hostTemplate,omitempty"`
- PodTemplate string `json:"podTemplate,omitempty" yaml:"podTemplate,omitempty"`
- DataVolumeClaimTemplate string `json:"dataVolumeClaimTemplate,omitempty" yaml:"dataVolumeClaimTemplate,omitempty"`
- LogVolumeClaimTemplate string `json:"logVolumeClaimTemplate,omitempty" yaml:"logVolumeClaimTemplate,omitempty"`
- ServiceTemplate string `json:"serviceTemplate,omitempty" yaml:"serviceTemplate,omitempty"`
- ClusterServiceTemplate string `json:"clusterServiceTemplate,omitempty" yaml:"clusterServiceTemplate,omitempty"`
- ShardServiceTemplate string `json:"shardServiceTemplate,omitempty" yaml:"shardServiceTemplate,omitempty"`
- ReplicaServiceTemplate string `json:"replicaServiceTemplate,omitempty" yaml:"replicaServiceTemplate,omitempty"`
-
- // VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
- // !!! DEPRECATED !!!
- VolumeClaimTemplate string `json:"volumeClaimTemplate,omitempty" yaml:"volumeClaimTemplate,omitempty"`
-}
-
-// ChiShard defines item of a shard section of .spec.configuration.clusters[n].shards
-// TODO unify with ChiReplica based on HostsSet
-type ChiShard struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Weight *int `json:"weight,omitempty" yaml:"weight,omitempty"`
- InternalReplication *StringBool `json:"internalReplication,omitempty" yaml:"internalReplication,omitempty"`
- Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
- ReplicasCount int `json:"replicasCount,omitempty" yaml:"replicasCount,omitempty"`
- // TODO refactor into map[string]ChiHost
- Hosts []*ChiHost `json:"replicas,omitempty" yaml:"replicas,omitempty"`
-
- Runtime ChiShardRuntime `json:"-" yaml:"-"`
-
- // DefinitionType is DEPRECATED - to be removed soon
- DefinitionType string `json:"definitionType,omitempty" yaml:"definitionType,omitempty"`
-}
-
-type ChiShardRuntime struct {
- Address ChiShardAddress `json:"-" yaml:"-"`
- CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
-}
-
-// ChiReplica defines item of a replica section of .spec.configuration.clusters[n].replicas
-// TODO unify with ChiShard based on HostsSet
-type ChiReplica struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Settings *Settings `json:"settings,omitempty" yaml:"settings,omitempty"`
- Files *Settings `json:"files,omitempty" yaml:"files,omitempty"`
- Templates *ChiTemplateNames `json:"templates,omitempty" yaml:"templates,omitempty"`
- ShardsCount int `json:"shardsCount,omitempty" yaml:"shardsCount,omitempty"`
- // TODO refactor into map[string]ChiHost
- Hosts []*ChiHost `json:"shards,omitempty" yaml:"shards,omitempty"`
-
- Runtime ChiReplicaRuntime `json:"-" yaml:"-"`
-}
-
-type ChiReplicaRuntime struct {
- Address ChiReplicaAddress `json:"-" yaml:"-"`
- CHI *ClickHouseInstallation `json:"-" yaml:"-" testdiff:"ignore"`
-}
-
-// ChiShardAddress defines address of a shard within ClickHouseInstallation
-type ChiShardAddress struct {
- Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
- CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
- ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
- ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
- ShardName string `json:"shardName,omitempty" yaml:"shardName,omitempty"`
- ShardIndex int `json:"shardIndex,omitempty" yaml:"shardIndex,omitempty"`
-}
-
-// ChiReplicaAddress defines address of a replica within ClickHouseInstallation
-type ChiReplicaAddress struct {
- Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
- CHIName string `json:"chiName,omitempty" yaml:"chiName,omitempty"`
- ClusterName string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
- ClusterIndex int `json:"clusterIndex,omitempty" yaml:"clusterIndex,omitempty"`
- ReplicaName string `json:"replicaName,omitempty" yaml:"replicaName,omitempty"`
- ReplicaIndex int `json:"replicaIndex,omitempty" yaml:"replicaIndex,omitempty"`
-}
-
-// HostTemplate defines full Host Template
-type HostTemplate struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- PortDistribution []PortDistribution `json:"portDistribution,omitempty" yaml:"portDistribution,omitempty"`
- Spec ChiHost `json:"spec,omitempty" yaml:"spec,omitempty"`
-}
-
-// PortDistribution defines port distribution
-type PortDistribution struct {
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
-}
-
-// ChiHostConfig defines additional data related to a host
-type ChiHostConfig struct {
- ZookeeperFingerprint string `json:"zookeeperfingerprint" yaml:"zookeeperfingerprint"`
- SettingsFingerprint string `json:"settingsfingerprint" yaml:"settingsfingerprint"`
- FilesFingerprint string `json:"filesfingerprint" yaml:"filesfingerprint"`
-}
-
-// Templates defines templates section of .spec
-type Templates struct {
- // Templates
- HostTemplates []HostTemplate `json:"hostTemplates,omitempty" yaml:"hostTemplates,omitempty"`
- PodTemplates []PodTemplate `json:"podTemplates,omitempty" yaml:"podTemplates,omitempty"`
- VolumeClaimTemplates []VolumeClaimTemplate `json:"volumeClaimTemplates,omitempty" yaml:"volumeClaimTemplates,omitempty"`
- ServiceTemplates []ServiceTemplate `json:"serviceTemplates,omitempty" yaml:"serviceTemplates,omitempty"`
-
- // Index maps template name to template itself
- HostTemplatesIndex *HostTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
- PodTemplatesIndex *PodTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
- VolumeClaimTemplatesIndex *VolumeClaimTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
- ServiceTemplatesIndex *ServiceTemplatesIndex `json:",omitempty" yaml:",omitempty" testdiff:"ignore"`
-}
-
-// PodTemplate defines full Pod Template, directly used by StatefulSet
-type PodTemplate struct {
- Name string `json:"name" yaml:"name"`
- GenerateName string `json:"generateName,omitempty" yaml:"generateName,omitempty"`
- Zone PodTemplateZone `json:"zone,omitempty" yaml:"zone,omitempty"`
- PodDistribution []PodDistribution `json:"podDistribution,omitempty" yaml:"podDistribution,omitempty"`
- ObjectMeta meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec core.PodSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
-}
-
-// PodTemplateZone defines pod template zone
-type PodTemplateZone struct {
- Key string `json:"key,omitempty" yaml:"key,omitempty"`
- Values []string `json:"values,omitempty" yaml:"values,omitempty"`
-}
-
-// PodDistribution defines pod distribution
-type PodDistribution struct {
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Scope string `json:"scope,omitempty" yaml:"scope,omitempty"`
- Number int `json:"number,omitempty" yaml:"number,omitempty"`
- TopologyKey string `json:"topologyKey,omitempty" yaml:"topologyKey,omitempty"`
-}
-
-// ServiceTemplate defines CHI service template
-type ServiceTemplate struct {
- Name string `json:"name" yaml:"name"`
- GenerateName string `json:"generateName,omitempty" yaml:"generateName,omitempty"`
- ObjectMeta meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
- Spec core.ServiceSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
-}
-
-// ChiDistributedDDL defines distributedDDL section of .spec.defaults
-type ChiDistributedDDL struct {
- Profile string `json:"profile,omitempty" yaml:"profile"`
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClickHouseInstallationList defines a list of ClickHouseInstallation resources
@@ -758,8 +113,3 @@ type ClickHouseOperatorConfigurationList struct {
meta.ListMeta `json:"metadata" yaml:"metadata"`
Items []ClickHouseOperatorConfiguration `json:"items" yaml:"items"`
}
-
-// Secured interface for nodes and hosts
-type Secured interface {
- IsSecure() bool
-}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index d457de29f..6f7c83ed9 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -22,60 +22,13 @@ limitations under the License.
package v1
import (
+ types "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
swversion "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in CHISelector) DeepCopyInto(out *CHISelector) {
- {
- in := &in
- *out = make(CHISelector, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- return
- }
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CHISelector.
-func (in CHISelector) DeepCopy() CHISelector {
- if in == nil {
- return nil
- }
- out := new(CHISelector)
- in.DeepCopyInto(out)
- return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiCleanup) DeepCopyInto(out *ChiCleanup) {
- *out = *in
- if in.UnknownObjects != nil {
- in, out := &in.UnknownObjects, &out.UnknownObjects
- *out = new(ChiObjectsCleanup)
- **out = **in
- }
- if in.ReconcileFailedObjects != nil {
- in, out := &in.ReconcileFailedObjects, &out.ReconcileFailedObjects
- *out = new(ChiObjectsCleanup)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiCleanup.
-func (in *ChiCleanup) DeepCopy() *ChiCleanup {
- if in == nil {
- return nil
- }
- out := new(ChiCleanup)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiClusterAddress) DeepCopyInto(out *ChiClusterAddress) {
*out = *in
@@ -97,16 +50,24 @@ func (in *ChiClusterLayout) DeepCopyInto(out *ChiClusterLayout) {
*out = *in
if in.Shards != nil {
in, out := &in.Shards, &out.Shards
- *out = make([]ChiShard, len(*in))
+ *out = make([]*ChiShard, len(*in))
for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ChiShard)
+ (*in).DeepCopyInto(*out)
+ }
}
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
- *out = make([]ChiReplica, len(*in))
+ *out = make([]*ChiReplica, len(*in))
for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ChiReplica)
+ (*in).DeepCopyInto(*out)
+ }
}
}
if in.HostsField != nil {
@@ -128,179 +89,9 @@ func (in *ChiClusterLayout) DeepCopy() *ChiClusterLayout {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiDefaults) DeepCopyInto(out *ChiDefaults) {
- *out = *in
- if in.ReplicasUseFQDN != nil {
- in, out := &in.ReplicasUseFQDN, &out.ReplicasUseFQDN
- *out = new(StringBool)
- **out = **in
- }
- if in.DistributedDDL != nil {
- in, out := &in.DistributedDDL, &out.DistributedDDL
- *out = new(ChiDistributedDDL)
- **out = **in
- }
- if in.StorageManagement != nil {
- in, out := &in.StorageManagement, &out.StorageManagement
- *out = new(StorageManagement)
- **out = **in
- }
- if in.Templates != nil {
- in, out := &in.Templates, &out.Templates
- *out = new(ChiTemplateNames)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiDefaults.
-func (in *ChiDefaults) DeepCopy() *ChiDefaults {
- if in == nil {
- return nil
- }
- out := new(ChiDefaults)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiDistributedDDL) DeepCopyInto(out *ChiDistributedDDL) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiDistributedDDL.
-func (in *ChiDistributedDDL) DeepCopy() *ChiDistributedDDL {
- if in == nil {
- return nil
- }
- out := new(ChiDistributedDDL)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiHost) DeepCopyInto(out *ChiHost) {
- *out = *in
- if in.Insecure != nil {
- in, out := &in.Insecure, &out.Insecure
- *out = new(StringBool)
- **out = **in
- }
- if in.Secure != nil {
- in, out := &in.Secure, &out.Secure
- *out = new(StringBool)
- **out = **in
- }
- if in.Settings != nil {
- in, out := &in.Settings, &out.Settings
- *out = new(Settings)
- (*in).DeepCopyInto(*out)
- }
- if in.Files != nil {
- in, out := &in.Files, &out.Files
- *out = new(Settings)
- (*in).DeepCopyInto(*out)
- }
- if in.Templates != nil {
- in, out := &in.Templates, &out.Templates
- *out = new(ChiTemplateNames)
- **out = **in
- }
- in.Runtime.DeepCopyInto(&out.Runtime)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHost.
-func (in *ChiHost) DeepCopy() *ChiHost {
- if in == nil {
- return nil
- }
- out := new(ChiHost)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiHostAddress) DeepCopyInto(out *ChiHostAddress) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHostAddress.
-func (in *ChiHostAddress) DeepCopy() *ChiHostAddress {
- if in == nil {
- return nil
- }
- out := new(ChiHostAddress)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiHostConfig) DeepCopyInto(out *ChiHostConfig) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHostConfig.
-func (in *ChiHostConfig) DeepCopy() *ChiHostConfig {
- if in == nil {
- return nil
- }
- out := new(ChiHostConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiHostReconcileAttributesCounters) DeepCopyInto(out *ChiHostReconcileAttributesCounters) {
- *out = *in
- if in.status != nil {
- in, out := &in.status, &out.status
- *out = make(map[ObjectStatus]int, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHostReconcileAttributesCounters.
-func (in *ChiHostReconcileAttributesCounters) DeepCopy() *ChiHostReconcileAttributesCounters {
- if in == nil {
- return nil
- }
- out := new(ChiHostReconcileAttributesCounters)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiHostRuntime) DeepCopyInto(out *ChiHostRuntime) {
+func (in *ChiClusterRuntime) DeepCopyInto(out *ChiClusterRuntime) {
*out = *in
out.Address = in.Address
- out.Config = in.Config
- if in.Version != nil {
- in, out := &in.Version, &out.Version
- *out = new(swversion.SoftWareVersion)
- **out = **in
- }
- if in.reconcileAttributes != nil {
- in, out := &in.reconcileAttributes, &out.reconcileAttributes
- *out = new(HostReconcileAttributes)
- **out = **in
- }
- if in.CurStatefulSet != nil {
- in, out := &in.CurStatefulSet, &out.CurStatefulSet
- *out = new(appsv1.StatefulSet)
- (*in).DeepCopyInto(*out)
- }
- if in.DesiredStatefulSet != nil {
- in, out := &in.DesiredStatefulSet, &out.DesiredStatefulSet
- *out = new(appsv1.StatefulSet)
- (*in).DeepCopyInto(*out)
- }
if in.CHI != nil {
in, out := &in.CHI, &out.CHI
*out = new(ClickHouseInstallation)
@@ -309,49 +100,12 @@ func (in *ChiHostRuntime) DeepCopyInto(out *ChiHostRuntime) {
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiHostRuntime.
-func (in *ChiHostRuntime) DeepCopy() *ChiHostRuntime {
- if in == nil {
- return nil
- }
- out := new(ChiHostRuntime)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiObjectsCleanup) DeepCopyInto(out *ChiObjectsCleanup) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiObjectsCleanup.
-func (in *ChiObjectsCleanup) DeepCopy() *ChiObjectsCleanup {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiClusterRuntime.
+func (in *ChiClusterRuntime) DeepCopy() *ChiClusterRuntime {
if in == nil {
return nil
}
- out := new(ChiObjectsCleanup)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiReconciling) DeepCopyInto(out *ChiReconciling) {
- *out = *in
- if in.Cleanup != nil {
- in, out := &in.Cleanup, &out.Cleanup
- *out = new(ChiCleanup)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiReconciling.
-func (in *ChiReconciling) DeepCopy() *ChiReconciling {
- if in == nil {
- return nil
- }
- out := new(ChiReconciling)
+ out := new(ChiClusterRuntime)
in.DeepCopyInto(out)
return out
}
@@ -371,16 +125,16 @@ func (in *ChiReplica) DeepCopyInto(out *ChiReplica) {
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
- *out = new(ChiTemplateNames)
+ *out = new(TemplatesList)
**out = **in
}
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
- *out = make([]*ChiHost, len(*in))
+ *out = make([]*Host, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(ChiHost)
+ *out = new(Host)
(*in).DeepCopyInto(*out)
}
}
@@ -447,7 +201,7 @@ func (in *ChiShard) DeepCopyInto(out *ChiShard) {
}
if in.InternalReplication != nil {
in, out := &in.InternalReplication, &out.InternalReplication
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.Settings != nil {
@@ -462,16 +216,16 @@ func (in *ChiShard) DeepCopyInto(out *ChiShard) {
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
- *out = new(ChiTemplateNames)
+ *out = new(TemplatesList)
**out = **in
}
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
- *out = make([]*ChiHost, len(*in))
+ *out = make([]*Host, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(ChiHost)
+ *out = new(Host)
(*in).DeepCopyInto(*out)
}
}
@@ -533,17 +287,27 @@ func (in *ChiSpec) DeepCopyInto(out *ChiSpec) {
*out = *in
if in.TaskID != nil {
in, out := &in.TaskID, &out.TaskID
- *out = new(string)
+ *out = new(types.String)
**out = **in
}
if in.Stop != nil {
in, out := &in.Stop, &out.Stop
- *out = new(StringBool)
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.Restart != nil {
+ in, out := &in.Restart, &out.Restart
+ *out = new(types.String)
**out = **in
}
if in.Troubleshoot != nil {
in, out := &in.Troubleshoot, &out.Troubleshoot
- *out = new(StringBool)
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.NamespaceDomainPattern != nil {
+ in, out := &in.NamespaceDomainPattern, &out.NamespaceDomainPattern
+ *out = new(types.String)
**out = **in
}
if in.Templating != nil {
@@ -553,12 +317,12 @@ func (in *ChiSpec) DeepCopyInto(out *ChiSpec) {
}
if in.Reconciling != nil {
in, out := &in.Reconciling, &out.Reconciling
- *out = new(ChiReconciling)
+ *out = new(Reconciling)
(*in).DeepCopyInto(*out)
}
if in.Defaults != nil {
in, out := &in.Defaults, &out.Defaults
- *out = new(ChiDefaults)
+ *out = new(Defaults)
(*in).DeepCopyInto(*out)
}
if in.Configuration != nil {
@@ -595,106 +359,12 @@ func (in *ChiSpec) DeepCopy() *ChiSpec {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiStatus) DeepCopyInto(out *ChiStatus) {
- *out = *in
- if in.TaskIDsStarted != nil {
- in, out := &in.TaskIDsStarted, &out.TaskIDsStarted
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.TaskIDsCompleted != nil {
- in, out := &in.TaskIDsCompleted, &out.TaskIDsCompleted
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Actions != nil {
- in, out := &in.Actions, &out.Actions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Errors != nil {
- in, out := &in.Errors, &out.Errors
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Pods != nil {
- in, out := &in.Pods, &out.Pods
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.PodIPs != nil {
- in, out := &in.PodIPs, &out.PodIPs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.FQDNs != nil {
- in, out := &in.FQDNs, &out.FQDNs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.NormalizedCHI != nil {
- in, out := &in.NormalizedCHI, &out.NormalizedCHI
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
- if in.NormalizedCHICompleted != nil {
- in, out := &in.NormalizedCHICompleted, &out.NormalizedCHICompleted
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
- if in.HostsWithTablesCreated != nil {
- in, out := &in.HostsWithTablesCreated, &out.HostsWithTablesCreated
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.UsedTemplates != nil {
- in, out := &in.UsedTemplates, &out.UsedTemplates
- *out = make([]*TemplateRef, len(*in))
- for i := range *in {
- if (*in)[i] != nil {
- in, out := &(*in)[i], &(*out)[i]
- *out = new(TemplateRef)
- **out = **in
- }
- }
- }
- out.mu = in.mu
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiStatus.
-func (in *ChiStatus) DeepCopy() *ChiStatus {
- if in == nil {
- return nil
- }
- out := new(ChiStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiTemplateNames) DeepCopyInto(out *ChiTemplateNames) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiTemplateNames.
-func (in *ChiTemplateNames) DeepCopy() *ChiTemplateNames {
- if in == nil {
- return nil
- }
- out := new(ChiTemplateNames)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChiTemplating) DeepCopyInto(out *ChiTemplating) {
*out = *in
if in.CHISelector != nil {
in, out := &in.CHISelector, &out.CHISelector
- *out = make(CHISelector, len(*in))
+ *out = make(TargetSelector, len(*in))
for key, val := range *in {
(*out)[key] = val
}
@@ -713,45 +383,27 @@ func (in *ChiTemplating) DeepCopy() *ChiTemplating {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiZookeeperConfig) DeepCopyInto(out *ChiZookeeperConfig) {
+func (in *Cleanup) DeepCopyInto(out *Cleanup) {
*out = *in
- if in.Nodes != nil {
- in, out := &in.Nodes, &out.Nodes
- *out = make([]ChiZookeeperNode, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiZookeeperConfig.
-func (in *ChiZookeeperConfig) DeepCopy() *ChiZookeeperConfig {
- if in == nil {
- return nil
+ if in.UnknownObjects != nil {
+ in, out := &in.UnknownObjects, &out.UnknownObjects
+ *out = new(ObjectsCleanup)
+ **out = **in
}
- out := new(ChiZookeeperConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChiZookeeperNode) DeepCopyInto(out *ChiZookeeperNode) {
- *out = *in
- if in.Secure != nil {
- in, out := &in.Secure, &out.Secure
- *out = new(StringBool)
+ if in.ReconcileFailedObjects != nil {
+ in, out := &in.ReconcileFailedObjects, &out.ReconcileFailedObjects
+ *out = new(ObjectsCleanup)
**out = **in
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChiZookeeperNode.
-func (in *ChiZookeeperNode) DeepCopy() *ChiZookeeperNode {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cleanup.
+func (in *Cleanup) DeepCopy() *Cleanup {
if in == nil {
return nil
}
- out := new(ChiZookeeperNode)
+ out := new(Cleanup)
in.DeepCopyInto(out)
return out
}
@@ -764,7 +416,7 @@ func (in *ClickHouseInstallation) DeepCopyInto(out *ClickHouseInstallation) {
in.Spec.DeepCopyInto(&out.Spec)
if in.Status != nil {
in, out := &in.Status, &out.Status
- *out = new(ChiStatus)
+ *out = new(Status)
(*in).DeepCopyInto(*out)
}
if in.runtime != nil {
@@ -858,7 +510,7 @@ func (in *ClickHouseInstallationTemplate) DeepCopyInto(out *ClickHouseInstallati
in.Spec.DeepCopyInto(&out.Spec)
if in.Status != nil {
in, out := &in.Status, &out.Status
- *out = new(ChiStatus)
+ *out = new(Status)
(*in).DeepCopyInto(*out)
}
if in.runtime != nil {
@@ -987,7 +639,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in
if in.Zookeeper != nil {
in, out := &in.Zookeeper, &out.Zookeeper
- *out = new(ChiZookeeperConfig)
+ *out = new(ZookeeperConfig)
(*in).DeepCopyInto(*out)
}
if in.Settings != nil {
@@ -1002,7 +654,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
- *out = new(ChiTemplateNames)
+ *out = new(TemplatesList)
**out = **in
}
if in.SchemaPolicy != nil {
@@ -1012,12 +664,12 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
}
if in.Insecure != nil {
in, out := &in.Insecure, &out.Insecure
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.Secure != nil {
in, out := &in.Secure, &out.Secure
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.Secret != nil {
@@ -1025,6 +677,11 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = new(ClusterSecret)
(*in).DeepCopyInto(*out)
}
+ if in.PDBMaxUnavailable != nil {
+ in, out := &in.PDBMaxUnavailable, &out.PDBMaxUnavailable
+ *out = new(types.Int32)
+ **out = **in
+ }
if in.Layout != nil {
in, out := &in.Layout, &out.Layout
*out = new(ChiClusterLayout)
@@ -1044,34 +701,12 @@ func (in *Cluster) DeepCopy() *Cluster {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterRuntime) DeepCopyInto(out *ClusterRuntime) {
- *out = *in
- out.Address = in.Address
- if in.CHI != nil {
- in, out := &in.CHI, &out.CHI
- *out = new(ClickHouseInstallation)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRuntime.
-func (in *ClusterRuntime) DeepCopy() *ClusterRuntime {
- if in == nil {
- return nil
- }
- out := new(ClusterRuntime)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSecret) DeepCopyInto(out *ClusterSecret) {
*out = *in
if in.Auto != nil {
in, out := &in.Auto, &out.Auto
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.ValueFrom != nil {
@@ -1095,22 +730,22 @@ func (in *ClusterSecret) DeepCopy() *ClusterSecret {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComparableAttributes) DeepCopyInto(out *ComparableAttributes) {
*out = *in
- if in.AdditionalEnvVars != nil {
- in, out := &in.AdditionalEnvVars, &out.AdditionalEnvVars
+ if in.additionalEnvVars != nil {
+ in, out := &in.additionalEnvVars, &out.additionalEnvVars
*out = make([]corev1.EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.AdditionalVolumes != nil {
- in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
+ if in.additionalVolumes != nil {
+ in, out := &in.additionalVolumes, &out.additionalVolumes
*out = make([]corev1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.AdditionalVolumeMounts != nil {
- in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts
+ if in.additionalVolumeMounts != nil {
+ in, out := &in.additionalVolumeMounts, &out.additionalVolumeMounts
*out = make([]corev1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
@@ -1150,7 +785,7 @@ func (in *Configuration) DeepCopyInto(out *Configuration) {
*out = *in
if in.Zookeeper != nil {
in, out := &in.Zookeeper, &out.Zookeeper
- *out = new(ChiZookeeperConfig)
+ *out = new(ZookeeperConfig)
(*in).DeepCopyInto(*out)
}
if in.Users != nil {
@@ -1203,159 +838,335 @@ func (in *Configuration) DeepCopy() *Configuration {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CopyCHIOptions) DeepCopyInto(out *CopyCHIOptions) {
+func (in *DataSource) DeepCopyInto(out *DataSource) {
+ *out = *in
+ if in.SecretKeyRef != nil {
+ in, out := &in.SecretKeyRef, &out.SecretKeyRef
+ *out = new(corev1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
+func (in *DataSource) DeepCopy() *DataSource {
+ if in == nil {
+ return nil
+ }
+ out := new(DataSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Defaults) DeepCopyInto(out *Defaults) {
*out = *in
+ if in.ReplicasUseFQDN != nil {
+ in, out := &in.ReplicasUseFQDN, &out.ReplicasUseFQDN
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ if in.DistributedDDL != nil {
+ in, out := &in.DistributedDDL, &out.DistributedDDL
+ *out = new(DistributedDDL)
+ **out = **in
+ }
+ if in.StorageManagement != nil {
+ in, out := &in.StorageManagement, &out.StorageManagement
+ *out = new(StorageManagement)
+ **out = **in
+ }
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(TemplatesList)
+ **out = **in
+ }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyCHIOptions.
-func (in *CopyCHIOptions) DeepCopy() *CopyCHIOptions {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Defaults.
+func (in *Defaults) DeepCopy() *Defaults {
if in == nil {
return nil
}
- out := new(CopyCHIOptions)
+ out := new(Defaults)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CopyCHIStatusOptions) DeepCopyInto(out *CopyCHIStatusOptions) {
+func (in *DistributedDDL) DeepCopyInto(out *DistributedDDL) {
*out = *in
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyCHIStatusOptions.
-func (in *CopyCHIStatusOptions) DeepCopy() *CopyCHIStatusOptions {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DistributedDDL.
+func (in *DistributedDDL) DeepCopy() *DistributedDDL {
if in == nil {
return nil
}
- out := new(CopyCHIStatusOptions)
+ out := new(DistributedDDL)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CycleAddress) DeepCopyInto(out *CycleAddress) {
+func (in *FillStatusParams) DeepCopyInto(out *FillStatusParams) {
*out = *in
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.FQDNs != nil {
+ in, out := &in.FQDNs, &out.FQDNs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NormalizedCR != nil {
+ in, out := &in.NormalizedCR, &out.NormalizedCR
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CycleAddress.
-func (in *CycleAddress) DeepCopy() *CycleAddress {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FillStatusParams.
+func (in *FillStatusParams) DeepCopy() *FillStatusParams {
if in == nil {
return nil
}
- out := new(CycleAddress)
+ out := new(FillStatusParams)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CycleSpec) DeepCopyInto(out *CycleSpec) {
+func (in *Host) DeepCopyInto(out *Host) {
*out = *in
+ in.HostSecure.DeepCopyInto(&out.HostSecure)
+ in.HostPorts.DeepCopyInto(&out.HostPorts)
+ in.HostSettings.DeepCopyInto(&out.HostSettings)
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(TemplatesList)
+ **out = **in
+ }
+ in.Runtime.DeepCopyInto(&out.Runtime)
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CycleSpec.
-func (in *CycleSpec) DeepCopy() *CycleSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host.
+func (in *Host) DeepCopy() *Host {
if in == nil {
return nil
}
- out := new(CycleSpec)
+ out := new(Host)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DataSource) DeepCopyInto(out *DataSource) {
+func (in *HostAddress) DeepCopyInto(out *HostAddress) {
*out = *in
- if in.SecretKeyRef != nil {
- in, out := &in.SecretKeyRef, &out.SecretKeyRef
- *out = new(corev1.SecretKeySelector)
- (*in).DeepCopyInto(*out)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAddress.
+func (in *HostAddress) DeepCopy() *HostAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(HostAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPorts) DeepCopyInto(out *HostPorts) {
+ *out = *in
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.TCPPort != nil {
+ in, out := &in.TCPPort, &out.TCPPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.TLSPort != nil {
+ in, out := &in.TLSPort, &out.TLSPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.HTTPPort != nil {
+ in, out := &in.HTTPPort, &out.HTTPPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.HTTPSPort != nil {
+ in, out := &in.HTTPSPort, &out.HTTPSPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.InterserverHTTPPort != nil {
+ in, out := &in.InterserverHTTPPort, &out.InterserverHTTPPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.ZKPort != nil {
+ in, out := &in.ZKPort, &out.ZKPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.RaftPort != nil {
+ in, out := &in.RaftPort, &out.RaftPort
+ *out = new(types.Int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPorts.
+func (in *HostPorts) DeepCopy() *HostPorts {
+ if in == nil {
+ return nil
+ }
+ out := new(HostPorts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostReconcileAttributes) DeepCopyInto(out *HostReconcileAttributes) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostReconcileAttributes.
+func (in *HostReconcileAttributes) DeepCopy() *HostReconcileAttributes {
+ if in == nil {
+ return nil
+ }
+ out := new(HostReconcileAttributes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostReconcileAttributesCounters) DeepCopyInto(out *HostReconcileAttributesCounters) {
+ *out = *in
+ if in.status != nil {
+ in, out := &in.status, &out.status
+ *out = make(map[ObjectStatus]int, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
-func (in *DataSource) DeepCopy() *DataSource {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostReconcileAttributesCounters.
+func (in *HostReconcileAttributesCounters) DeepCopy() *HostReconcileAttributesCounters {
if in == nil {
return nil
}
- out := new(DataSource)
+ out := new(HostReconcileAttributesCounters)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FillStatusParams) DeepCopyInto(out *FillStatusParams) {
+func (in *HostRuntime) DeepCopyInto(out *HostRuntime) {
*out = *in
- if in.Pods != nil {
- in, out := &in.Pods, &out.Pods
- *out = make([]string, len(*in))
- copy(*out, *in)
+ out.Address = in.Address
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(swversion.SoftWareVersion)
+ **out = **in
}
- if in.FQDNs != nil {
- in, out := &in.FQDNs, &out.FQDNs
- *out = make([]string, len(*in))
- copy(*out, *in)
+ if in.reconcileAttributes != nil {
+ in, out := &in.reconcileAttributes, &out.reconcileAttributes
+ *out = new(HostReconcileAttributes)
+ **out = **in
}
- if in.NormalizedCHI != nil {
- in, out := &in.NormalizedCHI, &out.NormalizedCHI
- *out = new(ClickHouseInstallation)
+ if in.replicas != nil {
+ in, out := &in.replicas, &out.replicas
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.CurStatefulSet != nil {
+ in, out := &in.CurStatefulSet, &out.CurStatefulSet
+ *out = new(appsv1.StatefulSet)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DesiredStatefulSet != nil {
+ in, out := &in.DesiredStatefulSet, &out.DesiredStatefulSet
+ *out = new(appsv1.StatefulSet)
(*in).DeepCopyInto(*out)
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FillStatusParams.
-func (in *FillStatusParams) DeepCopy() *FillStatusParams {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostRuntime.
+func (in *HostRuntime) DeepCopy() *HostRuntime {
if in == nil {
return nil
}
- out := new(FillStatusParams)
+ out := new(HostRuntime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HostAddress) DeepCopyInto(out *HostAddress) {
+func (in *HostSecure) DeepCopyInto(out *HostSecure) {
*out = *in
- if in.CHIScopeAddress != nil {
- in, out := &in.CHIScopeAddress, &out.CHIScopeAddress
- *out = new(ScopeAddress)
- (*in).DeepCopyInto(*out)
+ if in.Insecure != nil {
+ in, out := &in.Insecure, &out.Insecure
+ *out = new(types.StringBool)
+ **out = **in
}
- if in.ClusterScopeAddress != nil {
- in, out := &in.ClusterScopeAddress, &out.ClusterScopeAddress
- *out = new(ScopeAddress)
- (*in).DeepCopyInto(*out)
+ if in.Secure != nil {
+ in, out := &in.Secure, &out.Secure
+ *out = new(types.StringBool)
+ **out = **in
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAddress.
-func (in *HostAddress) DeepCopy() *HostAddress {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSecure.
+func (in *HostSecure) DeepCopy() *HostSecure {
if in == nil {
return nil
}
- out := new(HostAddress)
+ out := new(HostSecure)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HostReconcileAttributes) DeepCopyInto(out *HostReconcileAttributes) {
+func (in *HostSettings) DeepCopyInto(out *HostSettings) {
*out = *in
+ if in.Settings != nil {
+ in, out := &in.Settings, &out.Settings
+ *out = new(Settings)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = new(Settings)
+ (*in).DeepCopyInto(*out)
+ }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostReconcileAttributes.
-func (in *HostReconcileAttributes) DeepCopy() *HostReconcileAttributes {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSettings.
+func (in *HostSettings) DeepCopy() *HostSettings {
if in == nil {
return nil
}
- out := new(HostReconcileAttributes)
+ out := new(HostSettings)
in.DeepCopyInto(out)
return out
}
@@ -1418,15 +1229,15 @@ func (in *HostsField) DeepCopyInto(out *HostsField) {
*out = *in
if in.Field != nil {
in, out := &in.Field, &out.Field
- *out = make([][]*ChiHost, len(*in))
+ *out = make([][]*Host, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = make([]*ChiHost, len(*in))
+ *out = make([]*Host, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(ChiHost)
+ *out = new(Host)
(*in).DeepCopyInto(*out)
}
}
@@ -1462,12 +1273,29 @@ func (in *ObjectAddress) DeepCopy() *ObjectAddress {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectsCleanup) DeepCopyInto(out *ObjectsCleanup) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectsCleanup.
+func (in *ObjectsCleanup) DeepCopy() *ObjectsCleanup {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectsCleanup)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfig) DeepCopyInto(out *OperatorConfig) {
*out = *in
in.Runtime.DeepCopyInto(&out.Runtime)
in.Watch.DeepCopyInto(&out.Watch)
in.ClickHouse.DeepCopyInto(&out.ClickHouse)
+ in.Keeper.DeepCopyInto(&out.Keeper)
in.Template.DeepCopyInto(&out.Template)
in.Reconcile.DeepCopyInto(&out.Reconcile)
in.Annotation.DeepCopyInto(&out.Annotation)
@@ -1716,6 +1544,23 @@ func (in *OperatorConfigFileRuntime) DeepCopy() *OperatorConfigFileRuntime {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorConfigKeeper) DeepCopyInto(out *OperatorConfigKeeper) {
+ *out = *in
+ in.Config.DeepCopyInto(&out.Config)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigKeeper.
+func (in *OperatorConfigKeeper) DeepCopy() *OperatorConfigKeeper {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorConfigKeeper)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfigLabel) DeepCopyInto(out *OperatorConfigLabel) {
*out = *in
@@ -1784,17 +1629,17 @@ func (in *OperatorConfigReconcileHostWait) DeepCopyInto(out *OperatorConfigRecon
*out = *in
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.Queries != nil {
in, out := &in.Queries, &out.Queries
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
if in.Include != nil {
in, out := &in.Include, &out.Include
- *out = new(StringBool)
+ *out = new(types.StringBool)
**out = **in
}
return
@@ -2069,43 +1914,38 @@ func (in *PortDistribution) DeepCopy() *PortDistribution {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchemaPolicy) DeepCopyInto(out *SchemaPolicy) {
+func (in *Reconciling) DeepCopyInto(out *Reconciling) {
*out = *in
+ if in.Cleanup != nil {
+ in, out := &in.Cleanup, &out.Cleanup
+ *out = new(Cleanup)
+ (*in).DeepCopyInto(*out)
+ }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaPolicy.
-func (in *SchemaPolicy) DeepCopy() *SchemaPolicy {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reconciling.
+func (in *Reconciling) DeepCopy() *Reconciling {
if in == nil {
return nil
}
- out := new(SchemaPolicy)
+ out := new(Reconciling)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScopeAddress) DeepCopyInto(out *ScopeAddress) {
+func (in *SchemaPolicy) DeepCopyInto(out *SchemaPolicy) {
*out = *in
- if in.CycleSpec != nil {
- in, out := &in.CycleSpec, &out.CycleSpec
- *out = new(CycleSpec)
- **out = **in
- }
- if in.CycleAddress != nil {
- in, out := &in.CycleAddress, &out.CycleAddress
- *out = new(CycleAddress)
- **out = **in
- }
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeAddress.
-func (in *ScopeAddress) DeepCopy() *ScopeAddress {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaPolicy.
+func (in *SchemaPolicy) DeepCopy() *SchemaPolicy {
if in == nil {
return nil
}
- out := new(ScopeAddress)
+ out := new(SchemaPolicy)
in.DeepCopyInto(out)
return out
}
@@ -2296,6 +2136,84 @@ func (in *SettingsUser) DeepCopy() *SettingsUser {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Status) DeepCopyInto(out *Status) {
+ *out = *in
+ if in.TaskIDsStarted != nil {
+ in, out := &in.TaskIDsStarted, &out.TaskIDsStarted
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.TaskIDsCompleted != nil {
+ in, out := &in.TaskIDsCompleted, &out.TaskIDsCompleted
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Errors != nil {
+ in, out := &in.Errors, &out.Errors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PodIPs != nil {
+ in, out := &in.PodIPs, &out.PodIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.FQDNs != nil {
+ in, out := &in.FQDNs, &out.FQDNs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NormalizedCR != nil {
+ in, out := &in.NormalizedCR, &out.NormalizedCR
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NormalizedCRCompleted != nil {
+ in, out := &in.NormalizedCRCompleted, &out.NormalizedCRCompleted
+ *out = new(ClickHouseInstallation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HostsWithTablesCreated != nil {
+ in, out := &in.HostsWithTablesCreated, &out.HostsWithTablesCreated
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsedTemplates != nil {
+ in, out := &in.UsedTemplates, &out.UsedTemplates
+ *out = make([]*TemplateRef, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(TemplateRef)
+ **out = **in
+ }
+ }
+ }
+ out.mu = in.mu
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
+func (in *Status) DeepCopy() *Status {
+ if in == nil {
+ return nil
+ }
+ out := new(Status)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageManagement) DeepCopyInto(out *StorageManagement) {
*out = *in
@@ -2312,6 +2230,28 @@ func (in *StorageManagement) DeepCopy() *StorageManagement {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in TargetSelector) DeepCopyInto(out *TargetSelector) {
+ {
+ in := &in
+ *out = make(TargetSelector, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSelector.
+func (in TargetSelector) DeepCopy() TargetSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetSelector)
+ in.DeepCopyInto(out)
+ return *out
+}
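TargetSelector is a map type, which is why the generated DeepCopy above takes a value receiver: plain assignment would alias the underlying map. A sketch, not part of the patch, assuming the renamed type remains map[string]string:

orig := TargetSelector{"app": "clickhouse"}
alias := orig            // shares the underlying map with orig
clone := orig.DeepCopy() // independent copy

alias["app"] = "changed"  // visible through orig as well
clone["app"] = "isolated" // leaves orig untouched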
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TemplateRef) DeepCopyInto(out *TemplateRef) {
*out = *in
@@ -2392,6 +2332,22 @@ func (in *Templates) DeepCopy() *Templates {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplatesList) DeepCopyInto(out *TemplatesList) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplatesList.
+func (in *TemplatesList) DeepCopy() *TemplatesList {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplatesList)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeClaimTemplate) DeepCopyInto(out *VolumeClaimTemplate) {
*out = *in
@@ -2441,3 +2397,74 @@ func (in *VolumeClaimTemplatesIndex) DeepCopy() *VolumeClaimTemplatesIndex {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperConfig) DeepCopyInto(out *ZookeeperConfig) {
+ *out = *in
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = make(ZookeeperNodes, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperConfig.
+func (in *ZookeeperConfig) DeepCopy() *ZookeeperConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ZookeeperConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ZookeeperNode) DeepCopyInto(out *ZookeeperNode) {
+ *out = *in
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(types.Int32)
+ **out = **in
+ }
+ if in.Secure != nil {
+ in, out := &in.Secure, &out.Secure
+ *out = new(types.StringBool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNode.
+func (in *ZookeeperNode) DeepCopy() *ZookeeperNode {
+ if in == nil {
+ return nil
+ }
+ out := new(ZookeeperNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ZookeeperNodes) DeepCopyInto(out *ZookeeperNodes) {
+ {
+ in := &in
+ *out = make(ZookeeperNodes, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodes.
+func (in ZookeeperNodes) DeepCopy() ZookeeperNodes {
+ if in == nil {
+ return nil
+ }
+ out := new(ZookeeperNodes)
+ in.DeepCopyInto(out)
+ return *out
+}
diff --git a/pkg/apis/common/types/copy_cr_options.go b/pkg/apis/common/types/copy_cr_options.go
new file mode 100644
index 000000000..5e1a30515
--- /dev/null
+++ b/pkg/apis/common/types/copy_cr_options.go
@@ -0,0 +1,23 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// CopyCROptions specifies options for custom resource copier
+type CopyCROptions struct {
+	// SkipStatus specifies whether to skip copying the status
+	SkipStatus bool
+	// SkipManagedFields specifies whether to skip copying managed fields
+	SkipManagedFields bool
+}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_host_scope_address.go b/pkg/apis/common/types/host_scope_address.go
similarity index 82%
rename from pkg/apis/clickhouse.altinity.com/v1/type_host_scope_address.go
rename to pkg/apis/common/types/host_scope_address.go
index e9c516dd3..7703030a3 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_host_scope_address.go
+++ b/pkg/apis/common/types/host_scope_address.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1
+package types
// CycleSpec defines spec of a cycle, such as size
type CycleSpec struct {
@@ -108,10 +108,10 @@ func (s *ScopeAddress) Inc() {
s.Index++
}
-// HostAddress specifies address of a host
-type HostAddress struct {
- // CHIScopeAddress specifies address of a host within CHI scope
- CHIScopeAddress *ScopeAddress
+// HostScopeAddress specifies address of a host
+type HostScopeAddress struct {
+ // CRScopeAddress specifies address of a host within CHI scope
+ CRScopeAddress *ScopeAddress
// ClusterScopeAddress specifies address of a host within cluster scope
ClusterScopeAddress *ScopeAddress
// ClusterIndex specifies index of a cluster within CHI
@@ -122,21 +122,11 @@ type HostAddress struct {
ReplicaIndex int
}
-// NewHostAddress creates new HostAddress
-func NewHostAddress(chiScopeCycleSize, clusterScopeCycleSize int) (a *HostAddress) {
- a = &HostAddress{
- CHIScopeAddress: NewScopeAddress(chiScopeCycleSize),
+// NewHostScopeAddress creates new HostScopeAddress
+func NewHostScopeAddress(crScopeCycleSize, clusterScopeCycleSize int) (a *HostScopeAddress) {
+ a = &HostScopeAddress{
+ CRScopeAddress: NewScopeAddress(crScopeCycleSize),
ClusterScopeAddress: NewScopeAddress(clusterScopeCycleSize),
}
return a
}
-
-// WalkHostsAddressFn specifies function to walk over hosts
-type WalkHostsAddressFn func(
- chi *ClickHouseInstallation,
- cluster *Cluster,
- shard *ChiShard,
- replica *ChiReplica,
- host *ChiHost,
- address *HostAddress,
-) error
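
For orientation, a minimal sketch of the renamed API, using only what this hunk exposes (the cycle sizes 3 and 2 are illustrative):

    // Track a host position within CR scope and cluster scope independently.
    addr := types.NewHostScopeAddress(3 /* crScopeCycleSize */, 2 /* clusterScopeCycleSize */)
    addr.CRScopeAddress.Inc()      // advance within the CR-scope cycle
    addr.ClusterScopeAddress.Inc() // advance within the cluster-scope cycle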
diff --git a/pkg/apis/common/types/int32.go b/pkg/apis/common/types/int32.go
new file mode 100644
index 000000000..73d9d518c
--- /dev/null
+++ b/pkg/apis/common/types/int32.go
@@ -0,0 +1,118 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "strconv"
+
+// Int32 defines int32 representation with possibility to be optional
+type Int32 int32
+
+// NewInt32 creates new variable
+func NewInt32(i int32) *Int32 {
+ i32 := new(Int32)
+ *i32 = Int32(i)
+ return i32
+}
+
+// From casts int32
+func (i *Int32) From(value int32) *Int32 {
+ return NewInt32(value)
+}
+
+// String casts to a string
+func (i *Int32) String() string {
+ if i == nil {
+ return ""
+ }
+ return strconv.Itoa(i.IntValue())
+}
+
+// HasValue checks whether value is specified
+func (i *Int32) HasValue() bool {
+ return i != nil
+}
+
+// Value returns value
+func (i *Int32) Value() int32 {
+ if i == nil {
+ return 0
+ }
+
+ return int32(*i)
+}
+
+// IntValue returns int value
+func (i *Int32) IntValue() int {
+ if i == nil {
+ return 0
+ }
+
+ return int(*i)
+}
+
+// IsValid checks whether var has a proper value
+func (i *Int32) IsValid() bool {
+ return i.HasValue()
+}
+
+// Normalize normalizes value with fallback to defaultValue in case initial value is incorrect
+func (i *Int32) Normalize(defaultValue int32) *Int32 {
+ if i.IsValid() {
+ return i
+ }
+
+ // Value is unrecognized, return default value
+ return NewInt32(defaultValue)
+}
+
+// MergeFrom merges value from another variable
+func (i *Int32) MergeFrom(from *Int32) *Int32 {
+ if from == nil {
+ // Nothing to merge from, keep original value
+ return i
+ }
+
+ // From now on we have `from` specified
+
+ if i == nil {
+ // Recipient is not specified, just use `from` value
+ return from
+ }
+
+ // Both recipient and `from` are specified, need to pick one value.
+ // Prefer local value
+ return i
+}
+
+// Equal checks whether is equal to another
+func (i *Int32) Equal(to *Int32) bool {
+ if (i == nil) && (to == nil) {
+ // Consider nil equal
+ return true
+ }
+
+ return i.EqualValue(to)
+}
+
+// EqualValue checks whether has equal values
+func (i *Int32) EqualValue(to *Int32) bool {
+ if !i.HasValue() || !to.HasValue() {
+		// At least one of the values is not specified - not comparable
+ return false
+ }
+
+ // Both have value available, comparable
+ return i.Value() == to.Value()
+}
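
A small self-contained sketch of the optional semantics this type introduces (all calls shown exist in this file; the port numbers are illustrative):

    var unset *types.Int32                      // optional value left unspecified
    tcp := types.NewInt32(9000)
    _ = unset.HasValue()                        // false - methods are nil-safe
    _ = unset.Value()                           // 0
    merged := unset.MergeFrom(tcp)              // 9000 - recipient unset, take `from`
    kept := tcp.MergeFrom(types.NewInt32(9440)) // 9000 - local value wins
    norm := unset.Normalize(8123)               // 8123 - fall back to the default
    _, _, _ = merged, kept, norm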
diff --git a/pkg/model/chi/creator/cluster.go b/pkg/apis/common/types/list.go
similarity index 75%
rename from pkg/model/chi/creator/cluster.go
rename to pkg/apis/common/types/list.go
index 549a82b00..b8770626d 100644
--- a/pkg/model/chi/creator/cluster.go
+++ b/pkg/apis/common/types/list.go
@@ -12,13 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package creator
+package types
-import api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+type List map[string]string
-// NewDefaultCluster
-func NewDefaultCluster() *api.Cluster {
- return &api.Cluster{
- Name: "cluster",
- }
+func (l List) Get(name string) string {
+ return l[name]
}
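
The new List alias is a plain string map with a lookup helper; a quick sketch (the keys are illustrative):

    settings := types.List{"profile": "default"}
    _ = settings.Get("profile") // "default"
    _ = settings.Get("absent")  // "" - missing keys yield the zero value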
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_matchable.go b/pkg/apis/common/types/matchable.go
similarity index 98%
rename from pkg/apis/clickhouse.altinity.com/v1/type_matchable.go
rename to pkg/apis/common/types/matchable.go
index c4dc6d899..e7d24a22b 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_matchable.go
+++ b/pkg/apis/common/types/matchable.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1
+package types
import (
"regexp"
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_port.go b/pkg/apis/common/types/port.go
similarity index 93%
rename from pkg/apis/clickhouse.altinity.com/v1/type_port.go
rename to pkg/apis/common/types/port.go
index c62b3a61d..655b9fb04 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_port.go
+++ b/pkg/apis/common/types/port.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1
+package types
const (
// PortMayBeAssignedLaterOrLeftUnused value means that port
@@ -44,9 +44,9 @@ func IsPortInvalid(port int32) bool {
// - already has own value assigned
// - or has provided value
// - or value falls back to the default
-func EnsurePortValue(port int32, value, _default int32) int32 {
+func EnsurePortValue(port, value, _default *Int32) *Int32 {
// Port may already be explicitly specified in podTemplate or by portDistribution
- if IsPortAssigned(port) {
+ if port.HasValue() {
// Port has a value already
return port
}
@@ -54,7 +54,7 @@ func EnsurePortValue(port int32, value, _default int32) int32 {
// Port has no explicitly assigned value
 // Let's use the provided value
- if IsPortAssigned(value) {
+ if value.HasValue() {
// Provided value is a real value, use it
return value
}
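
The pointer-based signature makes the fallback chain explicit; a hedged sketch (assuming the unshown tail of the function returns the default, as the doc comment states):

    assigned := types.NewInt32(9001)
    _ = types.EnsurePortValue(assigned, types.NewInt32(9000), types.NewInt32(9000)) // 9001 - explicit port wins
    _ = types.EnsurePortValue(nil, types.NewInt32(9005), types.NewInt32(9000))      // 9005 - provided value
    _ = types.EnsurePortValue(nil, nil, types.NewInt32(9000))                       // 9000 - default fallback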
diff --git a/pkg/apis/common/types/status_options.go b/pkg/apis/common/types/status_options.go
new file mode 100644
index 000000000..cbab9cbbb
--- /dev/null
+++ b/pkg/apis/common/types/status_options.go
@@ -0,0 +1,31 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// CopyStatusOptions specifies what parts to copy in status
+type CopyStatusOptions struct {
+ Actions bool
+ Errors bool
+ Normalized bool
+ MainFields bool
+ WholeStatus bool
+ InheritableFields bool
+}
+
+// UpdateStatusOptions defines how to update CHI status
+type UpdateStatusOptions struct {
+ CopyStatusOptions
+ TolerateAbsence bool
+}
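
A sketch of how a caller might fill these options (the field choices are illustrative; only the struct literals are assumed):

    opts := types.UpdateStatusOptions{
        CopyStatusOptions: types.CopyStatusOptions{
            MainFields: true,
            Errors:     true,
        },
        TolerateAbsence: true, // do not fail if the CR vanished mid-update
    }
    _ = opts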
diff --git a/pkg/apis/common/types/string.go b/pkg/apis/common/types/string.go
new file mode 100644
index 000000000..589346fc9
--- /dev/null
+++ b/pkg/apis/common/types/string.go
@@ -0,0 +1,86 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// String defines string representation with possibility to be optional
+type String string
+
+// NewString creates new variable
+func NewString(str string) *String {
+ s := new(String)
+ *s = String(str)
+ return s
+}
+
+// From casts string
+func (s *String) From(value string) *String {
+ return NewString(value)
+}
+
+// String casts to a string
+func (s *String) String() string {
+ if s == nil {
+ return ""
+ }
+ return s.Value()
+}
+
+// HasValue checks whether value is specified
+func (s *String) HasValue() bool {
+ return s != nil
+}
+
+// Value returns value
+func (s *String) Value() string {
+ if s == nil {
+ return ""
+ }
+
+ return string(*s)
+}
+
+// IsValid checks whether var has a proper value
+func (s *String) IsValid() bool {
+ return s.HasValue()
+}
+
+// Normalize normalizes value with fallback to defaultValue in case initial value is incorrect
+func (s *String) Normalize(defaultValue string) *String {
+ if s.IsValid() {
+ return s
+ }
+
+ // Value is unrecognized, return default value
+ return NewString(defaultValue)
+}
+
+// MergeFrom merges value from another variable
+func (s *String) MergeFrom(from *String) *String {
+ if from == nil {
+ // Nothing to merge from, keep original value
+ return s
+ }
+
+ // From now on we have `from` specified
+
+ if s == nil {
+ // Recipient is not specified, just use `from` value
+ return from
+ }
+
+ // Both recipient and `from` are specified, need to pick one value.
+ // Prefer local value
+ return s
+}
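
types.String mirrors the Int32 semantics for strings; one nil-safety detail worth showing (the values are illustrative):

    var user *types.String
    _ = user.Value()                             // "" - nil receiver is safe
    user = user.Normalize("default")             // unspecified, fall back
    _ = user.MergeFrom(types.NewString("admin")) // still "default" - local value wins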
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_string_bool.go b/pkg/apis/common/types/string_bool.go
similarity index 99%
rename from pkg/apis/clickhouse.altinity.com/v1/type_string_bool.go
rename to pkg/apis/common/types/string_bool.go
index c3237416b..ca5b0374a 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_string_bool.go
+++ b/pkg/apis/common/types/string_bool.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1
+package types
import "strings"
diff --git a/pkg/apis/metrics/type_watched_chi.go b/pkg/apis/metrics/watched_chi.go
similarity index 80%
rename from pkg/apis/metrics/type_watched_chi.go
rename to pkg/apis/metrics/watched_chi.go
index bf042bee5..40ab02fbd 100644
--- a/pkg/apis/metrics/type_watched_chi.go
+++ b/pkg/apis/metrics/watched_chi.go
@@ -46,22 +46,22 @@ type WatchedHost struct {
}
// NewWatchedCHI creates new watched CHI
-func NewWatchedCHI(c *api.ClickHouseInstallation) *WatchedCHI {
+func NewWatchedCHI(cr api.ICustomResource) *WatchedCHI {
chi := &WatchedCHI{}
- chi.readFrom(c)
+ chi.readFrom(cr)
return chi
}
-func (chi *WatchedCHI) readFrom(c *api.ClickHouseInstallation) {
+func (chi *WatchedCHI) readFrom(cr api.ICustomResource) {
if chi == nil {
return
}
- chi.Namespace = c.Namespace
- chi.Name = c.Name
- chi.Labels = c.Labels
- chi.Annotations = c.Annotations
+ chi.Namespace = cr.GetNamespace()
+ chi.Name = cr.GetName()
+ chi.Labels = cr.GetLabels()
+ chi.Annotations = cr.GetAnnotations()
- c.WalkClusters(func(cl *api.Cluster) error {
+ cr.WalkClusters(func(cl api.ICluster) error {
cluster := &WatchedCluster{}
cluster.readFrom(cl)
chi.Clusters = append(chi.Clusters, cluster)
@@ -69,7 +69,7 @@ func (chi *WatchedCHI) readFrom(c *api.ClickHouseInstallation) {
})
}
-func (chi *WatchedCHI) isValid() bool {
+func (chi *WatchedCHI) IsValid() bool {
return !chi.empty()
}
@@ -77,11 +77,11 @@ func (chi *WatchedCHI) empty() bool {
return (len(chi.Namespace) == 0) && (len(chi.Name) == 0) && (len(chi.Clusters) == 0)
}
-func (chi *WatchedCHI) indexKey() string {
+func (chi *WatchedCHI) IndexKey() string {
return chi.Namespace + ":" + chi.Name
}
-func (chi *WatchedCHI) walkHosts(f func(*WatchedCHI, *WatchedCluster, *WatchedHost)) {
+func (chi *WatchedCHI) WalkHosts(f func(*WatchedCHI, *WatchedCluster, *WatchedHost)) {
if chi == nil {
return
}
@@ -129,13 +129,13 @@ func (chi *WatchedCHI) String() string {
return string(bytes)
}
-func (cluster *WatchedCluster) readFrom(c *api.Cluster) {
+func (cluster *WatchedCluster) readFrom(c api.ICluster) {
if cluster == nil {
return
}
- cluster.Name = c.Name
+ cluster.Name = c.GetName()
- c.WalkHosts(func(h *api.ChiHost) error {
+ c.WalkHosts(func(h *api.Host) error {
host := &WatchedHost{}
host.readFrom(h)
cluster.Hosts = append(cluster.Hosts, host)
@@ -143,14 +143,14 @@ func (cluster *WatchedCluster) readFrom(c *api.Cluster) {
})
}
-func (host *WatchedHost) readFrom(h *api.ChiHost) {
+func (host *WatchedHost) readFrom(h *api.Host) {
if host == nil {
return
}
host.Name = h.Name
host.Hostname = h.Runtime.Address.FQDN
- host.TCPPort = h.TCPPort
- host.TLSPort = h.TLSPort
- host.HTTPPort = h.HTTPPort
- host.HTTPSPort = h.HTTPSPort
+ host.TCPPort = h.TCPPort.Value()
+ host.TLSPort = h.TLSPort.Value()
+ host.HTTPPort = h.HTTPPort.Value()
+ host.HTTPSPort = h.HTTPSPort.Value()
}
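
With isValid/indexKey/walkHosts now exported, external callers can traverse the snapshot; a hedged sketch (cr stands for any api.ICustomResource the caller already holds, and the package is assumed to be imported as metrics):

    watched := metrics.NewWatchedCHI(cr)
    if watched.IsValid() {
        watched.WalkHosts(func(chi *metrics.WatchedCHI, cluster *metrics.WatchedCluster, host *metrics.WatchedHost) {
            fmt.Printf("%s host %s tcp:%d\n", chi.IndexKey(), host.Hostname, host.TCPPort)
        })
    }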
diff --git a/pkg/apis/swversion/type_software_version.go b/pkg/apis/swversion/software_version.go
similarity index 100%
rename from pkg/apis/swversion/type_software_version.go
rename to pkg/apis/swversion/software_version.go
diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go
index d03a3f04c..2b98cae2f 100644
--- a/pkg/chop/config_manager.go
+++ b/pkg/chop/config_manager.go
@@ -18,7 +18,6 @@ import (
"context"
"errors"
"fmt"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"os"
"os/user"
"path/filepath"
@@ -29,6 +28,7 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
"github.com/altinity/clickhouse-operator/pkg/controller"
)
diff --git a/pkg/chop/kube_machinery.go b/pkg/chop/kube_machinery.go
index 802cfd37f..6220b5eb5 100644
--- a/pkg/chop/kube_machinery.go
+++ b/pkg/chop/kube_machinery.go
@@ -16,7 +16,6 @@ package chop
import (
"fmt"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"os"
"os/user"
"path/filepath"
@@ -29,6 +28,7 @@ import (
log "github.com/altinity/clickhouse-operator/pkg/announcer"
v1 "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
"github.com/altinity/clickhouse-operator/pkg/version"
)
diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go
new file mode 100644
index 000000000..41721ca52
--- /dev/null
+++ b/pkg/client/clientset/versioned/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/pkg/controller/chi/type_cmd_queue.go b/pkg/controller/chi/cmd_queue/type_cmd_queue.go
similarity index 72%
rename from pkg/controller/chi/type_cmd_queue.go
rename to pkg/controller/chi/cmd_queue/type_cmd_queue.go
index 658316aec..69d66836b 100644
--- a/pkg/controller/chi/type_cmd_queue.go
+++ b/pkg/controller/chi/cmd_queue/type_cmd_queue.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package cmd_queue
import (
core "k8s.io/api/core/v1"
@@ -24,9 +24,9 @@ import (
)
const (
- reconcileAdd = "add"
- reconcileUpdate = "update"
- reconcileDelete = "delete"
+ ReconcileAdd = "add"
+ ReconcileUpdate = "update"
+ ReconcileDelete = "delete"
)
// PriorityQueueItem specifies item of the priority queue
@@ -50,20 +50,20 @@ const (
// ReconcileCHI specifies reconcile request queue item
type ReconcileCHI struct {
PriorityQueueItem
- cmd string
- old *api.ClickHouseInstallation
- new *api.ClickHouseInstallation
+ Cmd string
+ Old *api.ClickHouseInstallation
+ New *api.ClickHouseInstallation
}
var _ queue.PriorityQueueItem = &ReconcileCHI{}
// Handle returns handle of the queue item
func (r ReconcileCHI) Handle() queue.T {
- if r.new != nil {
- return "ReconcileCHI" + ":" + r.new.Namespace + "/" + r.new.Name
+ if r.New != nil {
+ return "ReconcileCHI" + ":" + r.New.Namespace + "/" + r.New.Name
}
- if r.old != nil {
- return "ReconcileCHI" + ":" + r.old.Namespace + "/" + r.old.Name
+ if r.Old != nil {
+ return "ReconcileCHI" + ":" + r.Old.Namespace + "/" + r.Old.Name
}
return ""
}
@@ -74,9 +74,9 @@ func NewReconcileCHI(cmd string, old, new *api.ClickHouseInstallation) *Reconcil
PriorityQueueItem: PriorityQueueItem{
priority: priorityReconcileCHI,
},
- cmd: cmd,
- old: old,
- new: new,
+ Cmd: cmd,
+ Old: old,
+ New: new,
}
/*
@@ -101,20 +101,20 @@ func NewReconcileCHI(cmd string, old, new *api.ClickHouseInstallation) *Reconcil
// ReconcileCHIT specifies reconcile CHI template queue item
type ReconcileCHIT struct {
PriorityQueueItem
- cmd string
- old *api.ClickHouseInstallationTemplate
- new *api.ClickHouseInstallationTemplate
+ Cmd string
+ Old *api.ClickHouseInstallationTemplate
+ New *api.ClickHouseInstallationTemplate
}
var _ queue.PriorityQueueItem = &ReconcileCHIT{}
// Handle returns handle of the queue item
func (r ReconcileCHIT) Handle() queue.T {
- if r.new != nil {
- return "ReconcileCHIT" + ":" + r.new.Namespace + "/" + r.new.Name
+ if r.New != nil {
+ return "ReconcileCHIT" + ":" + r.New.Namespace + "/" + r.New.Name
}
- if r.old != nil {
- return "ReconcileCHIT" + ":" + r.old.Namespace + "/" + r.old.Name
+ if r.Old != nil {
+ return "ReconcileCHIT" + ":" + r.Old.Namespace + "/" + r.Old.Name
}
return ""
}
@@ -125,29 +125,29 @@ func NewReconcileCHIT(cmd string, old, new *api.ClickHouseInstallationTemplate)
PriorityQueueItem: PriorityQueueItem{
priority: priorityReconcileCHIT,
},
- cmd: cmd,
- old: old,
- new: new,
+ Cmd: cmd,
+ Old: old,
+ New: new,
}
}
// ReconcileChopConfig specifies CHOp config queue item
type ReconcileChopConfig struct {
PriorityQueueItem
- cmd string
- old *api.ClickHouseOperatorConfiguration
- new *api.ClickHouseOperatorConfiguration
+ Cmd string
+ Old *api.ClickHouseOperatorConfiguration
+ New *api.ClickHouseOperatorConfiguration
}
var _ queue.PriorityQueueItem = &ReconcileChopConfig{}
// Handle returns handle of the queue item
func (r ReconcileChopConfig) Handle() queue.T {
- if r.new != nil {
- return "ReconcileChopConfig" + ":" + r.new.Namespace + "/" + r.new.Name
+ if r.New != nil {
+ return "ReconcileChopConfig" + ":" + r.New.Namespace + "/" + r.New.Name
}
- if r.old != nil {
- return "ReconcileChopConfig" + ":" + r.old.Namespace + "/" + r.old.Name
+ if r.Old != nil {
+ return "ReconcileChopConfig" + ":" + r.Old.Namespace + "/" + r.Old.Name
}
return ""
}
@@ -158,29 +158,29 @@ func NewReconcileChopConfig(cmd string, old, new *api.ClickHouseOperatorConfigur
PriorityQueueItem: PriorityQueueItem{
priority: priorityReconcileChopConfig,
},
- cmd: cmd,
- old: old,
- new: new,
+ Cmd: cmd,
+ Old: old,
+ New: new,
}
}
// ReconcileEndpoints specifies endpoint
type ReconcileEndpoints struct {
PriorityQueueItem
- cmd string
- old *core.Endpoints
- new *core.Endpoints
+ Cmd string
+ Old *core.Endpoints
+ New *core.Endpoints
}
var _ queue.PriorityQueueItem = &ReconcileEndpoints{}
// Handle returns handle of the queue item
func (r ReconcileEndpoints) Handle() queue.T {
- if r.new != nil {
- return "ReconcileEndpoints" + ":" + r.new.Namespace + "/" + r.new.Name
+ if r.New != nil {
+ return "ReconcileEndpoints" + ":" + r.New.Namespace + "/" + r.New.Name
}
- if r.old != nil {
- return "ReconcileEndpoints" + ":" + r.old.Namespace + "/" + r.old.Name
+ if r.Old != nil {
+ return "ReconcileEndpoints" + ":" + r.Old.Namespace + "/" + r.Old.Name
}
return ""
}
@@ -191,55 +191,55 @@ func NewReconcileEndpoints(cmd string, old, new *core.Endpoints) *ReconcileEndpo
PriorityQueueItem: PriorityQueueItem{
priority: priorityReconcileEndpoints,
},
- cmd: cmd,
- old: old,
- new: new,
+ Cmd: cmd,
+ Old: old,
+ New: new,
}
}
// DropDns specifies drop dns queue item
type DropDns struct {
PriorityQueueItem
- initiator *meta.ObjectMeta
+ Initiator meta.Object
}
var _ queue.PriorityQueueItem = &DropDns{}
// Handle returns handle of the queue item
func (r DropDns) Handle() queue.T {
- if r.initiator != nil {
- return "DropDNS" + ":" + r.initiator.Namespace + "/" + r.initiator.Name
+ if r.Initiator != nil {
+ return "DropDNS" + ":" + r.Initiator.GetNamespace() + "/" + r.Initiator.GetName()
}
return ""
}
// NewDropDns creates new drop dns queue item
-func NewDropDns(initiator *meta.ObjectMeta) *DropDns {
+func NewDropDns(initiator meta.Object) *DropDns {
return &DropDns{
PriorityQueueItem: PriorityQueueItem{
priority: priorityDropDNS,
},
- initiator: initiator,
+ Initiator: initiator,
}
}
// ReconcilePod specifies pod reconcile
type ReconcilePod struct {
PriorityQueueItem
- cmd string
- old *core.Pod
- new *core.Pod
+ Cmd string
+ Old *core.Pod
+ New *core.Pod
}
var _ queue.PriorityQueueItem = &ReconcilePod{}
// Handle returns handle of the queue item
func (r ReconcilePod) Handle() queue.T {
- if r.new != nil {
- return "ReconcilePod" + ":" + r.new.Namespace + "/" + r.new.Name
+ if r.New != nil {
+ return "ReconcilePod" + ":" + r.New.Namespace + "/" + r.New.Name
}
- if r.old != nil {
- return "ReconcilePod" + ":" + r.old.Namespace + "/" + r.old.Name
+ if r.Old != nil {
+ return "ReconcilePod" + ":" + r.Old.Namespace + "/" + r.Old.Name
}
return ""
}
@@ -247,8 +247,8 @@ func (r ReconcilePod) Handle() queue.T {
// NewReconcilePod creates new reconcile endpoints queue item
func NewReconcilePod(cmd string, old, new *core.Pod) *ReconcilePod {
return &ReconcilePod{
- cmd: cmd,
- old: old,
- new: new,
+ Cmd: cmd,
+ Old: old,
+ New: new,
}
}
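
Since the queue items' fields and the reconcile command constants are now exported, other packages can both enqueue and inspect them; a brief sketch (oldCHI and newCHI are assumed to be *api.ClickHouseInstallation values already in scope):

    item := cmd_queue.NewReconcileCHI(cmd_queue.ReconcileUpdate, oldCHI, newCHI)
    _ = item.Handle() // "ReconcileCHI:<namespace>/<name>", taken from New, else Old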
diff --git a/pkg/model/chk/namer.go b/pkg/controller/chi/const.go
similarity index 72%
rename from pkg/model/chk/namer.go
rename to pkg/controller/chi/const.go
index d638d9edf..97c6132f3 100644
--- a/pkg/model/chk/namer.go
+++ b/pkg/controller/chi/const.go
@@ -12,14 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chk
+package chi
import (
- "fmt"
+ "time"
+)
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+const (
+ componentName = "clickhouse-operator"
+ runWorkerPeriod = time.Second
)
-func getHeadlessServiceName(chk *api.ClickHouseKeeperInstallation) string {
- return fmt.Sprintf("%s-headless", chk.GetName())
-}
+const (
+ messageUnableToDecode = "unable to decode object (invalid type)"
+ messageUnableToSync = "unable to sync caches for %s controller"
+)
diff --git a/pkg/controller/chi/controller-config-map.go b/pkg/controller/chi/controller-config-map.go
new file mode 100644
index 000000000..f27f28941
--- /dev/null
+++ b/pkg/controller/chi/controller-config-map.go
@@ -0,0 +1,97 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ "fmt"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// getConfigMap gets ConfigMap either by namespaced name or by labels
+// TODO review the byNameOnly param
+func (c *Controller) getConfigMap(ctx context.Context, meta meta.Object, byNameOnly bool) (*core.ConfigMap, error) {
+ // Check whether object with such name already exists
+ configMap, err := c.kube.ConfigMap().Get(ctx, meta.GetNamespace(), meta.GetName())
+
+ if (configMap != nil) && (err == nil) {
+ // Object found by name
+ return configMap, nil
+ }
+
+ if !apiErrors.IsNotFound(err) {
+ // Error, which is not related to "Object not found"
+ return nil, err
+ }
+
+ // Object not found by name
+
+ if byNameOnly {
+ return nil, err
+ }
+
+ // Try to find by labels
+
+ set, err := chiLabeler.New(nil).MakeSetFromObjectMeta(meta)
+ if err != nil {
+ return nil, err
+ }
+ opts := controller.NewListOptions(set)
+
+ configMaps, err := c.kube.ConfigMap().List(ctx, meta.GetNamespace(), opts)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(configMaps) == 0 {
+ return nil, apiErrors.NewNotFound(apps.Resource("ConfigMap"), meta.GetName())
+ }
+
+ if len(configMaps) == 1 {
+ // Exactly one object found by labels
+ return &configMaps[0], nil
+ }
+
+	// Too many objects found by labels
+	return nil, fmt.Errorf("too many objects found: %d, expected 1", len(configMaps))
+}
+
+func (c *Controller) createConfigMap(ctx context.Context, cm *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.ConfigMap().Create(ctx, cm)
+ return err
+}
+
+func (c *Controller) updateConfigMap(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil, nil
+ }
+
+ return c.kube.ConfigMap().Update(ctx, cm)
+}
diff --git a/pkg/controller/chi/controller-deleter.go b/pkg/controller/chi/controller-deleter.go
new file mode 100644
index 000000000..156bc8d72
--- /dev/null
+++ b/pkg/controller/chi/controller-deleter.go
@@ -0,0 +1,239 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// deleteHost deletes all Kubernetes resources related to the host
+func (c *Controller) deleteHost(ctx context.Context, host *api.Host) error {
+ log.V(1).M(host).S().Info(host.Runtime.Address.ClusterNameString())
+
+ // Each host consists of:
+ _ = c.deleteStatefulSet(ctx, host)
+ _ = storage.NewStoragePVC(c.kube.Storage()).DeletePVC(ctx, host)
+ _ = c.deleteConfigMap(ctx, host)
+ _ = c.deleteServiceHost(ctx, host)
+
+ log.V(1).M(host).E().Info(host.Runtime.Address.ClusterNameString())
+
+ return nil
+}
+
+// deleteConfigMapsCHI
+func (c *Controller) deleteConfigMapsCHI(ctx context.Context, chi *api.ClickHouseInstallation) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+	// Delete common ConfigMaps
+ //
+ // chi-b3d29f-common-configd 2 61s
+ // chi-b3d29f-common-usersd 0 61s
+ // service/clickhouse-example-01 LoadBalancer 10.106.183.200 8123:31607/TCP,9000:31492/TCP,9009:31357/TCP 33s clickhouse.altinity.com/chi=example-01
+
+ var err error
+
+ configMapCommon := c.namer.Name(interfaces.NameConfigMapCommon, chi)
+ configMapCommonUsersName := c.namer.Name(interfaces.NameConfigMapCommonUsers, chi)
+
+ // Delete ConfigMap
+ err = c.kube.ConfigMap().Delete(ctx, chi.GetNamespace(), configMapCommon)
+ switch {
+ case err == nil:
+ log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon)
+ case apiErrors.IsNotFound(err):
+ log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon)
+ default:
+ log.V(1).M(chi).F().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err)
+ }
+
+ err = c.kube.ConfigMap().Delete(ctx, chi.Namespace, configMapCommonUsersName)
+ switch {
+ case err == nil:
+ log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
+ case apiErrors.IsNotFound(err):
+ log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
+ err = nil
+ default:
+ log.V(1).M(chi).F().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err)
+ }
+
+ return err
+}
+
+// statefulSetDeletePod deletes a Pod of a StatefulSet, which requests the StatefulSet to relaunch the deleted Pod
+func (c *Controller) statefulSetDeletePod(ctx context.Context, statefulSet *apps.StatefulSet, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ name := c.namer.Name(interfaces.NamePod, statefulSet)
+ log.V(1).M(host).Info("Delete Pod %s/%s", statefulSet.Namespace, name)
+ err := c.kube.Pod().Delete(ctx, statefulSet.Namespace, name)
+ if err == nil {
+ log.V(1).M(host).Info("OK delete Pod %s/%s", statefulSet.Namespace, name)
+ } else if apiErrors.IsNotFound(err) {
+ log.V(1).M(host).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name)
+ err = nil
+ } else {
+ log.V(1).M(host).F().Error("FAIL delete Pod %s/%s err:%v", statefulSet.Namespace, name, err)
+ }
+
+ return err
+}
+
+func (c *Controller) deleteStatefulSet(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ name := c.namer.Name(interfaces.NameStatefulSet, host)
+ namespace := host.Runtime.Address.Namespace
+ log.V(1).M(host).F().Info("%s/%s", namespace, name)
+ return c.kube.STS().Delete(ctx, namespace, name)
+}
+
+// deleteConfigMap deletes ConfigMap
+func (c *Controller) deleteConfigMap(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ name := c.namer.Name(interfaces.NameConfigMapHost, host)
+ namespace := host.Runtime.Address.Namespace
+ log.V(1).M(host).F().Info("%s/%s", namespace, name)
+
+ if err := c.kube.ConfigMap().Delete(ctx, namespace, name); err == nil {
+ log.V(1).M(host).Info("OK delete ConfigMap %s/%s", namespace, name)
+ } else if apiErrors.IsNotFound(err) {
+ log.V(1).M(host).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name)
+ } else {
+ log.V(1).M(host).F().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err)
+ }
+ return nil
+}
+
+// deleteServiceHost deletes Service
+func (c *Controller) deleteServiceHost(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ serviceName := c.namer.Name(interfaces.NameStatefulSetService, host)
+ namespace := host.Runtime.Address.Namespace
+ log.V(1).M(host).F().Info("%s/%s", namespace, serviceName)
+ return c.deleteServiceIfExists(ctx, namespace, serviceName)
+}
+
+// deleteServiceShard
+func (c *Controller) deleteServiceShard(ctx context.Context, shard *api.ChiShard) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ serviceName := c.namer.Name(interfaces.NameShardService, shard)
+ namespace := shard.Runtime.Address.Namespace
+ log.V(1).M(shard).F().Info("%s/%s", namespace, serviceName)
+ return c.deleteServiceIfExists(ctx, namespace, serviceName)
+}
+
+// deleteServiceCluster
+func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *api.Cluster) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ serviceName := c.namer.Name(interfaces.NameClusterService, cluster)
+ namespace := cluster.Runtime.Address.Namespace
+ log.V(1).M(cluster).F().Info("%s/%s", namespace, serviceName)
+ return c.deleteServiceIfExists(ctx, namespace, serviceName)
+}
+
+// deleteServiceCR
+func (c *Controller) deleteServiceCR(ctx context.Context, cr api.ICustomResource) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ serviceName := c.namer.Name(interfaces.NameCRService, cr)
+ namespace := cr.GetNamespace()
+ log.V(1).M(cr).F().Info("%s/%s", namespace, serviceName)
+ return c.deleteServiceIfExists(ctx, namespace, serviceName)
+}
+
+// deleteSecretCluster
+func (c *Controller) deleteSecretCluster(ctx context.Context, cluster *api.Cluster) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ secretName := c.namer.Name(interfaces.NameClusterAutoSecret, cluster)
+ namespace := cluster.Runtime.Address.Namespace
+ log.V(1).M(cluster).F().Info("%s/%s", namespace, secretName)
+ return c.deleteSecretIfExists(ctx, namespace, secretName)
+}
+
+// deleteSecretIfExists deletes the Secret in case it exists
+func (c *Controller) deleteSecretIfExists(ctx context.Context, namespace, name string) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+	// Check whether the specified Secret exists
+ _, err := c.kube.Secret().Get(ctx, &core.Secret{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ })
+
+ if err != nil {
+		// No such Secret, nothing to delete
+ return nil
+ }
+
+ // Delete
+ err = c.kube.Secret().Delete(ctx, namespace, name)
+ if err == nil {
+		log.V(1).M(namespace, name).Info("OK delete Secret %s/%s", namespace, name)
+ } else {
+ log.V(1).M(namespace, name).F().Error("FAIL delete Secret %s/%s err:%v", namespace, name, err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chi/controller-discoverer.go b/pkg/controller/chi/controller-discoverer.go
new file mode 100644
index 000000000..024a331ea
--- /dev/null
+++ b/pkg/controller/chi/controller-discoverer.go
@@ -0,0 +1,158 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func getLabeler(cr api.ICustomResource) interfaces.ILabeler {
+ return chiLabeler.New(cr)
+}
+
+func (c *Controller) discovery(ctx context.Context, cr api.ICustomResource) *model.Registry {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ opts := controller.NewListOptions(getLabeler(cr).Selector(interfaces.SelectorCRScope))
+ r := model.NewRegistry()
+ c.discoveryStatefulSets(ctx, r, cr, opts)
+ c.discoveryConfigMaps(ctx, r, cr, opts)
+ c.discoveryServices(ctx, r, cr, opts)
+ c.discoverySecrets(ctx, r, cr, opts)
+ c.discoveryPVCs(ctx, r, cr, opts)
+ // Comment out PV
+ //c.discoveryPVs(ctx, r, chi, opts)
+ c.discoveryPDBs(ctx, r, cr, opts)
+ return r
+}
+
+func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.STS().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list StatefulSet - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list StatefulSet - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterStatefulSet(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.ConfigMap().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list ConfigMap - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list ConfigMap - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterConfigMap(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Service().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list Service - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list Service - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterService(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Secret().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list Secret - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list Secret - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterSecret(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Storage().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list PVC - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list PVC - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterPVC(obj.GetObjectMeta())
+ }
+}
+
+// Comment out PV
+//func (c *Controller) discoveryPVs(ctx context.Context, r *chopModel.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+// list, err := c.kubeClient.CoreV1().PersistentVolumes().List(ctx, opts)
+// if err != nil {
+// log.M(cr).F().Error("FAIL list PV err: %v", err)
+// return
+// }
+// if list == nil {
+// log.M(cr).F().Error("FAIL list PV list is nil")
+// return
+// }
+// for _, obj := range list.Items {
+// r.RegisterPV(obj.ObjectMeta)
+// }
+//}
+
+func (c *Controller) discoveryPDBs(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.PDB().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list PDB - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list PDB - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterPDB(obj.GetObjectMeta())
+ }
+}
diff --git a/pkg/controller/chi/controller-getter.go b/pkg/controller/chi/controller-getter.go
new file mode 100644
index 000000000..4fe6fd908
--- /dev/null
+++ b/pkg/controller/chi/controller-getter.go
@@ -0,0 +1,62 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "fmt"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+)
+
+// getPodsIPs gets all pod IPs
+func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
+ log.V(3).M(obj).F().S().Info("looking for pods IPs")
+ defer log.V(3).M(obj).F().E().Info("looking for pods IPs")
+
+ for _, pod := range c.kube.Pod().GetAll(obj) {
+ if ip := pod.Status.PodIP; ip == "" {
+ log.V(3).M(pod).F().Warning("Pod NO IP address found. Pod: %s/%s", pod.Namespace, pod.Name)
+ } else {
+ ips = append(ips, ip)
+ log.V(3).M(pod).F().Info("Pod IP address found. Pod: %s/%s IP: %s", pod.Namespace, pod.Name, ip)
+ }
+ }
+ return ips
+}
+
+// GetCHIByObjectMeta gets CHI by namespaced name
+func (c *Controller) GetCHIByObjectMeta(meta meta.Object, isCR bool) (*api.ClickHouseInstallation, error) {
+ var crName string
+ if isCR {
+ crName = meta.GetName()
+ } else {
+ var err error
+ crName, err = chiLabeler.New(nil).GetCRNameFromObjectMeta(meta)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find CR by name: '%s'. More info: %v", meta.GetName(), err)
+ }
+ }
+
+ cr, err := c.kube.CR().Get(controller.NewContext(), meta.GetNamespace(), crName)
+ if cr == nil {
+ return nil, err
+ }
+ return cr.(*api.ClickHouseInstallation), err
+}
diff --git a/pkg/controller/chi/controller-pdb.go b/pkg/controller/chi/controller-pdb.go
new file mode 100644
index 000000000..e683ea16a
--- /dev/null
+++ b/pkg/controller/chi/controller-pdb.go
@@ -0,0 +1,50 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ policy "k8s.io/api/policy/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (c *Controller) getPDB(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ return c.kube.PDB().Get(ctx, pdb.GetNamespace(), pdb.GetName())
+}
+
+func (c *Controller) createPDB(ctx context.Context, pdb *policy.PodDisruptionBudget) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.PDB().Create(ctx, pdb)
+
+ return err
+}
+
+func (c *Controller) updatePDB(ctx context.Context, pdb *policy.PodDisruptionBudget) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.PDB().Update(ctx, pdb)
+
+ return err
+}
diff --git a/pkg/controller/chi/podder.go b/pkg/controller/chi/controller-podder.go
similarity index 84%
rename from pkg/controller/chi/podder.go
rename to pkg/controller/chi/controller-podder.go
index cd46d2303..7f1a94f46 100644
--- a/pkg/controller/chi/podder.go
+++ b/pkg/controller/chi/controller-podder.go
@@ -22,8 +22,8 @@ import (
)
// walkContainers walks with specified func over all containers of the specified host
-func (c *Controller) walkContainers(host *api.ChiHost, f func(container *v1.Container)) {
- pod, err := c.getPod(host)
+func (c *Controller) walkContainers(host *api.Host, f func(container *v1.Container)) {
+ pod, err := c.kube.Pod().Get(host)
if err != nil {
log.M(host).F().Error("FAIL get pod for host '%s' err: %v", host.Runtime.Address.NamespaceNameString(), err)
return
@@ -36,8 +36,8 @@ func (c *Controller) walkContainers(host *api.ChiHost, f func(container *v1.Cont
}
// walkContainerStatuses walks with specified func over all statuses of the specified host
-func (c *Controller) walkContainerStatuses(host *api.ChiHost, f func(status *v1.ContainerStatus)) {
- pod, err := c.getPod(host)
+func (c *Controller) walkContainerStatuses(host *api.Host, f func(status *v1.ContainerStatus)) {
+ pod, err := c.kube.Pod().Get(host)
if err != nil {
log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return
@@ -50,7 +50,7 @@ func (c *Controller) walkContainerStatuses(host *api.ChiHost, f func(status *v1.
}
// isHostRunning checks whether ALL containers of the specified host are running
-func (c *Controller) isHostRunning(host *api.ChiHost) bool {
+func (c *Controller) isHostRunning(host *api.Host) bool {
all := true
c.walkContainerStatuses(host, func(status *v1.ContainerStatus) {
if status.State.Running == nil {
diff --git a/pkg/controller/chi/controller-secret.go b/pkg/controller/chi/controller-secret.go
new file mode 100644
index 000000000..ee77fdab2
--- /dev/null
+++ b/pkg/controller/chi/controller-secret.go
@@ -0,0 +1,47 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// getSecret gets secret
+func (c *Controller) getSecret(ctx context.Context, secret *core.Secret) (*core.Secret, error) {
+ return c.kube.Secret().Get(ctx, secret)
+}
+
+func (c *Controller) createSecret(ctx context.Context, secret *core.Secret) error {
+ log.V(1).M(secret).F().P()
+
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ log.V(1).Info("Create Secret %s", util.NamespacedName(secret))
+ if _, err := c.kube.Secret().Create(ctx, secret); err != nil {
+		// Unable to create Secret
+ log.V(1).Error("Create Secret %s failed err: %v", util.NamespacedName(secret), err)
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/controller/chi/controller-service.go b/pkg/controller/chi/controller-service.go
new file mode 100644
index 000000000..0fc95c910
--- /dev/null
+++ b/pkg/controller/chi/controller-service.go
@@ -0,0 +1,71 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (c *Controller) getService(ctx context.Context, service *core.Service) (*core.Service, error) {
+ return c.kube.Service().Get(ctx, service)
+}
+
+func (c *Controller) createService(ctx context.Context, service *core.Service) error {
+ _, err := c.kube.Service().Create(ctx, service)
+ return err
+}
+
+func (c *Controller) updateService(ctx context.Context, service *core.Service) error {
+ _, err := c.kube.Service().Update(ctx, service)
+ return err
+}
+
+// deleteServiceIfExists deletes the Service in case it exists
+func (c *Controller) deleteServiceIfExists(ctx context.Context, namespace, name string) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+	// Check whether the specified Service exists
+ _, err := c.kube.Service().Get(ctx, &core.Service{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ })
+
+ if err != nil {
+		// No such Service, nothing to delete
+ log.V(1).M(namespace, name).F().Info("Not Found Service: %s/%s err: %v", namespace, name, err)
+ return nil
+ }
+
+ // Delete service
+ err = c.kube.Service().Delete(ctx, namespace, name)
+ if err == nil {
+ log.V(1).M(namespace, name).F().Info("OK delete Service: %s/%s", namespace, name)
+ log.V(1).M(namespace, name).F().Info("OK delete Service -- proceed further: %s/%s", namespace, name)
+ } else {
+ log.V(1).M(namespace, name).F().Error("FAIL delete Service: %s/%s err: %v", namespace, name, err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chi/controller-status.go b/pkg/controller/chi/controller-status.go
new file mode 100644
index 000000000..25a12e1cb
--- /dev/null
+++ b/pkg/controller/chi/controller-status.go
@@ -0,0 +1,27 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+// updateCRObjectStatus updates Custom Resource object's Status
+func (c *Controller) updateCRObjectStatus(ctx context.Context, cr api.ICustomResource, opts types.UpdateStatusOptions) (err error) {
+ return c.kube.CR().StatusUpdate(ctx, cr, opts)
+}
diff --git a/pkg/controller/chi/controller.go b/pkg/controller/chi/controller.go
index 532239982..a433a14c3 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -27,7 +27,7 @@ import (
core "k8s.io/api/core/v1"
apiExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
+ kubeTypes "k8s.io/apimachinery/pkg/types"
utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeInformers "k8s.io/client-go/informers"
@@ -47,10 +47,40 @@ import (
chopClientSetScheme "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned/scheme"
chopInformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions"
"github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/cmd_queue"
+ chiKube "github.com/altinity/clickhouse-operator/pkg/controller/chi/kube"
+ ctrlLabeler "github.com/altinity/clickhouse-operator/pkg/controller/chi/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/metrics/clickhouse"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/volume"
+ "github.com/altinity/clickhouse-operator/pkg/model/managers"
"github.com/altinity/clickhouse-operator/pkg/util"
)
+// Controller defines CRO controller
+type Controller struct {
+ // kube is a generalized kube client
+ kube interfaces.IKube
+
+ //
+ // Native clients
+ //
+ kubeClient kube.Interface
+ extClient apiExtensions.Interface
+ chopClient chopClientSet.Interface
+
+ // queues are used to organize the event queues processed by the operator
+ queues []queue.PriorityQueue
+ // recorder is an event recorder; it is not used explicitly
+ recorder record.EventRecorder
+
+ namer interfaces.INameManager
+ ctrlLabeler *ctrlLabeler.Labeler
+ pvcDeleter *volume.PVCDeleter
+}
+
// NewController creates instance of Controller
func NewController(
chopClient chopClientSet.Interface,
@@ -78,26 +108,19 @@ func NewController(
},
)
+ namer := managers.NewNameManager(managers.NameManagerTypeClickHouse)
+ kube := chiKube.NewAdapter(kubeClient, chopClient, namer)
+
// Create Controller instance
controller := &Controller{
- kubeClient: kubeClient,
- extClient: extClient,
- chopClient: chopClient,
- chiLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Lister(),
- chiListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().HasSynced,
- chitLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Lister(),
- chitListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Informer().HasSynced,
- serviceLister: kubeInformerFactory.Core().V1().Services().Lister(),
- serviceListerSynced: kubeInformerFactory.Core().V1().Services().Informer().HasSynced,
- endpointsLister: kubeInformerFactory.Core().V1().Endpoints().Lister(),
- endpointsListerSynced: kubeInformerFactory.Core().V1().Endpoints().Informer().HasSynced,
- configMapLister: kubeInformerFactory.Core().V1().ConfigMaps().Lister(),
- configMapListerSynced: kubeInformerFactory.Core().V1().ConfigMaps().Informer().HasSynced,
- statefulSetLister: kubeInformerFactory.Apps().V1().StatefulSets().Lister(),
- statefulSetListerSynced: kubeInformerFactory.Apps().V1().StatefulSets().Informer().HasSynced,
- podLister: kubeInformerFactory.Core().V1().Pods().Lister(),
- podListerSynced: kubeInformerFactory.Core().V1().Pods().Informer().HasSynced,
- recorder: recorder,
+ kubeClient: kubeClient,
+ extClient: extClient,
+ chopClient: chopClient,
+ recorder: recorder,
+ namer: namer,
+ kube: kube,
+ ctrlLabeler: ctrlLabeler.New(kube),
+ pvcDeleter: volume.NewPVCDeleter(managers.NewNameManager(managers.NameManagerTypeClickHouse)),
}
controller.initQueues()
controller.addEventHandlers(chopInformerFactory, kubeInformerFactory)
@@ -130,7 +153,7 @@ func (c *Controller) addEventHandlersCHI(
return
}
log.V(3).M(chi).Info("chiInformer.AddFunc")
- c.enqueueObject(NewReconcileCHI(reconcileAdd, nil, chi))
+ c.enqueueObject(cmd_queue.NewReconcileCHI(cmd_queue.ReconcileAdd, nil, chi))
},
UpdateFunc: func(old, new interface{}) {
oldChi := old.(*api.ClickHouseInstallation)
@@ -139,7 +162,7 @@ func (c *Controller) addEventHandlersCHI(
return
}
log.V(3).M(newChi).Info("chiInformer.UpdateFunc")
- c.enqueueObject(NewReconcileCHI(reconcileUpdate, oldChi, newChi))
+ c.enqueueObject(cmd_queue.NewReconcileCHI(cmd_queue.ReconcileUpdate, oldChi, newChi))
},
DeleteFunc: func(obj interface{}) {
chi := obj.(*api.ClickHouseInstallation)
@@ -147,7 +170,7 @@ func (c *Controller) addEventHandlersCHI(
return
}
log.V(3).M(chi).Info("chiInformer.DeleteFunc")
- c.enqueueObject(NewReconcileCHI(reconcileDelete, chi, nil))
+ c.enqueueObject(cmd_queue.NewReconcileCHI(cmd_queue.ReconcileDelete, chi, nil))
},
})
}
@@ -162,7 +185,7 @@ func (c *Controller) addEventHandlersCHIT(
return
}
log.V(3).M(chit).Info("chitInformer.AddFunc")
- c.enqueueObject(NewReconcileCHIT(reconcileAdd, nil, chit))
+ c.enqueueObject(cmd_queue.NewReconcileCHIT(cmd_queue.ReconcileAdd, nil, chit))
},
UpdateFunc: func(old, new interface{}) {
oldChit := old.(*api.ClickHouseInstallationTemplate)
@@ -171,7 +194,7 @@ func (c *Controller) addEventHandlersCHIT(
return
}
log.V(3).M(newChit).Info("chitInformer.UpdateFunc")
- c.enqueueObject(NewReconcileCHIT(reconcileUpdate, oldChit, newChit))
+ c.enqueueObject(cmd_queue.NewReconcileCHIT(cmd_queue.ReconcileUpdate, oldChit, newChit))
},
DeleteFunc: func(obj interface{}) {
chit := obj.(*api.ClickHouseInstallationTemplate)
@@ -179,7 +202,7 @@ func (c *Controller) addEventHandlersCHIT(
return
}
log.V(3).M(chit).Info("chitInformer.DeleteFunc")
- c.enqueueObject(NewReconcileCHIT(reconcileDelete, chit, nil))
+ c.enqueueObject(cmd_queue.NewReconcileCHIT(cmd_queue.ReconcileDelete, chit, nil))
},
})
}
@@ -194,7 +217,7 @@ func (c *Controller) addEventHandlersChopConfig(
return
}
log.V(3).M(chopConfig).Info("chopInformer.AddFunc")
- c.enqueueObject(NewReconcileChopConfig(reconcileAdd, nil, chopConfig))
+ c.enqueueObject(cmd_queue.NewReconcileChopConfig(cmd_queue.ReconcileAdd, nil, chopConfig))
},
UpdateFunc: func(old, new interface{}) {
newChopConfig := new.(*api.ClickHouseOperatorConfiguration)
@@ -203,7 +226,7 @@ func (c *Controller) addEventHandlersChopConfig(
return
}
log.V(3).M(newChopConfig).Info("chopInformer.UpdateFunc")
- c.enqueueObject(NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig))
+ c.enqueueObject(cmd_queue.NewReconcileChopConfig(cmd_queue.ReconcileUpdate, oldChopConfig, newChopConfig))
},
DeleteFunc: func(obj interface{}) {
chopConfig := obj.(*api.ClickHouseOperatorConfiguration)
@@ -211,7 +234,7 @@ func (c *Controller) addEventHandlersChopConfig(
return
}
log.V(3).M(chopConfig).Info("chopInformer.DeleteFunc")
- c.enqueueObject(NewReconcileChopConfig(reconcileDelete, chopConfig, nil))
+ c.enqueueObject(cmd_queue.NewReconcileChopConfig(cmd_queue.ReconcileDelete, chopConfig, nil))
},
})
}
@@ -222,7 +245,7 @@ func (c *Controller) addEventHandlersService(
kubeInformerFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
service := obj.(*core.Service)
- if !c.isTrackedObject(&service.ObjectMeta) {
+ if !c.isTrackedObject(service.GetObjectMeta()) {
return
}
log.V(3).M(service).Info("serviceInformer.AddFunc")
@@ -334,8 +357,8 @@ func (c *Controller) addEventHandlersEndpoint(
}
log.V(3).M(newEndpoints).Info("endpointsInformer.UpdateFunc")
if updated(oldEndpoints, newEndpoints) {
- c.enqueueObject(NewReconcileEndpoints(reconcileUpdate, oldEndpoints, newEndpoints))
- c.enqueueObject(NewDropDns(&newEndpoints.ObjectMeta))
+ c.enqueueObject(cmd_queue.NewReconcileEndpoints(cmd_queue.ReconcileUpdate, oldEndpoints, newEndpoints))
+ c.enqueueObject(cmd_queue.NewDropDns(&newEndpoints.ObjectMeta))
}
},
DeleteFunc: func(obj interface{}) {
@@ -416,7 +439,7 @@ func (c *Controller) addEventHandlersPod(
return
}
log.V(3).M(pod).Info("podInformer.AddFunc")
- c.enqueueObject(NewReconcilePod(reconcileAdd, nil, pod))
+ c.enqueueObject(cmd_queue.NewReconcilePod(cmd_queue.ReconcileAdd, nil, pod))
},
UpdateFunc: func(old, new interface{}) {
oldPod := old.(*core.Pod)
@@ -425,7 +448,7 @@ func (c *Controller) addEventHandlersPod(
return
}
log.V(3).M(newPod).Info("podInformer.UpdateFunc")
- c.enqueueObject(NewReconcilePod(reconcileUpdate, oldPod, newPod))
+ c.enqueueObject(cmd_queue.NewReconcilePod(cmd_queue.ReconcileUpdate, oldPod, newPod))
},
DeleteFunc: func(obj interface{}) {
pod := obj.(*core.Pod)
@@ -433,7 +456,7 @@ func (c *Controller) addEventHandlersPod(
return
}
log.V(3).M(pod).Info("podInformer.DeleteFunc")
- c.enqueueObject(NewReconcilePod(reconcileDelete, pod, nil))
+ c.enqueueObject(cmd_queue.NewReconcilePod(cmd_queue.ReconcileDelete, pod, nil))
},
})
}
@@ -454,8 +477,8 @@ func (c *Controller) addEventHandlers(
}
// isTrackedObject checks whether operator is interested in changes of this object
-func (c *Controller) isTrackedObject(objectMeta *meta.ObjectMeta) bool {
- return chop.Config().IsWatchedNamespace(objectMeta.Namespace) && model.IsCHOPGeneratedObject(objectMeta)
+func (c *Controller) isTrackedObject(meta meta.Object) bool {
+ return chop.Config().IsWatchedNamespace(meta.GetNamespace()) && chiLabeler.New(nil).IsCHOPGeneratedObject(meta)
}
// Run syncs caches, starts workers
@@ -469,25 +492,14 @@ func (c *Controller) Run(ctx context.Context) {
}()
log.V(1).Info("Starting ClickHouseInstallation controller")
- if !waitForCacheSync(
- ctx,
- "ClickHouseInstallation",
- c.chiListerSynced,
- c.statefulSetListerSynced,
- c.configMapListerSynced,
- c.serviceListerSynced,
- ) {
- // Unable to sync
- return
- }
// Label controller runtime objects with proper labels
max := 10
for cnt := 0; cnt < max; cnt++ {
- switch err := c.labelMyObjectsTree(ctx); err {
+ switch err := c.ctrlLabeler.LabelMyObjectsTree(ctx); err {
case nil:
cnt = max
- case ErrOperatorPodNotSpecified:
+ case ctrlLabeler.ErrOperatorPodNotSpecified:
log.V(1).F().Error("Since operator pod is not specified, will not perform labeling")
cnt = max
default:
@@ -516,8 +528,8 @@ func (c *Controller) Run(ctx context.Context) {
<-ctx.Done()
}
-func prepareCHIAdd(command *ReconcileCHI) bool {
- newjs, _ := json.Marshal(command.new)
+func prepareCHIAdd(command *cmd_queue.ReconcileCHI) bool {
+ newjs, _ := json.Marshal(command.New)
newchi := api.ClickHouseInstallation{
TypeMeta: meta.TypeMeta{
APIVersion: api.SchemeGroupVersion.String(),
@@ -525,24 +537,24 @@ func prepareCHIAdd(command *ReconcileCHI) bool {
},
}
_ = json.Unmarshal(newjs, &newchi)
- command.new = &newchi
+ command.New = &newchi
logCommand(command)
return true
}
-func prepareCHIUpdate(command *ReconcileCHI) bool {
- actionPlan := model.NewActionPlan(command.old, command.new)
+func prepareCHIUpdate(command *cmd_queue.ReconcileCHI) bool {
+ actionPlan := action_plan.NewActionPlan(command.Old, command.New)
if !actionPlan.HasActionsToDo() {
return false
}
- oldjson, _ := json.MarshalIndent(command.old, "", " ")
- newjson, _ := json.MarshalIndent(command.new, "", " ")
+ oldjson, _ := json.MarshalIndent(command.Old, "", " ")
+ newjson, _ := json.MarshalIndent(command.New, "", " ")
log.V(2).Info("AP enqueue---------------------------------------------:\n%s\n", actionPlan)
log.V(3).Info("old enqueue--------------------------------------------:\n%s\n", string(oldjson))
log.V(3).Info("new enqueue--------------------------------------------:\n%s\n", string(newjson))
- oldjs, _ := json.Marshal(command.old)
- newjs, _ := json.Marshal(command.new)
+ oldjs, _ := json.Marshal(command.Old)
+ newjs, _ := json.Marshal(command.New)
oldchi := api.ClickHouseInstallation{}
newchi := api.ClickHouseInstallation{
TypeMeta: meta.TypeMeta{
@@ -552,24 +564,24 @@ func prepareCHIUpdate(command *ReconcileCHI) bool {
}
_ = json.Unmarshal(oldjs, &oldchi)
_ = json.Unmarshal(newjs, &newchi)
- command.old = &oldchi
- command.new = &newchi
+ command.Old = &oldchi
+ command.New = &newchi
logCommand(command)
return true
}
-func logCommand(command *ReconcileCHI) {
+func logCommand(command *cmd_queue.ReconcileCHI) {
namespace := "uns"
name := "un"
switch {
- case command.new != nil:
- namespace = command.new.Namespace
- name = command.new.Name
- case command.old != nil:
- namespace = command.old.Namespace
- name = command.old.Name
+ case command.New != nil:
+ namespace = command.New.Namespace
+ name = command.New.Name
+ case command.Old != nil:
+ namespace = command.Old.Namespace
+ name = command.Old.Name
}
- log.V(1).Info("ENQUEUE new ReconcileCHI cmd=%s for %s/%s", command.cmd, namespace, name)
+ log.V(1).Info("ENQUEUE new ReconcileCHI cmd=%s for %s/%s", command.Cmd, namespace, name)
}
// enqueueObject adds ClickHouseInstallation object to the work queue
@@ -578,21 +590,21 @@ func (c *Controller) enqueueObject(obj queue.PriorityQueueItem) {
index := 0
enqueue := false
switch command := obj.(type) {
- case *ReconcileCHI:
+ case *cmd_queue.ReconcileCHI:
variants := len(c.queues) - api.DefaultReconcileSystemThreadsNumber
index = api.DefaultReconcileSystemThreadsNumber + util.HashIntoIntTopped(handle, variants)
- switch command.cmd {
- case reconcileAdd:
+ switch command.Cmd {
+ case cmd_queue.ReconcileAdd:
enqueue = prepareCHIAdd(command)
- case reconcileUpdate:
+ case cmd_queue.ReconcileUpdate:
enqueue = prepareCHIUpdate(command)
}
case
- *ReconcileCHIT,
- *ReconcileChopConfig,
- *ReconcileEndpoints,
- *ReconcilePod,
- *DropDns:
+ *cmd_queue.ReconcileCHIT,
+ *cmd_queue.ReconcileChopConfig,
+ *cmd_queue.ReconcileEndpoints,
+ *cmd_queue.ReconcilePod,
+ *cmd_queue.DropDns:
variants := api.DefaultReconcileSystemThreadsNumber
index = util.HashIntoIntTopped(handle, variants)
enqueue = true
@@ -611,7 +623,7 @@ func (c *Controller) updateWatch(chi *api.ClickHouseInstallation) {
// updateWatchAsync
func (c *Controller) updateWatchAsync(chi *metrics.WatchedCHI) {
- if err := metrics.InformMetricsExporterAboutWatchedCHI(chi); err != nil {
+ if err := clickhouse.InformMetricsExporterAboutWatchedCHI(chi); err != nil {
log.V(1).F().Info("FAIL update watch (%s/%s): %q", chi.Namespace, chi.Name, err)
} else {
log.V(1).Info("OK update watch (%s/%s): %s", chi.Namespace, chi.Name, chi)
@@ -626,7 +638,7 @@ func (c *Controller) deleteWatch(chi *api.ClickHouseInstallation) {
// deleteWatchAsync
func (c *Controller) deleteWatchAsync(chi *metrics.WatchedCHI) {
- if err := metrics.InformMetricsExporterToDeleteWatchedCHI(chi); err != nil {
+ if err := clickhouse.InformMetricsExporterToDeleteWatchedCHI(chi); err != nil {
log.V(1).F().Info("FAIL delete watch (%s/%s): %q", chi.Namespace, chi.Name, err)
} else {
log.V(1).Info("OK delete watch (%s/%s)", chi.Namespace, chi.Name)
@@ -649,13 +661,13 @@ func (c *Controller) addChopConfig(chopConfig *api.ClickHouseOperatorConfigurati
// updateChopConfig
func (c *Controller) updateChopConfig(old, new *api.ClickHouseOperatorConfiguration) error {
- if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion {
- log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion)
+ if old.GetObjectMeta().GetResourceVersion() == new.GetObjectMeta().GetResourceVersion() {
+ log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.GetObjectMeta().GetResourceVersion())
// No need to react
return nil
}
- log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion)
+ log.V(2).M(new).F().Info("ResourceVersion change: %s to %s", old.GetObjectMeta().GetResourceVersion(), new.GetObjectMeta().GetResourceVersion())
// TODO
// NEED REFACTORING
//os.Exit(0)
@@ -698,104 +710,20 @@ func (c *Controller) patchCHIFinalizers(ctx context.Context, chi *api.ClickHouse
payload, _ := json.Marshal([]patchFinalizers{{
Op: "replace",
Path: "/metadata/finalizers",
- Value: chi.ObjectMeta.Finalizers,
+ Value: chi.GetFinalizers(),
}})
- _new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Patch(ctx, chi.Name, types.JSONPatchType, payload, controller.NewPatchOptions())
+ _new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Patch(ctx, chi.Name, kubeTypes.JSONPatchType, payload, controller.NewPatchOptions())
if err != nil {
// Error update
log.V(1).M(chi).F().Error("%q", err)
return err
}
- if chi.ObjectMeta.ResourceVersion != _new.ObjectMeta.ResourceVersion {
+ if chi.GetResourceVersion() != _new.GetResourceVersion() {
// Updated
- log.V(2).M(chi).F().Info("ResourceVersion change: %s to %s", chi.ObjectMeta.ResourceVersion, _new.ObjectMeta.ResourceVersion)
- chi.ObjectMeta.ResourceVersion = _new.ObjectMeta.ResourceVersion
- return nil
- }
-
- // ResourceVersion not changed - no update performed?
-
- return nil
-}
-
-// UpdateCHIStatusOptions defines how to update CHI status
-type UpdateCHIStatusOptions struct {
- api.CopyCHIStatusOptions
- TolerateAbsence bool
-}
-
-// updateCHIObjectStatus updates ClickHouseInstallation object's Status
-func (c *Controller) updateCHIObjectStatus(ctx context.Context, chi *api.ClickHouseInstallation, opts UpdateCHIStatusOptions) (err error) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- for retry, attempt := true, 1; retry; attempt++ {
- if attempt >= 5 {
- retry = false
- }
-
- err = c.doUpdateCHIObjectStatus(ctx, chi, opts)
- if err == nil {
- return nil
- }
-
- if retry {
- log.V(2).M(chi).F().Warning("got error, will retry. err: %q", err)
- time.Sleep(1 * time.Second)
- } else {
- log.V(1).M(chi).F().Error("got error, all retries are exhausted. err: %q", err)
- }
- }
- return
-}
-
-// doUpdateCHIObjectStatus updates ClickHouseInstallation object's Status
-func (c *Controller) doUpdateCHIObjectStatus(ctx context.Context, chi *api.ClickHouseInstallation, opts UpdateCHIStatusOptions) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- namespace, name := util.NamespaceName(chi.ObjectMeta)
- log.V(3).M(chi).F().Info("Update CHI status")
-
- podIPs := c.getPodsIPs(chi)
-
- cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(ctx, name, controller.NewGetOptions())
- if err != nil {
- if opts.TolerateAbsence {
- return nil
- }
- log.V(1).M(chi).F().Error("%q", err)
- return err
- }
- if cur == nil {
- if opts.TolerateAbsence {
- return nil
- }
- log.V(1).M(chi).F().Error("NULL returned")
- return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", namespace, name)
- }
-
- // Update status of a real object.
- cur.EnsureStatus().CopyFrom(chi.Status, opts.CopyCHIStatusOptions)
- cur.EnsureStatus().SetPodIPs(podIPs)
-
- _new, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).UpdateStatus(ctx, cur, controller.NewUpdateOptions())
- if err != nil {
- // Error update
- log.V(2).M(chi).F().Info("Got error upon update, may retry. err: %q", err)
- return err
- }
-
- // Propagate updated ResourceVersion into chi
- if chi.ObjectMeta.ResourceVersion != _new.ObjectMeta.ResourceVersion {
- log.V(3).M(chi).F().Info("ResourceVersion change: %s to %s", chi.ObjectMeta.ResourceVersion, _new.ObjectMeta.ResourceVersion)
- chi.ObjectMeta.ResourceVersion = _new.ObjectMeta.ResourceVersion
+ log.V(2).M(chi).F().Info("ResourceVersion change: %s to %s", chi.GetResourceVersion(), _new.GetResourceVersion())
+ chi.SetResourceVersion(_new.GetResourceVersion())
return nil
}
@@ -810,11 +738,11 @@ func (c *Controller) poll(ctx context.Context, chi *api.ClickHouseInstallation,
return
}
- namespace, name := util.NamespaceName(chi.ObjectMeta)
+ namespace, name := util.NamespaceName(chi)
for {
- cur, err := c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(ctx, name, controller.NewGetOptions())
- if f(cur, err) {
+ cur, err := c.kube.CR().Get(ctx, namespace, name)
+ if f(cur.(*api.ClickHouseInstallation), err) {
// Continue polling
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
@@ -843,16 +771,16 @@ func (c *Controller) installFinalizer(ctx context.Context, chi *api.ClickHouseIn
return err
}
if cur == nil {
- return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name)
+ return fmt.Errorf("ERROR GetCR (%s/%s): NULL returned", chi.Namespace, chi.Name)
}
- if util.InArray(FinalizerName, cur.ObjectMeta.Finalizers) {
+ if util.InArray(FinalizerName, cur.GetFinalizers()) {
// Already installed
return nil
}
log.V(3).M(chi).F().Info("no finalizer found, need to install one")
- cur.ObjectMeta.Finalizers = append(cur.ObjectMeta.Finalizers, FinalizerName)
+ cur.SetFinalizers(append(cur.GetFinalizers(), FinalizerName))
return c.patchCHIFinalizers(ctx, cur)
}
@@ -871,10 +799,10 @@ func (c *Controller) uninstallFinalizer(ctx context.Context, chi *api.ClickHouse
return err
}
if cur == nil {
- return fmt.Errorf("ERROR GetCHI (%s/%s): NULL returned", chi.Namespace, chi.Name)
+ return fmt.Errorf("ERROR GetCR (%s/%s): NULL returned", chi.Namespace, chi.Name)
}
- cur.ObjectMeta.Finalizers = util.RemoveFromArray(FinalizerName, cur.ObjectMeta.Finalizers)
+ cur.SetFinalizers(util.RemoveFromArray(FinalizerName, cur.GetFinalizers()))
return c.patchCHIFinalizers(ctx, cur)
}
@@ -924,14 +852,3 @@ func (c *Controller) handleObject(obj interface{}) {
// Add CHI object into reconcile loop
// TODO c.enqueueObject(chi.Namespace, chi.Name, chi)
}
-
-// waitForCacheSync is a logger-wrapper over cache.WaitForCacheSync() and it waits for caches to populate
-func waitForCacheSync(ctx context.Context, name string, cacheSyncs ...cache.InformerSynced) bool {
- log.V(1).F().Info("Syncing caches for %s controller", name)
- if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
- utilRuntime.HandleError(fmt.Errorf(messageUnableToSync, name))
- return false
- }
- log.V(1).F().Info("Caches are synced for %s controller", name)
- return true
-}
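
enqueueObject above shards commands across worker queues: ReconcileCHI commands hash into the queues above the reserved system range, while all other commands hash into the system range. A self-contained sketch of that distribution scheme follows; the hash function is an assumption, since the real implementation is util.HashIntoIntTopped, which this diff does not show.

// Sketch: hash-based queue sharding in the style of enqueueObject.
package main

import (
	"fmt"
	"hash/fnv"
)

// hashIntoIntTopped maps a key into [0, top). The real util.HashIntoIntTopped
// may use a different hash; only the distribution scheme matters here.
func hashIntoIntTopped(key string, top int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(key))
	return int(h.Sum32() % uint32(top))
}

func main() {
	const systemThreads = 1 // stand-in for api.DefaultReconcileSystemThreadsNumber
	const totalQueues = 5
	// ReconcileCHI commands land in [systemThreads, totalQueues)
	chiIndex := systemThreads + hashIntoIntTopped("ns/chi", totalQueues-systemThreads)
	// CHIT, chop config, endpoints, pod and DNS commands land in [0, systemThreads)
	sysIndex := hashIntoIntTopped("ns/endpoints", systemThreads)
	fmt.Println(chiIndex, sysIndex)
}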
diff --git a/pkg/controller/chi/creator.go b/pkg/controller/chi/creator.go
deleted file mode 100644
index 877d0fedb..000000000
--- a/pkg/controller/chi/creator.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "context"
- "fmt"
-
- "gopkg.in/d4l3k/messagediff.v1"
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// createStatefulSet is an internal function, used in reconcileStatefulSet only
-func (c *Controller) createStatefulSet(ctx context.Context, host *api.ChiHost) ErrorCRUD {
- log.V(1).M(host).F().P()
-
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- statefulSet := host.Runtime.DesiredStatefulSet
-
- log.V(1).Info("Create StatefulSet %s/%s", statefulSet.Namespace, statefulSet.Name)
- if _, err := c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(ctx, statefulSet, controller.NewCreateOptions()); err != nil {
- log.V(1).M(host).F().Error("StatefulSet create failed. err: %v", err)
- return errCRUDRecreate
- }
-
- // StatefulSet created, wait until host is ready
- if err := c.waitHostReady(ctx, host); err != nil {
- log.V(1).M(host).F().Error("StatefulSet create wait failed. err: %v", err)
- return c.onStatefulSetCreateFailed(ctx, host)
- }
-
- log.V(2).M(host).F().Info("Target generation reached, StatefulSet created successfully")
- return nil
-}
-
-// updateStatefulSet is an internal function, used in reconcileStatefulSet only
-func (c *Controller) updateStatefulSet(
- ctx context.Context,
- oldStatefulSet *apps.StatefulSet,
- newStatefulSet *apps.StatefulSet,
- host *api.ChiHost,
-) ErrorCRUD {
- log.V(2).M(host).F().P()
-
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // Apply newStatefulSet and wait for Generation to change
- updatedStatefulSet, err := c.kubeClient.AppsV1().StatefulSets(newStatefulSet.Namespace).Update(ctx, newStatefulSet, controller.NewUpdateOptions())
- if err != nil {
- log.V(1).M(host).F().Error("StatefulSet update failed. err: %v", err)
- diff, equal := messagediff.DeepDiff(oldStatefulSet.Spec, newStatefulSet.Spec)
-
- str := ""
- if equal {
- str += "EQUAL: "
- } else {
- str += "NOT EQUAL: "
- }
-
- if len(diff.Added) > 0 {
- // Something added
- str += util.MessageDiffItemString("added spec items", "none", "", diff.Added)
- }
-
- if len(diff.Removed) > 0 {
- // Something removed
- str += util.MessageDiffItemString("removed spec items", "none", "", diff.Removed)
- }
-
- if len(diff.Modified) > 0 {
- // Something modified
- str += util.MessageDiffItemString("modified spec items", "none", "", diff.Modified)
- }
- log.V(1).M(host).F().Error("%s", str)
-
- return errCRUDRecreate
- }
-
- // After calling "Update()"
- // 1. ObjectMeta.Generation is target generation
- // 2. Status.ObservedGeneration may be <= ObjectMeta.Generation
-
- if updatedStatefulSet.Generation == oldStatefulSet.Generation {
- // Generation is not updated - no changes in .spec section were made
- log.V(2).M(host).F().Info("no generation change")
- return nil
- }
-
- log.V(1).M(host).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation)
-
- if err := c.waitHostReady(ctx, host); err != nil {
- log.V(1).M(host).F().Error("StatefulSet update wait failed. err: %v", err)
- return c.onStatefulSetUpdateFailed(ctx, oldStatefulSet, host)
- }
-
- log.V(2).M(host).F().Info("Target generation reached, StatefulSet updated successfully")
- return nil
-}
-
-// Comment out PV
-// updatePersistentVolume
-//func (c *Controller) updatePersistentVolume(ctx context.Context, pv *core.PersistentVolume) (*core.PersistentVolume, error) {
-// log.V(2).M(pv).F().P()
-// if util.IsContextDone(ctx) {
-// log.V(2).Info("task is done")
-// return nil, fmt.Errorf("task is done")
-// }
-//
-// var err error
-// pv, err = c.kubeClient.CoreV1().PersistentVolumes().Update(ctx, pv, newUpdateOptions())
-// if err != nil {
-// // Update failed
-// log.V(1).M(pv).F().Error("%v", err)
-// return nil, err
-// }
-//
-// return pv, err
-//}
-
-// updatePersistentVolumeClaim
-func (c *Controller) updatePersistentVolumeClaim(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
- log.V(2).M(pvc).F().P()
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil, fmt.Errorf("task is done")
- }
-
- _, err := c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, controller.NewGetOptions())
- if err != nil {
- if apiErrors.IsNotFound(err) {
- // This is not an error per se, means PVC is not created (yet)?
- _, err = c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, controller.NewCreateOptions())
- if err != nil {
- log.V(1).M(pvc).F().Error("unable to Create PVC err: %v", err)
- }
- return pvc, err
- }
- // In case of any non-NotFound API error - unable to proceed
- log.V(1).M(pvc).F().Error("ERROR unable to get PVC(%s/%s) err: %v", pvc.Namespace, pvc.Name, err)
- return nil, err
- }
-
- pvcUpdated, err := c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(ctx, pvc, controller.NewUpdateOptions())
- if err == nil {
- return pvcUpdated, err
- }
-
- // Update failed
- // May want to suppress special case of an error
- //if strings.Contains(err.Error(), "field can not be less than previous value") {
- // return pvc, nil
- //}
- log.V(1).M(pvc).F().Error("unable to Update PVC err: %v", err)
- return nil, err
-}
-
-// onStatefulSetCreateFailed handles situation when StatefulSet create failed
-// It can just delete failed StatefulSet or do nothing
-func (c *Controller) onStatefulSetCreateFailed(ctx context.Context, host *api.ChiHost) ErrorCRUD {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return errCRUDIgnore
- }
-
- // What to do with StatefulSet - look into chop configuration settings
- switch chop.Config().Reconcile.StatefulSet.Create.OnFailure {
- case api.OnStatefulSetCreateFailureActionAbort:
- // Report appropriate error, it will break reconcile loop
- log.V(1).M(host).F().Info("abort")
- return errCRUDAbort
-
- case api.OnStatefulSetCreateFailureActionDelete:
- // Delete gracefully failed StatefulSet
- log.V(1).M(host).F().Info(
- "going to DELETE FAILED StatefulSet %s",
- util.NamespaceNameString(host.Runtime.DesiredStatefulSet.ObjectMeta))
- _ = c.deleteHost(ctx, host)
- return c.shouldContinueOnCreateFailed()
-
- case api.OnStatefulSetCreateFailureActionIgnore:
- // Ignore error, continue reconcile loop
- log.V(1).M(host).F().Info(
- "going to ignore error %s",
- util.NamespaceNameString(host.Runtime.DesiredStatefulSet.ObjectMeta))
- return errCRUDIgnore
-
- default:
- log.V(1).M(host).F().Error(
- "Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s",
- chop.Config().Reconcile.StatefulSet.Create.OnFailure)
- return errCRUDIgnore
- }
-
- return errCRUDUnexpectedFlow
-}
-
-// onStatefulSetUpdateFailed handles situation when StatefulSet update failed
-// It can try to revert StatefulSet to its previous version, specified in rollbackStatefulSet
-func (c *Controller) onStatefulSetUpdateFailed(ctx context.Context, rollbackStatefulSet *apps.StatefulSet, host *api.ChiHost) ErrorCRUD {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return errCRUDIgnore
- }
-
- // Convenience shortcuts
- namespace := rollbackStatefulSet.Namespace
-
- // What to do with StatefulSet - look into chop configuration settings
- switch chop.Config().Reconcile.StatefulSet.Update.OnFailure {
- case api.OnStatefulSetUpdateFailureActionAbort:
- // Report appropriate error, it will break reconcile loop
- log.V(1).M(host).F().Info("abort StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
- return errCRUDAbort
-
- case api.OnStatefulSetUpdateFailureActionRollback:
- // Need to revert current StatefulSet to oldStatefulSet
- log.V(1).M(host).F().Info("going to ROLLBACK FAILED StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
- statefulSet, err := c.getStatefulSet(host)
- if err != nil {
- log.V(1).M(host).F().Warning("Unable to fetch current StatefulSet %s. err: %q", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta), err)
- return c.shouldContinueOnUpdateFailed()
- }
-
- // Make copy of "previous" .Spec just to be sure nothing gets corrupted
- // Update StatefulSet to its 'previous' oldStatefulSet - this is expected to rollback inapplicable changes
- // Having StatefulSet .spec in rolled back status we need to delete current Pod - because in case of Pod being seriously broken,
- // it is the only way to go. Just delete Pod and StatefulSet will recreated Pod with current .spec
- // This will rollback Pod to previous .spec
- statefulSet.Spec = *rollbackStatefulSet.Spec.DeepCopy()
- statefulSet, _ = c.kubeClient.AppsV1().StatefulSets(namespace).Update(ctx, statefulSet, controller.NewUpdateOptions())
- _ = c.statefulSetDeletePod(ctx, statefulSet, host)
-
- return c.shouldContinueOnUpdateFailed()
-
- case api.OnStatefulSetUpdateFailureActionIgnore:
- // Ignore error, continue reconcile loop
- log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(rollbackStatefulSet.ObjectMeta))
- return errCRUDIgnore
-
- default:
- log.V(1).M(host).F().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", chop.Config().Reconcile.StatefulSet.Update.OnFailure)
- return errCRUDIgnore
- }
-
- return errCRUDUnexpectedFlow
-}
-
-// shouldContinueOnCreateFailed return nil in case 'continue' or error in case 'do not continue'
-func (c *Controller) shouldContinueOnCreateFailed() ErrorCRUD {
- // Check configuration option regarding should we continue when errors met on the way
- // c.chopConfig.OnStatefulSetUpdateFailureAction
- var continueUpdate = false
- if continueUpdate {
- // Continue update
- return errCRUDIgnore
- }
-
- // Do not continue update
- return errCRUDAbort
-}
-
-// shouldContinueOnUpdateFailed return nil in case 'continue' or error in case 'do not continue'
-func (c *Controller) shouldContinueOnUpdateFailed() ErrorCRUD {
- // Check configuration option regarding should we continue when errors met on the way
- // c.chopConfig.OnStatefulSetUpdateFailureAction
- var continueUpdate = false
- if continueUpdate {
- // Continue update
- return errCRUDIgnore
- }
-
- // Do not continue update
- return errCRUDAbort
-}
-
-func (c *Controller) createSecret(ctx context.Context, secret *core.Secret) error {
- log.V(1).M(secret).F().P()
-
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- log.V(1).Info("Create Secret %s/%s", secret.Namespace, secret.Name)
- if _, err := c.kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, controller.NewCreateOptions()); err != nil {
- // Unable to create StatefulSet at all
- log.V(1).Error("Create Secret %s/%s failed err:%v", secret.Namespace, secret.Name, err)
- return err
- }
-
- return nil
-}
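
The removed updateStatefulSet leaned on the Generation contract: a spec change bumps metadata.generation, and the StatefulSet controller echoes it back in status.observedGeneration once the change has been observed. A sketch of a readiness predicate built on that contract (a simplification of what waitHostReady checks; the function name is illustrative):

// Sketch: has the StatefulSet controller caught up with the latest spec,
// and are all replicas updated and ready?
// Assumes: apps "k8s.io/api/apps/v1"
func statefulSetCaughtUp(sts *apps.StatefulSet) bool {
	if sts.Status.ObservedGeneration < sts.Generation {
		return false // controller has not yet observed the latest spec
	}
	want := int32(1) // StatefulSet default when .spec.replicas is unset
	if sts.Spec.Replicas != nil {
		want = *sts.Spec.Replicas
	}
	return sts.Status.UpdatedReplicas == want && sts.Status.ReadyReplicas == want
}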
diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go
deleted file mode 100644
index 72449a056..000000000
--- a/pkg/controller/chi/deleter.go
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "context"
- "time"
-
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// deleteHost deletes all kubernetes resources related to replica *chop.ChiHost
-func (c *Controller) deleteHost(ctx context.Context, host *api.ChiHost) error {
- log.V(1).M(host).S().Info(host.Runtime.Address.ClusterNameString())
-
- // Each host consists of:
- _ = c.deleteStatefulSet(ctx, host)
- _ = c.deletePVC(ctx, host)
- _ = c.deleteConfigMap(ctx, host)
- _ = c.deleteServiceHost(ctx, host)
-
- log.V(1).M(host).E().Info(host.Runtime.Address.ClusterNameString())
-
- return nil
-}
-
-// deleteConfigMapsCHI
-func (c *Controller) deleteConfigMapsCHI(ctx context.Context, chi *api.ClickHouseInstallation) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // Delete common ConfigMap's
- //
- // chi-b3d29f-common-configd 2 61s
- // chi-b3d29f-common-usersd 0 61s
- // service/clickhouse-example-01 LoadBalancer 10.106.183.200 8123:31607/TCP,9000:31492/TCP,9009:31357/TCP 33s clickhouse.altinity.com/chi=example-01
-
- var err error
-
- configMapCommon := model.CreateConfigMapCommonName(chi)
- configMapCommonUsersName := model.CreateConfigMapCommonUsersName(chi)
-
- // Delete ConfigMap
- err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(ctx, configMapCommon, controller.NewDeleteOptions())
- switch {
- case err == nil:
- log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommon)
- case apiErrors.IsNotFound(err):
- log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommon)
- default:
- log.V(1).M(chi).F().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommon, err)
- }
-
- err = c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).Delete(ctx, configMapCommonUsersName, controller.NewDeleteOptions())
- switch {
- case err == nil:
- log.V(1).M(chi).Info("OK delete ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
- case apiErrors.IsNotFound(err):
- log.V(1).M(chi).Info("NEUTRAL not found ConfigMap %s/%s", chi.Namespace, configMapCommonUsersName)
- err = nil
- default:
- log.V(1).M(chi).F().Error("FAIL delete ConfigMap %s/%s err:%v", chi.Namespace, configMapCommonUsersName, err)
- }
-
- return err
-}
-
-// statefulSetDeletePod delete a pod of a StatefulSet. This requests StatefulSet to relaunch deleted pod
-func (c *Controller) statefulSetDeletePod(ctx context.Context, statefulSet *apps.StatefulSet, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- name := model.CreatePodName(statefulSet)
- log.V(1).M(host).Info("Delete Pod %s/%s", statefulSet.Namespace, name)
- err := c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(ctx, name, controller.NewDeleteOptions())
- if err == nil {
- log.V(1).M(host).Info("OK delete Pod %s/%s", statefulSet.Namespace, name)
- } else if apiErrors.IsNotFound(err) {
- log.V(1).M(host).Info("NEUTRAL not found Pod %s/%s", statefulSet.Namespace, name)
- err = nil
- } else {
- log.V(1).M(host).F().Error("FAIL delete Pod %s/%s err:%v", statefulSet.Namespace, name, err)
- }
-
- return err
-}
-
-// deleteStatefulSet gracefully deletes StatefulSet through zeroing Pod's count
-func (c *Controller) deleteStatefulSet(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // IMPORTANT
- // StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted.
- // To achieve ordered and graceful termination of the pods in the StatefulSet,
- // it is possible to scale the StatefulSet down to 0 prior to deletion.
-
- // Namespaced name
- name := model.CreateStatefulSetName(host)
- namespace := host.Runtime.Address.Namespace
- log.V(1).M(host).F().Info("%s/%s", namespace, name)
-
- var err error
- host.Runtime.CurStatefulSet, err = c.getStatefulSet(host)
- if err != nil {
- // Unable to fetch cur StatefulSet, but this is not necessarily an error yet
- if apiErrors.IsNotFound(err) {
- log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
- } else {
- log.V(1).M(host).F().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err)
- }
- return err
- }
-
- // Scale StatefulSet down to 0 pods count.
- // This is the proper and graceful way to delete StatefulSet
- var zero int32 = 0
- host.Runtime.CurStatefulSet.Spec.Replicas = &zero
- if _, err := c.kubeClient.AppsV1().StatefulSets(namespace).Update(ctx, host.Runtime.CurStatefulSet, controller.NewUpdateOptions()); err != nil {
- log.V(1).M(host).Error("UNABLE to update StatefulSet %s/%s", namespace, name)
- return err
- }
-
- // Wait until StatefulSet scales down to 0 pods count.
- _ = c.waitHostReady(ctx, host)
-
- // And now delete empty StatefulSet
- if err := c.kubeClient.AppsV1().StatefulSets(namespace).Delete(ctx, name, controller.NewDeleteOptions()); err == nil {
- log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name)
- c.waitHostDeleted(host)
- } else if apiErrors.IsNotFound(err) {
- log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
- } else {
- log.V(1).M(host).F().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err)
- }
-
- return nil
-}
-
-// syncStatefulSet
-func (c *Controller) syncStatefulSet(ctx context.Context, host *api.ChiHost) {
- for {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
- // TODO
- // There should be better way to sync cache
- if sts, err := c.getStatefulSetByHost(host); err == nil {
- log.V(2).Info("cache NOT yet synced sts %s/%s is scheduled for deletion on %s", sts.Namespace, sts.Name, sts.DeletionTimestamp)
- util.WaitContextDoneOrTimeout(ctx, 15*time.Second)
- } else {
- log.V(1).Info("cache synced")
- return
- }
- }
-}
-
-// deletePVC deletes PersistentVolumeClaim
-func (c *Controller) deletePVC(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- log.V(2).M(host).S().P()
- defer log.V(2).M(host).E().P()
-
- namespace := host.Runtime.Address.Namespace
- c.walkDiscoveredPVCs(host, func(pvc *core.PersistentVolumeClaim) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
-
- // Check whether PVC can be deleted
- if model.HostCanDeletePVC(host, pvc.Name) {
- log.V(1).M(host).Info("PVC %s/%s would be deleted", namespace, pvc.Name)
- } else {
- log.V(1).M(host).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name)
- // Move to the next PVC
- return
- }
-
- // Delete PVC
- if err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvc.Name, controller.NewDeleteOptions()); err == nil {
- log.V(1).M(host).Info("OK delete PVC %s/%s", namespace, pvc.Name)
- } else if apiErrors.IsNotFound(err) {
- log.V(1).M(host).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name)
- } else {
- log.M(host).F().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err)
- }
- })
-
- return nil
-}
-
-// deleteConfigMap deletes ConfigMap
-func (c *Controller) deleteConfigMap(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- name := model.CreateConfigMapHostName(host)
- namespace := host.Runtime.Address.Namespace
- log.V(1).M(host).F().Info("%s/%s", namespace, name)
-
- if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, name, controller.NewDeleteOptions()); err == nil {
- log.V(1).M(host).Info("OK delete ConfigMap %s/%s", namespace, name)
- } else if apiErrors.IsNotFound(err) {
- log.V(1).M(host).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name)
- } else {
- log.V(1).M(host).F().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err)
- }
-
- //name = chopmodel.CreateConfigMapHostMigrationName(host)
- //namespace = host.Address.Namespace
- //log.V(1).M(host).F().Info("%s/%s", namespace, name)
- //
- //if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(task, name, newDeleteOptions()); err == nil {
- // log.V(1).M(host).Info("OK delete ConfigMap %s/%s", namespace, name)
- //} else if apierrors.IsNotFound(err) {
- // log.V(1).M(host).Info("NEUTRAL not found ConfigMap %s/%s", namespace, name)
- //} else {
- // log.V(1).M(host).F().Error("FAIL delete ConfigMap %s/%s err:%v", namespace, name, err)
- //}
-
- return nil
-}
-
-// deleteServiceHost deletes Service
-func (c *Controller) deleteServiceHost(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- serviceName := model.CreateStatefulSetServiceName(host)
- namespace := host.Runtime.Address.Namespace
- log.V(1).M(host).F().Info("%s/%s", namespace, serviceName)
- return c.deleteServiceIfExists(ctx, namespace, serviceName)
-}
-
-// deleteServiceShard
-func (c *Controller) deleteServiceShard(ctx context.Context, shard *api.ChiShard) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- serviceName := model.CreateShardServiceName(shard)
- namespace := shard.Runtime.Address.Namespace
- log.V(1).M(shard).F().Info("%s/%s", namespace, serviceName)
- return c.deleteServiceIfExists(ctx, namespace, serviceName)
-}
-
-// deleteServiceCluster
-func (c *Controller) deleteServiceCluster(ctx context.Context, cluster *api.Cluster) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- serviceName := model.CreateClusterServiceName(cluster)
- namespace := cluster.Runtime.Address.Namespace
- log.V(1).M(cluster).F().Info("%s/%s", namespace, serviceName)
- return c.deleteServiceIfExists(ctx, namespace, serviceName)
-}
-
-// deleteServiceCHI
-func (c *Controller) deleteServiceCHI(ctx context.Context, chi *api.ClickHouseInstallation) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- serviceName := model.CreateCHIServiceName(chi)
- namespace := chi.Namespace
- log.V(1).M(chi).F().Info("%s/%s", namespace, serviceName)
- return c.deleteServiceIfExists(ctx, namespace, serviceName)
-}
-
-// deleteServiceIfExists deletes Service in case it does not exist
-func (c *Controller) deleteServiceIfExists(ctx context.Context, namespace, name string) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // Check specified service exists
- _, err := c.kubeClient.CoreV1().Services(namespace).Get(ctx, name, controller.NewGetOptions())
-
- if err != nil {
- // No such a service, nothing to delete
- log.V(1).M(namespace, name).F().Info("Not Found Service: %s/%s err: %v", namespace, name, err)
- return nil
- }
-
- // Delete service
- err = c.kubeClient.CoreV1().Services(namespace).Delete(ctx, name, controller.NewDeleteOptions())
- if err == nil {
- log.V(1).M(namespace, name).F().Info("OK delete Service: %s/%s", namespace, name)
- } else {
- log.V(1).M(namespace, name).F().Error("FAIL delete Service: %s/%s err:%v", namespace, name, err)
- }
-
- return err
-}
-
-// deleteSecretCluster
-func (c *Controller) deleteSecretCluster(ctx context.Context, cluster *api.Cluster) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- secretName := model.CreateClusterAutoSecretName(cluster)
- namespace := cluster.Runtime.Address.Namespace
- log.V(1).M(cluster).F().Info("%s/%s", namespace, secretName)
- return c.deleteSecretIfExists(ctx, namespace, secretName)
-}
-
-// deleteSecretIfExists deletes Secret in case it does not exist
-func (c *Controller) deleteSecretIfExists(ctx context.Context, namespace, name string) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // Check specified service exists
- _, err := c.kubeClient.CoreV1().Secrets(namespace).Get(ctx, name, controller.NewGetOptions())
-
- if err != nil {
- // No such a service, nothing to delete
- return nil
- }
-
- // Delete
- err = c.kubeClient.CoreV1().Secrets(namespace).Delete(ctx, name, controller.NewDeleteOptions())
- if err == nil {
- log.V(1).M(namespace, name).Info("OK delete Secret/%s", namespace, name)
- } else {
- log.V(1).M(namespace, name).F().Error("FAIL delete Secret %s/%s err:%v", namespace, name, err)
- }
-
- return err
-}
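
deleteStatefulSet above implements the pattern its IMPORTANT comment describes: since StatefulSet deletion gives no guarantees about pod termination order, scale to zero first, wait, then delete the empty object. A condensed client-go sketch of the same sequence (cache syncing and the wait step are trimmed; the clientset wiring is an assumption):

// Sketch: graceful StatefulSet deletion via scale-to-zero.
// Assumes: kubernetes "k8s.io/client-go/kubernetes"
//          meta "k8s.io/apimachinery/pkg/apis/meta/v1"
func deleteStatefulSetGracefully(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	sts, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, meta.GetOptions{})
	if err != nil {
		return err
	}
	var zero int32 = 0
	sts.Spec.Replicas = &zero
	if _, err := client.AppsV1().StatefulSets(namespace).Update(ctx, sts, meta.UpdateOptions{}); err != nil {
		return err
	}
	// Production code waits here until the pods have actually terminated
	// (the operator used waitHostReady for this) before removing the object.
	return client.AppsV1().StatefulSets(namespace).Delete(ctx, name, meta.DeleteOptions{})
}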
diff --git a/pkg/controller/chi/discoverer.go b/pkg/controller/chi/discoverer.go
deleted file mode 100644
index 88a1b73db..000000000
--- a/pkg/controller/chi/discoverer.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "context"
-
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-func (c *Controller) discovery(ctx context.Context, chi *api.ClickHouseInstallation) *model.Registry {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- opts := controller.NewListOptions(model.NewLabeler(chi).GetSelectorCHIScope())
- r := model.NewRegistry()
- c.discoveryStatefulSets(ctx, r, chi, opts)
- c.discoveryConfigMaps(ctx, r, chi, opts)
- c.discoveryServices(ctx, r, chi, opts)
- c.discoverySecrets(ctx, r, chi, opts)
- c.discoveryPVCs(ctx, r, chi, opts)
- // Comment out PV
- //c.discoveryPVs(ctx, r, chi, opts)
- c.discoveryPDBs(ctx, r, chi, opts)
- return r
-}
-
-func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.AppsV1().StatefulSets(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list StatefulSet err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list StatefulSet list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterStatefulSet(obj.ObjectMeta)
- }
-}
-
-func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.CoreV1().ConfigMaps(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list ConfigMap err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list ConfigMap list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterConfigMap(obj.ObjectMeta)
- }
-}
-
-func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.CoreV1().Services(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list Service err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list Service list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterService(obj.ObjectMeta)
- }
-}
-
-func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.CoreV1().Secrets(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list Secret err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list Secret list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterSecret(obj.ObjectMeta)
- }
-}
-
-func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list PVC err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list PVC list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterPVC(obj.ObjectMeta)
- }
-}
-
-// Comment out PV
-//func (c *Controller) discoveryPVs(ctx context.Context, r *chopModel.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
-// list, err := c.kubeClient.CoreV1().PersistentVolumes().List(ctx, opts)
-// if err != nil {
-// log.M(chi).F().Error("FAIL list PV err: %v", err)
-// return
-// }
-// if list == nil {
-// log.M(chi).F().Error("FAIL list PV list is nil")
-// return
-// }
-// for _, obj := range list.Items {
-// r.RegisterPV(obj.ObjectMeta)
-// }
-//}
-
-func (c *Controller) discoveryPDBs(ctx context.Context, r *model.Registry, chi *api.ClickHouseInstallation, opts meta.ListOptions) {
- list, err := c.kubeClient.PolicyV1().PodDisruptionBudgets(chi.Namespace).List(ctx, opts)
- if err != nil {
- log.M(chi).F().Error("FAIL list PDB err: %v", err)
- return
- }
- if list == nil {
- log.M(chi).F().Error("FAIL list PDB list is nil")
- return
- }
- for _, obj := range list.Items {
- r.RegisterPDB(obj.ObjectMeta)
- }
-}
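
Each discovery* function above is the same loop specialized per resource kind: list the objects matching the CHI-scope label selector, then record each item's ObjectMeta in the registry. A sketch of one such loop against a plain clientset; the selector value follows the "clickhouse.altinity.com/chi=example-01" example quoted in deleter.go above, and the register callback stands in for the Registry methods.

// Sketch: list-and-register discovery for ConfigMaps.
// Assumes: kubernetes "k8s.io/client-go/kubernetes"
//          meta "k8s.io/apimachinery/pkg/apis/meta/v1"
func discoverConfigMaps(ctx context.Context, client kubernetes.Interface, namespace, selector string, register func(meta.ObjectMeta)) error {
	list, err := client.CoreV1().ConfigMaps(namespace).List(ctx, meta.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, cm := range list.Items {
		register(cm.ObjectMeta)
	}
	return nil
}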
diff --git a/pkg/controller/chi/error.go b/pkg/controller/chi/error.go
deleted file mode 100644
index 311421306..000000000
--- a/pkg/controller/chi/error.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "errors"
-)
-
-// ErrorCRUD specifies errors of the CRUD operations
-type ErrorCRUD error
-
-var (
- errCRUDAbort ErrorCRUD = errors.New("crud error - should abort")
- errCRUDIgnore ErrorCRUD = errors.New("crud error - should ignore")
- errCRUDRecreate ErrorCRUD = errors.New("crud error - should recreate")
- errCRUDUnexpectedFlow ErrorCRUD = errors.New("crud error - unexpected flow")
-)
-
-// ErrorDataPersistence specifies errors of the PVCs and PVs
-type ErrorDataPersistence error
-
-var (
- errPVCWithLostPVDeleted ErrorDataPersistence = errors.New("pvc with lost pv deleted")
- errPVCIsLost ErrorDataPersistence = errors.New("pvc is lost")
-)
-
-func errIsDataLoss(err error) bool {
- switch err {
- case errPVCWithLostPVDeleted:
- return true
- case errPVCIsLost:
- return true
- }
- return false
-}
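
error.go modeled CRUD outcomes as sentinel errors compared by identity: errIsDataLoss switches on err directly, which breaks as soon as a caller wraps the sentinel. With the standard library's errors package the check is usually written with errors.Is; a sketch under that assumption:

// Sketch: the same data-loss check written with errors.Is, which keeps
// working if callers wrap the sentinels via fmt.Errorf("...: %w", err).
func errIsDataLoss(err error) bool {
	return errors.Is(err, errPVCWithLostPVDeleted) || errors.Is(err, errPVCIsLost)
}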
diff --git a/pkg/controller/chi/event.go b/pkg/controller/chi/event.go
deleted file mode 100644
index 3f0d49deb..000000000
--- a/pkg/controller/chi/event.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "time"
-
- core "k8s.io/api/core/v1"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
-)
-
-const (
- // Event type (Info, Warning, Error) specifies what event type is this
- eventTypeInfo = "Info"
- eventTypeWarning = "Warning"
- eventTypeError = "Error"
-)
-
-const (
- // Event action describes what action was taken
- eventActionReconcile = "Reconcile"
- eventActionCreate = "Create"
- eventActionUpdate = "Update"
- eventActionDelete = "Delete"
- eventActionProgress = "Progress"
-)
-
-const (
- // Short, machine understandable string that gives the reason for the transition into the object's current status
- eventReasonReconcileStarted = "ReconcileStarted"
- eventReasonReconcileInProgress = "ReconcileInProgress"
- eventReasonReconcileCompleted = "ReconcileCompleted"
- eventReasonReconcileFailed = "ReconcileFailed"
- eventReasonCreateStarted = "CreateStarted"
- eventReasonCreateInProgress = "CreateInProgress"
- eventReasonCreateCompleted = "CreateCompleted"
- eventReasonCreateFailed = "CreateFailed"
- eventReasonUpdateStarted = "UpdateStarted"
- eventReasonUpdateInProgress = "UpdateInProgress"
- eventReasonUpdateCompleted = "UpdateCompleted"
- eventReasonUpdateFailed = "UpdateFailed"
- eventReasonDeleteStarted = "DeleteStarted"
- eventReasonDeleteInProgress = "DeleteInProgress"
- eventReasonDeleteCompleted = "DeleteCompleted"
- eventReasonDeleteFailed = "DeleteFailed"
- eventReasonProgressHostsCompleted = "ProgressHostsCompleted"
-)
-
-// EventInfo emits event Info
-func (c *Controller) EventInfo(
- chi *api.ClickHouseInstallation,
- action string,
- reason string,
- message string,
-) {
- c.emitEvent(chi, eventTypeInfo, action, reason, message)
-}
-
-// EventWarning emits event Warning
-func (c *Controller) EventWarning(
- chi *api.ClickHouseInstallation,
- action string,
- reason string,
- message string,
-) {
- c.emitEvent(chi, eventTypeWarning, action, reason, message)
-}
-
-// EventError emits event Error
-func (c *Controller) EventError(
- chi *api.ClickHouseInstallation,
- action string,
- reason string,
- message string,
-) {
- c.emitEvent(chi, eventTypeError, action, reason, message)
-}
-
-// emitEvent creates CHI-related event
-// typ - type of the event - Normal, Warning, etc, one of eventType*
-// action - what action was attempted, and then succeeded/failed regarding to the Involved Object. One of eventAction*
-// reason - short, machine understandable string, one of eventReason*
-// message - human-readable description
-func (c *Controller) emitEvent(
- chi *api.ClickHouseInstallation,
- _type string,
- action string,
- reason string,
- message string,
-) {
- now := time.Now()
- kind := "ClickHouseInstallation"
- namespace := chi.Namespace
- name := chi.Name
- uid := chi.UID
- resourceVersion := chi.ResourceVersion
-
- event := &core.Event{
- ObjectMeta: meta.ObjectMeta{
- GenerateName: "chop-chi-",
- },
- InvolvedObject: core.ObjectReference{
- Kind: kind,
- Namespace: namespace,
- Name: name,
- UID: uid,
- APIVersion: "clickhouse.altinity.com/v1",
- ResourceVersion: resourceVersion,
- },
- Reason: reason,
- Message: message,
- Source: core.EventSource{
- Component: componentName,
- },
- FirstTimestamp: meta.Time{
- Time: now,
- },
- LastTimestamp: meta.Time{
- Time: now,
- },
- Count: 1,
- Type: _type,
- Action: action,
- ReportingController: componentName,
- // ID of the controller instance, e.g. `kubelet-xyzf`.
- // ReportingInstance:
- }
- _, err := c.kubeClient.CoreV1().Events(namespace).Create(controller.NewContext(), event, controller.NewCreateOptions())
-
- if err != nil {
- log.M(chi).F().Error("Create Event failed: %v", err)
- }
-
- log.V(2).M(chi).Info("Wrote event at: %s type: %s action: %s reason: %s message: %s", now, _type, action, reason, message)
-}
diff --git a/pkg/controller/chi/getter.go b/pkg/controller/chi/getter.go
deleted file mode 100644
index 46dfc02ff..000000000
--- a/pkg/controller/chi/getter.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "fmt"
-
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- k8sLabels "k8s.io/apimachinery/pkg/labels"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
-)
-
-// getConfigMap gets ConfigMap either by namespaced name or by labels
-// TODO review byNameOnly params
-func (c *Controller) getConfigMap(objMeta *meta.ObjectMeta, byNameOnly bool) (*core.ConfigMap, error) {
- get := c.configMapLister.ConfigMaps(objMeta.Namespace).Get
- list := c.configMapLister.ConfigMaps(objMeta.Namespace).List
- var objects []*core.ConfigMap
-
- // Check whether object with such name already exists
- obj, err := get(objMeta.Name)
-
- if (obj != nil) && (err == nil) {
- // Object found by name
- return obj, nil
- }
-
- if !apiErrors.IsNotFound(err) {
- // Error, which is not related to "Object not found"
- return nil, err
- }
-
- // Object not found by name
-
- if byNameOnly {
- return nil, err
- }
-
- // Try to find by labels
-
- var selector k8sLabels.Selector
- if selector, err = model.MakeSelectorFromObjectMeta(objMeta); err != nil {
- return nil, err
- }
-
- if objects, err = list(selector); err != nil {
- return nil, err
- }
-
- if len(objects) == 0 {
- return nil, apiErrors.NewNotFound(apps.Resource("ConfigMap"), objMeta.Name)
- }
-
- if len(objects) == 1 {
- // Exactly one object found by labels
- return objects[0], nil
- }
-
- // Too much objects found by labels
- return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects))
-}
-
-// getService gets Service. Accepted types:
-// 1. *core.Service
-// 2. *chop.ChiHost
-func (c *Controller) getService(obj interface{}) (*core.Service, error) {
- var name, namespace string
- switch typedObj := obj.(type) {
- case *core.Service:
- name = typedObj.Name
- namespace = typedObj.Namespace
- case *api.ChiHost:
- name = model.CreateStatefulSetServiceName(typedObj)
- namespace = typedObj.Runtime.Address.Namespace
- }
- return c.serviceLister.Services(namespace).Get(name)
- //return c.kubeClient.CoreV1().Services(namespace).Get(newTask(), name, newGetOptions())
-}
-
-// getStatefulSet gets StatefulSet. Accepted types:
-// 1. *meta.ObjectMeta
-// 2. *chop.ChiHost
-func (c *Controller) getStatefulSet(obj interface{}, byName ...bool) (*apps.StatefulSet, error) {
- switch typedObj := obj.(type) {
- case *meta.ObjectMeta:
- var b bool
- if len(byName) > 0 {
- b = byName[0]
- }
- return c.getStatefulSetByMeta(typedObj, b)
- case *api.ChiHost:
- return c.getStatefulSetByHost(typedObj)
- }
- return nil, fmt.Errorf("unknown type")
-}
-
-// getStatefulSet gets StatefulSet either by namespaced name or by labels
-// TODO review byNameOnly params
-func (c *Controller) getStatefulSetByMeta(meta *meta.ObjectMeta, byNameOnly bool) (*apps.StatefulSet, error) {
- get := c.statefulSetLister.StatefulSets(meta.Namespace).Get
- list := c.statefulSetLister.StatefulSets(meta.Namespace).List
- var objects []*apps.StatefulSet
-
- // Check whether object with such name already exists
- obj, err := get(meta.Name)
-
- if (obj != nil) && (err == nil) {
- // Object found by name
- return obj, nil
- }
-
- if !apiErrors.IsNotFound(err) {
- // Error, which is not related to "Object not found"
- return nil, err
- }
-
- // Object not found by name. Try to find by labels
-
- if byNameOnly {
- return nil, fmt.Errorf("object not found by name %s/%s and no label search allowed ", meta.Namespace, meta.Name)
- }
-
- var selector k8sLabels.Selector
- if selector, err = model.MakeSelectorFromObjectMeta(meta); err != nil {
- return nil, err
- }
-
- if objects, err = list(selector); err != nil {
- return nil, err
- }
-
- if len(objects) == 0 {
- return nil, apiErrors.NewNotFound(apps.Resource("StatefulSet"), meta.Name)
- }
-
- if len(objects) == 1 {
- // Exactly one object found by labels
- return objects[0], nil
- }
-
- // Too much objects found by labels
- return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects))
-}
-
-// getStatefulSetByHost finds StatefulSet of a specified host
-func (c *Controller) getStatefulSetByHost(host *api.ChiHost) (*apps.StatefulSet, error) {
- // Namespaced name
- name := model.CreateStatefulSetName(host)
- namespace := host.Runtime.Address.Namespace
-
- return c.kubeClient.AppsV1().StatefulSets(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
-}
-
-// getSecret gets secret
-func (c *Controller) getSecret(secret *core.Secret) (*core.Secret, error) {
- return c.kubeClient.CoreV1().Secrets(secret.Namespace).Get(controller.NewContext(), secret.Name, controller.NewGetOptions())
-}
-
-// getPod gets pod. Accepted types:
-// 1. *apps.StatefulSet
-// 2. *chop.ChiHost
-func (c *Controller) getPod(obj interface{}) (*core.Pod, error) {
- var name, namespace string
- switch typedObj := obj.(type) {
- case *apps.StatefulSet:
- name = model.CreatePodName(obj)
- namespace = typedObj.Namespace
- case *api.ChiHost:
- name = model.CreatePodName(obj)
- namespace = typedObj.Runtime.Address.Namespace
- }
- return c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
-}
-
-// getPods gets all pods for provided entity
-func (c *Controller) getPods(obj interface{}) []*core.Pod {
- switch typed := obj.(type) {
- case *api.ClickHouseInstallation:
- return c.getPodsOfCHI(typed)
- case *api.Cluster:
- return c.getPodsOfCluster(typed)
- case *api.ChiShard:
- return c.getPodsOfShard(typed)
- case
- *api.ChiHost,
- *apps.StatefulSet:
- if pod, err := c.getPod(typed); err == nil {
- return []*core.Pod{
- pod,
- }
- }
- }
- return nil
-}
-
-// getPodsOfCluster gets all pods in a cluster
-func (c *Controller) getPodsOfCluster(cluster *api.Cluster) (pods []*core.Pod) {
- cluster.WalkHosts(func(host *api.ChiHost) error {
- if pod, err := c.getPod(host); err == nil {
- pods = append(pods, pod)
- }
- return nil
- })
- return pods
-}
-
-// getPodsOfShard gets all pods in a shard
-func (c *Controller) getPodsOfShard(shard *api.ChiShard) (pods []*core.Pod) {
- shard.WalkHosts(func(host *api.ChiHost) error {
- if pod, err := c.getPod(host); err == nil {
- pods = append(pods, pod)
- }
- return nil
- })
- return pods
-}
-
-// getPodsOfCHI gets all pods in a CHI
-func (c *Controller) getPodsOfCHI(chi *api.ClickHouseInstallation) (pods []*core.Pod) {
- chi.WalkHosts(func(host *api.ChiHost) error {
- if pod, err := c.getPod(host); err == nil {
- pods = append(pods, pod)
- }
- return nil
- })
- return pods
-}
-
-// getPodsIPs gets all pod IPs
-func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
- log.V(3).M(obj).F().S().Info("looking for pods IPs")
- defer log.V(3).M(obj).F().E().Info("looking for pods IPs")
-
- for _, pod := range c.getPods(obj) {
- if ip := pod.Status.PodIP; ip == "" {
- log.V(3).M(pod).F().Warning("Pod NO IP address found. Pod: %s/%s", pod.Namespace, pod.Name)
- } else {
- ips = append(ips, ip)
- log.V(3).M(pod).F().Info("Pod IP address found. Pod: %s/%s IP: %s", pod.Namespace, pod.Name, ip)
- }
- }
- return ips
-}
-
-// GetCHIByObjectMeta gets CHI by namespaced name
-func (c *Controller) GetCHIByObjectMeta(objectMeta *meta.ObjectMeta, isCHI bool) (*api.ClickHouseInstallation, error) {
- var chiName string
- var err error
- if isCHI {
- chiName = objectMeta.Name
- } else {
- chiName, err = model.GetCHINameFromObjectMeta(objectMeta)
- if err != nil {
- return nil, fmt.Errorf("unable to find CHI by name: '%s'. More info: %v", objectMeta.Name, err)
- }
- }
-
- return c.chopClient.ClickhouseV1().ClickHouseInstallations(objectMeta.Namespace).Get(controller.NewContext(), chiName, controller.NewGetOptions())
-}
diff --git a/pkg/controller/chi/kube/adapter-kube.go b/pkg/controller/chi/kube/adapter-kube.go
new file mode 100644
index 000000000..4c06dd190
--- /dev/null
+++ b/pkg/controller/chi/kube/adapter-kube.go
@@ -0,0 +1,120 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ kube "k8s.io/client-go/kubernetes"
+
+ chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Adapter struct {
+ kubeClient kube.Interface
+ namer interfaces.INameManager
+
+ // Set of CR k8s components
+
+ cr *CR
+
+ // Set of k8s components
+
+ configMap *ConfigMap
+ deployment *Deployment
+ event *Event
+ pdb *PDB
+ pod *Pod
+ pvc *storage.PVC
+ replicaSet *ReplicaSet
+ secret *Secret
+ service *Service
+ sts *STS
+}
+
+func NewAdapter(kubeClient kube.Interface, chopClient chopClientSet.Interface, namer interfaces.INameManager) *Adapter {
+ return &Adapter{
+ kubeClient: kubeClient,
+ namer: namer,
+
+ cr: NewCR(chopClient),
+
+ configMap: NewConfigMap(kubeClient),
+ deployment: NewDeployment(kubeClient),
+ event: NewEvent(kubeClient),
+ pdb: NewPDB(kubeClient),
+ pod: NewPod(kubeClient, namer),
+ pvc: storage.NewStoragePVC(NewPVC(kubeClient)),
+ replicaSet: NewReplicaSet(kubeClient),
+ secret: NewSecret(kubeClient, namer),
+ service: NewService(kubeClient, namer),
+ sts: NewSTS(kubeClient, namer),
+ }
+}
+
+// CR is a getter
+func (k *Adapter) CR() interfaces.IKubeCR {
+ return k.cr
+}
+
+// ConfigMap is a getter
+func (k *Adapter) ConfigMap() interfaces.IKubeConfigMap {
+ return k.configMap
+}
+
+// Deployment is a getter
+func (k *Adapter) Deployment() interfaces.IKubeDeployment {
+ return k.deployment
+}
+
+// Event is a getter
+func (k *Adapter) Event() interfaces.IKubeEvent {
+ return k.event
+}
+
+// PDB is a getter
+func (k *Adapter) PDB() interfaces.IKubePDB {
+ return k.pdb
+}
+
+// Pod is a getter
+func (k *Adapter) Pod() interfaces.IKubePod {
+ return k.pod
+}
+
+// Storage is a getter
+func (k *Adapter) Storage() interfaces.IKubeStoragePVC {
+ return k.pvc
+}
+
+// ReplicaSet is a getter
+func (k *Adapter) ReplicaSet() interfaces.IKubeReplicaSet {
+ return k.replicaSet
+}
+
+// Secret is a getter
+func (k *Adapter) Secret() interfaces.IKubeSecret {
+ return k.secret
+}
+
+// Service is a getter
+func (k *Adapter) Service() interfaces.IKubeService {
+ return k.service
+}
+
+// STS is a getter
+func (k *Adapter) STS() interfaces.IKubeSTS {
+ return k.sts
+}
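
The Adapter above gathers every per-kind accessor behind a single facade; labeler.New further down takes it as interfaces.IKube, so the concrete *Adapter is presumably meant to satisfy that interface, and callers should depend on the interface rather than on client-go. A minimal wiring sketch under that assumption (newKubeFacade and its callers are hypothetical):

package main

import (
	kube "k8s.io/client-go/kubernetes"

	chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
	chiKube "github.com/altinity/clickhouse-operator/pkg/controller/chi/kube"
	"github.com/altinity/clickhouse-operator/pkg/interfaces"
)

// newKubeFacade constructs the adapter once and hands it out as the
// interface, keeping client-go details out of the consuming packages.
func newKubeFacade(kc kube.Interface, cc chopClientSet.Interface, namer interfaces.INameManager) interfaces.IKube {
	return chiKube.NewAdapter(kc, cc, namer)
}
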
diff --git a/pkg/controller/chi/kube/config-map.go b/pkg/controller/chi/kube/config-map.go
new file mode 100644
index 000000000..c0cf0e860
--- /dev/null
+++ b/pkg/controller/chi/kube/config-map.go
@@ -0,0 +1,74 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+)
+
+type ConfigMap struct {
+ kubeClient kube.Interface
+}
+
+func NewConfigMap(kubeClient kube.Interface) *ConfigMap {
+ return &ConfigMap{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *ConfigMap) Create(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ return c.kubeClient.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, controller.NewCreateOptions())
+}
+
+func (c *ConfigMap) Get(ctx context.Context, namespace, name string) (*core.ConfigMap, error) {
+ return c.kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *ConfigMap) Update(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ return c.kubeClient.CoreV1().ConfigMaps(cm.Namespace).Update(ctx, cm, controller.NewUpdateOptions())
+}
+
+func (c *ConfigMap) Delete(ctx context.Context, namespace, name string) error {
+ c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+ return poller.New(ctx, fmt.Sprintf("%s/%s", namespace, name)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ _, err := c.Get(ctx, namespace, name)
+ return errors.IsNotFound(err)
+ },
+ }).Poll()
+}
+
+func (c *ConfigMap) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.ConfigMap, error) {
+ list, err := c.kubeClient.CoreV1().ConfigMaps(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
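
Note that Delete here is synchronous by design: it fires the API deletion, then polls until Get reports NotFound, so a nil return means the ConfigMap is actually gone rather than merely scheduled for deletion. A standalone sketch of the same poll-until-absent idea in plain client-go (the one-second interval is a hypothetical stand-in for the operator's poller configuration):

package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube "k8s.io/client-go/kubernetes"
)

// deleteAndWait mirrors the pattern: request deletion, then treat
// IsNotFound from a follow-up Get as the completion signal.
func deleteAndWait(ctx context.Context, kc kube.Interface, namespace, name string) error {
	_ = kc.CoreV1().ConfigMaps(namespace).Delete(ctx, name, meta.DeleteOptions{})
	for {
		_, err := kc.CoreV1().ConfigMaps(namespace).Get(ctx, name, meta.GetOptions{})
		if errors.IsNotFound(err) {
			return nil // object is really gone
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second): // hypothetical poll interval
		}
	}
}
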
diff --git a/pkg/controller/chi/kube/cr.go b/pkg/controller/chi/kube/cr.go
new file mode 100644
index 000000000..1b26f5290
--- /dev/null
+++ b/pkg/controller/chi/kube/cr.go
@@ -0,0 +1,122 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ commonTypes "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type CR struct {
+ chopClient chopClientSet.Interface
+}
+
+func NewCR(chopClient chopClientSet.Interface) *CR {
+ return &CR{
+ chopClient: chopClient,
+ }
+}
+
+func (c *CR) Get(ctx context.Context, namespace, name string) (api.ICustomResource, error) {
+ return c.chopClient.ClickhouseV1().ClickHouseInstallations(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+// StatusUpdate updates ClickHouseInstallation object's Status
+func (c *CR) StatusUpdate(ctx context.Context, cr api.ICustomResource, opts commonTypes.UpdateStatusOptions) (err error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ for retry, attempt := true, 1; retry; attempt++ {
+ if attempt > 60 {
+ retry = false
+ }
+
+ err = c.doUpdateCRStatus(ctx, cr, opts)
+ if err == nil {
+ return nil
+ }
+
+ if retry {
+ log.V(2).M(cr).F().Warning("got error, will retry. err: %q", err)
+ time.Sleep(1 * time.Second)
+ } else {
+ log.V(1).M(cr).F().Error("got error, all retries are exhausted. err: %q", err)
+ }
+ }
+ return
+}
+
+// doUpdateCRStatus updates ClickHouseInstallation object's Status
+func (c *CR) doUpdateCRStatus(ctx context.Context, cr api.ICustomResource, opts commonTypes.UpdateStatusOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ chi := cr.(*api.ClickHouseInstallation)
+ namespace, name := util.NamespaceName(chi)
+ log.V(3).M(chi).F().Info("Update CHI status")
+
+	_cur, err := c.Get(ctx, namespace, name)
+	if err != nil {
+		if opts.TolerateAbsence {
+			return nil
+		}
+		log.V(1).M(chi).F().Error("%q", err)
+		return err
+	}
+	// Type-assert only after the error check - on error _cur may be a nil interface
+	cur, _ := _cur.(*api.ClickHouseInstallation)
+	if cur == nil {
+		if opts.TolerateAbsence {
+			return nil
+		}
+		log.V(1).M(chi).F().Error("NULL returned")
+		return fmt.Errorf("ERROR GetCR (%s/%s): NULL returned", namespace, name)
+	}
+
+ // Update status of a real object.
+ cur.EnsureStatus().CopyFrom(chi.Status, opts.CopyStatusOptions)
+
+ _, err = c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.GetNamespace()).UpdateStatus(ctx, cur, controller.NewUpdateOptions())
+ if err != nil {
+ // Error update
+ log.V(2).M(chi).F().Info("Got error upon update, may retry. err: %q", err)
+ return err
+ }
+
+	_cur, err = c.Get(ctx, namespace, name)
+	if err != nil {
+		// Status was written successfully - failing the re-fetch should not fail the update
+		return nil
+	}
+	cur, _ = _cur.(*api.ClickHouseInstallation)
+	if cur == nil {
+		return nil
+	}
+
+ // Propagate updated ResourceVersion into chi
+ if chi.GetResourceVersion() != cur.GetResourceVersion() {
+ log.V(3).M(chi).F().Info("ResourceVersion change: %s to %s", chi.GetResourceVersion(), cur.GetResourceVersion())
+ chi.SetResourceVersion(cur.GetResourceVersion())
+ return nil
+ }
+
+ // ResourceVersion not changed - no update performed?
+
+ return nil
+}
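
StatusUpdate wraps doUpdateCRStatus in up to 60 one-second retries to ride out transient conflicts on the /status subresource, and on success propagates the refreshed ResourceVersion back into the in-memory object so the caller's next write does not immediately conflict. A hedged sketch of the calling side (pushStatus and its placement in the reconcile flow are hypothetical; only the option fields shown above are assumed on UpdateStatusOptions):

package main

import (
	"context"

	log "github.com/altinity/clickhouse-operator/pkg/announcer"
	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	commonTypes "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
	chiKube "github.com/altinity/clickhouse-operator/pkg/controller/chi/kube"
)

// pushStatus illustrates the calling convention: the CR's in-memory Status
// is the source of truth and StatusUpdate copies it onto the live object.
func pushStatus(ctx context.Context, cr *chiKube.CR, chi *api.ClickHouseInstallation) {
	opts := commonTypes.UpdateStatusOptions{
		TolerateAbsence: true, // a CHI deleted mid-reconcile is not an error
	}
	if err := cr.StatusUpdate(ctx, chi, opts); err != nil {
		log.V(1).M(chi).F().Error("status update failed after retries: %v", err)
	}
}
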
diff --git a/pkg/controller/chi/kube/deployment.go b/pkg/controller/chi/kube/deployment.go
new file mode 100644
index 000000000..65187c875
--- /dev/null
+++ b/pkg/controller/chi/kube/deployment.go
@@ -0,0 +1,40 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ apps "k8s.io/api/apps/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+)
+
+type Deployment struct {
+ kubeClient kube.Interface
+}
+
+func NewDeployment(kubeClient kube.Interface) *Deployment {
+ return &Deployment{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *Deployment) Get(namespace, name string) (*apps.Deployment, error) {
+ return c.kubeClient.AppsV1().Deployments(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
+}
+
+func (c *Deployment) Update(deployment *apps.Deployment) (*apps.Deployment, error) {
+ return c.kubeClient.AppsV1().Deployments(deployment.Namespace).Update(controller.NewContext(), deployment, controller.NewUpdateOptions())
+}
diff --git a/pkg/controller/chi/kube/event.go b/pkg/controller/chi/kube/event.go
new file mode 100644
index 000000000..b6376244b
--- /dev/null
+++ b/pkg/controller/chi/kube/event.go
@@ -0,0 +1,38 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+)
+
+type Event struct {
+ kubeClient kube.Interface
+}
+
+func NewEvent(kubeClient kube.Interface) *Event {
+ return &Event{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *Event) Create(ctx context.Context, event *core.Event) (*core.Event, error) {
+ return c.kubeClient.CoreV1().Events(event.Namespace).Create(ctx, event, controller.NewCreateOptions())
+}
diff --git a/pkg/controller/chi/kube/pdb.go b/pkg/controller/chi/kube/pdb.go
new file mode 100644
index 000000000..4a5230fea
--- /dev/null
+++ b/pkg/controller/chi/kube/pdb.go
@@ -0,0 +1,74 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+
+ policy "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+)
+
+type PDB struct {
+ kubeClient kube.Interface
+}
+
+func NewPDB(kubeClient kube.Interface) *PDB {
+ return &PDB{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *PDB) Create(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ return c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, controller.NewCreateOptions())
+}
+
+func (c *PDB) Get(ctx context.Context, namespace, name string) (*policy.PodDisruptionBudget, error) {
+ return c.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *PDB) Update(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ return c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Update(ctx, pdb, controller.NewUpdateOptions())
+}
+
+func (c *PDB) Delete(ctx context.Context, namespace, name string) error {
+ c.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+ return poller.New(ctx, fmt.Sprintf("%s/%s", namespace, name)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ _, err := c.Get(ctx, namespace, name)
+ return errors.IsNotFound(err)
+ },
+ }).Poll()
+}
+
+func (c *PDB) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]policy.PodDisruptionBudget, error) {
+ list, err := c.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chi/kube/pod.go b/pkg/controller/chi/kube/pod.go
new file mode 100644
index 000000000..b18f10ffa
--- /dev/null
+++ b/pkg/controller/chi/kube/pod.go
@@ -0,0 +1,130 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Pod struct {
+ kubeClient kube.Interface
+ namer interfaces.INameManager
+}
+
+func NewPod(kubeClient kube.Interface, namer interfaces.INameManager) *Pod {
+ return &Pod{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets a pod. Accepted params:
+//  1. (namespace, name) pair of strings
+//  2. a single obj: *apps.StatefulSet or *api.Host
+func (c *Pod) Get(params ...any) (*core.Pod, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *apps.StatefulSet:
+ name = c.namer.Name(interfaces.NamePod, obj)
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NamePod, obj)
+ namespace = typedObj.Runtime.Address.Namespace
+ default:
+ panic(any("unknown param"))
+ }
+ default:
+ panic(any("incorrect number or params"))
+ }
+ return c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
+}
+
+// GetAll gets all pods for provided entity
+func (c *Pod) GetAll(obj any) []*core.Pod {
+ switch typed := obj.(type) {
+ case api.ICustomResource:
+ return c.getPodsOfCHI(typed)
+ case api.ICluster:
+ return c.getPodsOfCluster(typed)
+ case api.IShard:
+ return c.getPodsOfShard(typed)
+ case *api.Host:
+ if pod, err := c.Get(typed); err == nil {
+ return []*core.Pod{
+ pod,
+ }
+ }
+ default:
+ panic(any("unknown type"))
+ }
+ return nil
+}
+
+func (c *Pod) Update(ctx context.Context, pod *core.Pod) (*core.Pod, error) {
+ return c.kubeClient.CoreV1().Pods(pod.GetNamespace()).Update(ctx, pod, controller.NewUpdateOptions())
+}
+
+// getPodsOfCluster gets all pods in a cluster
+func (c *Pod) getPodsOfCluster(cluster api.ICluster) (pods []*core.Pod) {
+ cluster.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+// getPodsOfShard gets all pods in a shard
+func (c *Pod) getPodsOfShard(shard api.IShard) (pods []*core.Pod) {
+ shard.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+// getPodsOfCHI gets all pods in a CHI
+func (c *Pod) getPodsOfCHI(cr api.ICustomResource) (pods []*core.Pod) {
+ cr.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+func (c *Pod) Delete(ctx context.Context, namespace, name string) error {
+ return c.kubeClient.CoreV1().Pods(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+}
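
Get is deliberately polymorphic: two string params are read as a (namespace, name) pair, a single param must be a *apps.StatefulSet or *api.Host whose pod name is derived via the name manager, and anything else panics. A usage sketch covering the three shapes (the namespace and pod name literals are hypothetical):

package main

import (
	apps "k8s.io/api/apps/v1"
	core "k8s.io/api/core/v1"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	chiKube "github.com/altinity/clickhouse-operator/pkg/controller/chi/kube"
)

// lookupPods demonstrates the three accepted call shapes of Pod.Get.
func lookupPods(podCtl *chiKube.Pod, sts *apps.StatefulSet, host *api.Host) []*core.Pod {
	var pods []*core.Pod
	// 1. Explicit (namespace, name) pair.
	if pod, err := podCtl.Get("prod", "chi-demo-cluster-0-0-0"); err == nil {
		pods = append(pods, pod)
	}
	// 2. Owning StatefulSet - pod name derived via the name manager.
	if pod, err := podCtl.Get(sts); err == nil {
		pods = append(pods, pod)
	}
	// 3. Host - namespace taken from host.Runtime.Address.
	if pod, err := podCtl.Get(host); err == nil {
		pods = append(pods, pod)
	}
	return pods
}
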
diff --git a/pkg/controller/chi/kube/pvc.go b/pkg/controller/chi/kube/pvc.go
new file mode 100644
index 000000000..a03d615c1
--- /dev/null
+++ b/pkg/controller/chi/kube/pvc.go
@@ -0,0 +1,79 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+)
+
+type PVC struct {
+ kubeClient kube.Interface
+}
+
+func NewPVC(kubeClient kube.Interface) *PVC {
+ return &PVC{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *PVC) Create(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
+ return c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, controller.NewCreateOptions())
+}
+
+func (c *PVC) Get(ctx context.Context, namespace, name string) (*core.PersistentVolumeClaim, error) {
+ return c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *PVC) Update(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
+ return c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(ctx, pvc, controller.NewUpdateOptions())
+}
+
+func (c *PVC) Delete(ctx context.Context, namespace, name string) error {
+ return c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+}
+
+func (c *PVC) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.PersistentVolumeClaim, error) {
+ list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
+
+func (c *PVC) ListForHost(ctx context.Context, host *api.Host) (*core.PersistentVolumeClaimList, error) {
+ return c.kubeClient.
+ CoreV1().
+ PersistentVolumeClaims(host.Runtime.Address.Namespace).
+ List(
+ ctx,
+ controller.NewListOptions(labeler(host.GetCR()).Selector(interfaces.SelectorHostScope, host)),
+ )
+}
+
+func labeler(cr api.ICustomResource) interfaces.ILabeler {
+ return chiLabeler.New(cr)
+}
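
ListForHost narrows the listing with the host-scope selector produced by the CHI labeler, so it returns exactly the claims stamped for that host rather than every PVC in the namespace. A hedged iteration sketch (printHostPVCs is hypothetical):

package main

import (
	"context"
	"fmt"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	chiKube "github.com/altinity/clickhouse-operator/pkg/controller/chi/kube"
)

// printHostPVCs lists only the claims labeled for this host.
func printHostPVCs(ctx context.Context, pvcCtl *chiKube.PVC, host *api.Host) error {
	list, err := pvcCtl.ListForHost(ctx, host)
	if err != nil {
		return err
	}
	for i := range list.Items {
		pvc := &list.Items[i]
		fmt.Printf("%s/%s phase=%s\n", pvc.Namespace, pvc.Name, pvc.Status.Phase)
	}
	return nil
}
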
diff --git a/pkg/controller/chi/kube/replicaset.go b/pkg/controller/chi/kube/replicaset.go
new file mode 100644
index 000000000..b3533280f
--- /dev/null
+++ b/pkg/controller/chi/kube/replicaset.go
@@ -0,0 +1,42 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+)
+
+type ReplicaSet struct {
+ kubeClient kube.Interface
+}
+
+func NewReplicaSet(kubeClient kube.Interface) *ReplicaSet {
+ return &ReplicaSet{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *ReplicaSet) Get(ctx context.Context, namespace, name string) (*apps.ReplicaSet, error) {
+ return c.kubeClient.AppsV1().ReplicaSets(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *ReplicaSet) Update(ctx context.Context, replicaSet *apps.ReplicaSet) (*apps.ReplicaSet, error) {
+ return c.kubeClient.AppsV1().ReplicaSets(replicaSet.Namespace).Update(ctx, replicaSet, controller.NewUpdateOptions())
+}
diff --git a/pkg/controller/chi/kube/secret.go b/pkg/controller/chi/kube/secret.go
new file mode 100644
index 000000000..254aecb62
--- /dev/null
+++ b/pkg/controller/chi/kube/secret.go
@@ -0,0 +1,100 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Secret struct {
+ kubeClient kube.Interface
+ namer interfaces.INameManager
+}
+
+func NewSecret(kubeClient kube.Interface, namer interfaces.INameManager) *Secret {
+ return &Secret{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets Secret. Accepted params:
+//  1. (namespace, name) pair of strings
+//  2. a single obj: *core.Secret or *api.Host
+func (c *Secret) Get(ctx context.Context, params ...any) (*core.Secret, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *core.Secret:
+ name = typedObj.Name
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NameStatefulSetService, typedObj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+ return c.kubeClient.CoreV1().Secrets(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *Secret) Create(ctx context.Context, secret *core.Secret) (*core.Secret, error) {
+	return c.kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, controller.NewCreateOptions())
+}
+
+func (c *Secret) Update(ctx context.Context, secret *core.Secret) (*core.Secret, error) {
+	return c.kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, controller.NewUpdateOptions())
+}
+
+func (c *Secret) Delete(ctx context.Context, namespace, name string) error {
+ c.kubeClient.CoreV1().Secrets(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+ return poller.New(ctx, fmt.Sprintf("%s/%s", namespace, name)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ _, err := c.Get(ctx, namespace, name)
+ return errors.IsNotFound(err)
+ },
+		}).Poll()
+}
+
+func (c *Secret) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Secret, error) {
+ list, err := c.kubeClient.CoreV1().Secrets(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chi/kube/service.go b/pkg/controller/chi/kube/service.go
new file mode 100644
index 000000000..3279f5197
--- /dev/null
+++ b/pkg/controller/chi/kube/service.go
@@ -0,0 +1,99 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Service struct {
+ kubeClient kube.Interface
+ namer interfaces.INameManager
+}
+
+func NewService(kubeClient kube.Interface, namer interfaces.INameManager) *Service {
+ return &Service{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets Service. Accepted params:
+//  1. (namespace, name) pair of strings
+//  2. a single obj: *core.Service or *api.Host
+func (c *Service) Get(ctx context.Context, params ...any) (*core.Service, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *core.Service:
+ name = typedObj.Name
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NameStatefulSetService, typedObj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+ return c.kubeClient.CoreV1().Services(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *Service) Create(ctx context.Context, svc *core.Service) (*core.Service, error) {
+ return c.kubeClient.CoreV1().Services(svc.Namespace).Create(ctx, svc, controller.NewCreateOptions())
+}
+
+func (c *Service) Update(ctx context.Context, svc *core.Service) (*core.Service, error) {
+ return c.kubeClient.CoreV1().Services(svc.Namespace).Update(ctx, svc, controller.NewUpdateOptions())
+}
+
+func (c *Service) Delete(ctx context.Context, namespace, name string) error {
+ c.kubeClient.CoreV1().Services(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+ return poller.New(ctx, fmt.Sprintf("%s/%s", namespace, name)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ _, err := c.Get(ctx, namespace, name)
+ return errors.IsNotFound(err)
+ },
+ }).Poll()
+}
+
+func (c *Service) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Service, error) {
+ list, err := c.kubeClient.CoreV1().Services(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chi/kube/statesfulset.go b/pkg/controller/chi/kube/statesfulset.go
new file mode 100644
index 000000000..33fa528f0
--- /dev/null
+++ b/pkg/controller/chi/kube/statesfulset.go
@@ -0,0 +1,102 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+
+ apps "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kube "k8s.io/client-go/kubernetes"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type STS struct {
+ kubeClient kube.Interface
+ namer interfaces.INameManager
+}
+
+func NewSTS(kubeClient kube.Interface, namer interfaces.INameManager) *STS {
+ return &STS{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets StatefulSet. Accepted params:
+//  1. (namespace, name) pair of strings
+//  2. a single obj: meta.Object or *api.Host
+func (c *STS) Get(ctx context.Context, params ...any) (*apps.StatefulSet, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case meta.Object:
+ name = typedObj.GetName()
+ namespace = typedObj.GetNamespace()
+ case *api.Host:
+ // Namespaced name
+ name = c.namer.Name(interfaces.NameStatefulSet, obj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+ return c.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, controller.NewGetOptions())
+}
+
+func (c *STS) Create(ctx context.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) {
+ return c.kubeClient.AppsV1().StatefulSets(statefulSet.Namespace).Create(ctx, statefulSet, controller.NewCreateOptions())
+}
+
+// Update is an internal function, used in reconcileStatefulSet only
+func (c *STS) Update(ctx context.Context, sts *apps.StatefulSet) (*apps.StatefulSet, error) {
+ return c.kubeClient.AppsV1().StatefulSets(sts.Namespace).Update(ctx, sts, controller.NewUpdateOptions())
+}
+
+// Delete gracefully deletes StatefulSet through zeroing Pod's count
+func (c *STS) Delete(ctx context.Context, namespace, name string) error {
+ c.kubeClient.AppsV1().StatefulSets(namespace).Delete(ctx, name, controller.NewDeleteOptions())
+ return poller.New(ctx, fmt.Sprintf("%s/%s", namespace, name)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ _, err := c.Get(ctx, namespace, name)
+ return errors.IsNotFound(err)
+ },
+ }).Poll()
+}
+
+func (c *STS) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]apps.StatefulSet, error) {
+ list, err := c.kubeClient.AppsV1().StatefulSets(namespace).List(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chi/labeler/errors.go b/pkg/controller/chi/labeler/errors.go
new file mode 100644
index 000000000..afd5a1889
--- /dev/null
+++ b/pkg/controller/chi/labeler/errors.go
@@ -0,0 +1,28 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labeler
+
+import (
+ "fmt"
+)
+
+var (
+ ErrEnvVarNotSpecified = fmt.Errorf("ENV var not specified")
+ // ErrOperatorPodNotSpecified specifies error when there is not namespace/name pair provided pointing to operator pod
+	ErrOperatorPodNotSpecified = fmt.Errorf("operator pod not specified")
+ ErrUnableToLabelPod = fmt.Errorf("unable to label pod")
+ ErrUnableToLabelReplicaSet = fmt.Errorf("unable to label replica set")
+ ErrUnableToLabelDeployment = fmt.Errorf("unable to label deployment")
+)
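
These sentinels are meant to be wrapped with %w, as labeler.go does below, so callers can branch on the error class with errors.Is no matter which namespace/name details were baked into the message. A small sketch of that check (classify is hypothetical):

package main

import (
	"errors"
	"fmt"

	chiLabeler "github.com/altinity/clickhouse-operator/pkg/controller/chi/labeler"
)

// classify shows how a wrapped sentinel still matches errors.Is.
func classify(err error) string {
	switch {
	case errors.Is(err, chiLabeler.ErrOperatorPodNotSpecified):
		return "operator pod coordinates missing"
	case errors.Is(err, chiLabeler.ErrUnableToLabelPod):
		return "pod labeling failed"
	default:
		return fmt.Sprintf("other: %v", err)
	}
}
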
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler/labeler.go
similarity index 69%
rename from pkg/controller/chi/labeler.go
rename to pkg/controller/chi/labeler/labeler.go
index c17d02f88..fcab7ff24 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler/labeler.go
@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package labeler
import (
"context"
"errors"
"fmt"
- "strings"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
@@ -28,17 +27,28 @@ import (
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
"github.com/altinity/clickhouse-operator/pkg/util"
)
-var (
- // ErrOperatorPodNotSpecified specifies error when there is not namespace/name pair provided pointing to operator pod
- ErrOperatorPodNotSpecified = fmt.Errorf("operator pod not specfied")
-)
+type Labeler struct {
+ pod interfaces.IKubePod
+ service interfaces.IKubeService
+ replicaSet interfaces.IKubeReplicaSet
+ deployment interfaces.IKubeDeployment
+}
+
+func New(kube interfaces.IKube) *Labeler {
+ return &Labeler{
+ pod: kube.Pod(),
+ service: kube.Service(),
+ replicaSet: kube.ReplicaSet(),
+ deployment: kube.Deployment(),
+ }
+}
-func (c *Controller) labelMyObjectsTree(ctx context.Context) error {
+func (l *Labeler) LabelMyObjectsTree(ctx context.Context) error {
// Operator is running in the Pod. We need to label this Pod
// Pod is owned by ReplicaSet. We need to label this ReplicaSet also.
@@ -77,7 +87,7 @@ func (c *Controller) labelMyObjectsTree(ctx context.Context) error {
if !ok1 || !ok2 {
str := fmt.Sprintf("ERROR read env vars: %s/%s ", deployment.OPERATOR_POD_NAME, deployment.OPERATOR_POD_NAMESPACE)
log.V(1).M(namespace, name).F().Error(str)
- return errors.New(str)
+ return fmt.Errorf("%w %s", ErrEnvVarNotSpecified, str)
}
log.V(1).Info("OPERATOR_POD_NAMESPACE=%s OPERATOR_POD_NAME=%s", namespace, name)
@@ -86,34 +96,35 @@ func (c *Controller) labelMyObjectsTree(ctx context.Context) error {
}
// Put labels on the pod
- pod, err := c.labelPod(ctx, namespace, name)
+ pod, err := l.labelPod(ctx, namespace, name)
if err != nil {
- return err
+ return fmt.Errorf("%w %s/%s err: %v", ErrUnableToLabelPod, namespace, name, err)
}
if pod == nil {
- return fmt.Errorf("ERROR label pod %s/%s", namespace, name)
+ return fmt.Errorf("%w %s/%s", ErrUnableToLabelPod, namespace, name)
}
// Put labels on the ReplicaSet
- replicaSet, err := c.labelReplicaSet(ctx, pod)
+ replicaSet, err := l.labelReplicaSet(ctx, pod)
if err != nil {
- return err
+ return fmt.Errorf("%w %s err: %v", ErrUnableToLabelReplicaSet, util.NamespacedName(pod), err)
}
if replicaSet == nil {
- return fmt.Errorf("ERROR label ReplicaSet for pod %s/%s", pod.Namespace, pod.Name)
+ return fmt.Errorf("%w %s", ErrUnableToLabelReplicaSet, util.NamespacedName(pod))
}
// Put labels on the Deployment
- err = c.labelDeployment(ctx, replicaSet)
+ err = l.labelDeployment(ctx, replicaSet)
if err != nil {
+ fmt.Errorf("%w %s err: %v", ErrUnableToLabelDeployment, util.NamespacedName(replicaSet), err)
return err
}
return nil
}
-func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*core.Pod, error) {
- pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(ctx, name, controller.NewGetOptions())
+func (l *Labeler) labelPod(ctx context.Context, namespace, name string) (*core.Pod, error) {
+ pod, err := l.pod.Get(namespace, name)
if err != nil {
log.V(1).M(namespace, name).F().Error("ERROR get Pod %s/%s %v", namespace, name, err)
return nil, err
@@ -125,8 +136,8 @@ func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*cor
}
// Put label on the Pod
- pod.Labels = c.addLabels(pod.Labels)
- pod, err = c.kubeClient.CoreV1().Pods(namespace).Update(ctx, pod, controller.NewUpdateOptions())
+ pod.Labels = l.addLabels(pod.Labels)
+ pod, err = l.pod.Update(ctx, pod)
if err != nil {
log.V(1).M(namespace, name).F().Error("ERROR put label on Pod %s/%s %v", namespace, name, err)
return nil, err
@@ -140,7 +151,7 @@ func (c *Controller) labelPod(ctx context.Context, namespace, name string) (*cor
return pod, nil
}
-func (c *Controller) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.ReplicaSet, error) {
+func (l *Labeler) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.ReplicaSet, error) {
// Find parent ReplicaSet
replicaSetName := ""
for i := range pod.OwnerReferences {
@@ -160,7 +171,7 @@ func (c *Controller) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.
}
// ReplicaSet namespaced name found, fetch the ReplicaSet
- replicaSet, err := c.kubeClient.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, replicaSetName, controller.NewGetOptions())
+ replicaSet, err := l.replicaSet.Get(ctx, pod.Namespace, replicaSetName)
if err != nil {
log.V(1).M(pod.Namespace, replicaSetName).F().Error("ERROR get ReplicaSet %s/%s %v", pod.Namespace, replicaSetName, err)
return nil, err
@@ -172,8 +183,8 @@ func (c *Controller) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.
}
// Put label on the ReplicaSet
- replicaSet.Labels = c.addLabels(replicaSet.Labels)
- replicaSet, err = c.kubeClient.AppsV1().ReplicaSets(pod.Namespace).Update(ctx, replicaSet, controller.NewUpdateOptions())
+ replicaSet.Labels = l.addLabels(replicaSet.Labels)
+ replicaSet, err = l.replicaSet.Update(ctx, replicaSet)
if err != nil {
log.V(1).M(pod.Namespace, replicaSetName).F().Error("ERROR put label on ReplicaSet %s/%s %v", pod.Namespace, replicaSetName, err)
return nil, err
@@ -187,7 +198,7 @@ func (c *Controller) labelReplicaSet(ctx context.Context, pod *core.Pod) (*apps.
return replicaSet, nil
}
-func (c *Controller) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) error {
+func (l *Labeler) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) error {
// Find parent Deployment
deploymentName := ""
for i := range rs.OwnerReferences {
@@ -207,7 +218,7 @@ func (c *Controller) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) e
}
// Deployment namespaced name found, fetch the Deployment
- deployment, err := c.kubeClient.AppsV1().Deployments(rs.Namespace).Get(ctx, deploymentName, controller.NewGetOptions())
+ deployment, err := l.deployment.Get(rs.Namespace, deploymentName)
if err != nil {
log.V(1).M(rs.Namespace, deploymentName).F().Error("ERROR get Deployment %s/%s", rs.Namespace, deploymentName)
return err
@@ -219,8 +230,8 @@ func (c *Controller) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) e
}
// Put label on the Deployment
- deployment.Labels = c.addLabels(deployment.Labels)
- deployment, err = c.kubeClient.AppsV1().Deployments(rs.Namespace).Update(ctx, deployment, controller.NewUpdateOptions())
+ deployment.Labels = l.addLabels(deployment.Labels)
+ deployment, err = l.deployment.Update(deployment)
if err != nil {
log.V(1).M(rs.Namespace, deploymentName).F().Error("ERROR put label on Deployment %s/%s %v", rs.Namespace, deploymentName, err)
return err
@@ -235,35 +246,29 @@ func (c *Controller) labelDeployment(ctx context.Context, rs *apps.ReplicaSet) e
}
// addLabels adds app and version labels
-func (c *Controller) addLabels(labels map[string]string) map[string]string {
+func (l *Labeler) addLabels(labels map[string]string) map[string]string {
return util.MergeStringMapsOverwrite(
labels,
- // Add the following labels
- map[string]string{
- model.LabelAppName: model.LabelAppValue,
- model.LabelCHOP: chop.Get().Version,
- model.LabelCHOPCommit: chop.Get().Commit,
- model.LabelCHOPDate: strings.ReplaceAll(chop.Get().Date, ":", "."),
- },
+ chiLabeler.New(nil).GetCHOpSignature(),
)
}
// appendLabelReadyOnPod appends Label "Ready" to the pod of the specified host
-func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *api.ChiHost) error {
+func (l *Labeler) appendLabelReadyOnPod(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- pod, err := c.getPod(host)
+ pod, err := l.pod.Get(host)
if err != nil {
log.M(host).F().Error("FAIL get pod for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
- if model.AppendLabelReady(&pod.ObjectMeta) {
+ if chiLabeler.New(host.GetCR()).AppendLabelReady(&pod.ObjectMeta) {
// Modified, need to update
- _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(ctx, pod, controller.NewUpdateOptions())
+ _, err = l.pod.Update(ctx, pod)
if err != nil {
log.M(host).F().Error("FAIL setting 'ready' label for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
@@ -273,8 +278,8 @@ func (c *Controller) appendLabelReadyOnPod(ctx context.Context, host *api.ChiHos
return nil
}
-// deleteLabelReadyPod deletes Label "Ready" from the pod of the specified host
-func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost) error {
+// deleteLabelReadyOnPod deletes Label "Ready" from the pod of the specified host
+func (l *Labeler) deleteLabelReadyOnPod(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -283,7 +288,7 @@ func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost)
if host == nil {
return nil
}
- pod, err := c.getPod(host)
+ pod, err := l.pod.Get(host)
if apiErrors.IsNotFound(err) {
// Pod may be missing in case, say, StatefulSet has 0 pods because CHI is stopped
// This is not an error, after all
@@ -295,9 +300,9 @@ func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost)
return err
}
- if model.DeleteLabelReady(&pod.ObjectMeta) {
+ if chiLabeler.New(host.GetCR()).DeleteLabelReady(&pod.ObjectMeta) {
// Modified, need to update
- _, err = c.kubeClient.CoreV1().Pods(pod.Namespace).Update(ctx, pod, controller.NewUpdateOptions())
+ _, err = l.pod.Update(ctx, pod)
return err
}
@@ -305,21 +310,21 @@ func (c *Controller) deleteLabelReadyPod(ctx context.Context, host *api.ChiHost)
}
// appendAnnotationReadyOnService appends Annotation "Ready" to the service of the specified host
-func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *api.ChiHost) error {
+func (l *Labeler) appendAnnotationReadyOnService(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- svc, err := c.getService(host)
+ svc, err := l.service.Get(ctx, host)
if err != nil {
log.M(host).F().Error("FAIL get service for host %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
}
- if model.AppendAnnotationReady(&svc.ObjectMeta) {
+ if chiLabeler.New(host.GetCR()).AppendAnnotationReady(&svc.ObjectMeta) {
// Modified, need to update
- _, err = c.kubeClient.CoreV1().Services(svc.Namespace).Update(ctx, svc, controller.NewUpdateOptions())
+ _, err = l.service.Update(ctx, svc)
if err != nil {
log.M(host).F().Error("FAIL setting 'ready' annotation for host service %s err:%v", host.Runtime.Address.NamespaceNameString(), err)
return err
@@ -329,8 +334,8 @@ func (c *Controller) appendAnnotationReadyOnService(ctx context.Context, host *a
return nil
}
-// deleteAnnotationReadyService deletes Annotation "Ready" from the service of the specified host
-func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *api.ChiHost) error {
+// deleteAnnotationReadyOnService deletes Annotation "Ready" from the service of the specified host
+func (l *Labeler) deleteAnnotationReadyOnService(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -340,7 +345,7 @@ func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *api
return nil
}
- svc, err := c.getService(host)
+ svc, err := l.service.Get(ctx, host)
if apiErrors.IsNotFound(err) {
// Service may be missing in case, say, StatefulSet has 0 pods because CHI is stopped
// This is not an error, after all
@@ -351,11 +356,31 @@ func (c *Controller) deleteAnnotationReadyService(ctx context.Context, host *api
return err
}
- if model.DeleteAnnotationReady(&svc.ObjectMeta) {
+ if chiLabeler.New(host.GetCR()).DeleteAnnotationReady(&svc.ObjectMeta) {
// Modified, need to update
- _, err = c.kubeClient.CoreV1().Services(svc.Namespace).Update(ctx, svc, controller.NewUpdateOptions())
+ _, err = l.service.Update(ctx, svc)
return err
}
return nil
}
+
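+// DeleteReadyMarkOnPodAndService removes the "Ready" label from the host's pod and the "Ready" annotation from its service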
+func (l *Labeler) DeleteReadyMarkOnPodAndService(ctx context.Context, host *api.Host) error {
+ if l == nil {
+ return nil
+ }
+ _ = l.deleteLabelReadyOnPod(ctx, host)
+ _ = l.deleteAnnotationReadyOnService(ctx, host)
+
+ return nil
+}
+
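+// SetReadyMarkOnPodAndService sets the "Ready" label on the host's pod and the "Ready" annotation on its service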
+func (l *Labeler) SetReadyMarkOnPodAndService(ctx context.Context, host *api.Host) error {
+ if l == nil {
+ return nil
+ }
+ _ = l.appendLabelReadyOnPod(ctx, host)
+ _ = l.appendAnnotationReadyOnService(ctx, host)
+
+ return nil
+}
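
A minimal sketch of how a caller might drive the two nil-safe ready-mark helpers above around a disruptive host operation. The hostReconciler wrapper below is hypothetical and not part of this patch; only the Labeler methods come from the code above.

// Sketch only: hostReconciler is a hypothetical wrapper, not part of this patch.
type hostReconciler struct {
	labeler *Labeler
}

func (r *hostReconciler) restartHost(ctx context.Context, host *api.Host) error {
	// Drop the "ready" mark first, so traffic is not routed to the host while it restarts.
	if err := r.labeler.DeleteReadyMarkOnPodAndService(ctx, host); err != nil {
		return err
	}

	// ... recreate the host's StatefulSet here ...

	// Restore the "ready" mark once the host is back up.
	return r.labeler.SetReadyMarkOnPodAndService(ctx, host)
}
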
diff --git a/pkg/controller/chi/metrics.go b/pkg/controller/chi/metrics/metrics.go
similarity index 68%
rename from pkg/controller/chi/metrics.go
rename to pkg/controller/chi/metrics/metrics.go
index d8465cb46..671f3ce6c 100644
--- a/pkg/controller/chi/metrics.go
+++ b/pkg/controller/chi/metrics/metrics.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package metrics
import (
"context"
@@ -20,8 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/metrics/operator"
)
// Metrics is a set of metrics that are tracked by the operator
@@ -58,64 +57,64 @@ var m *Metrics
func createMetrics() *Metrics {
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
- CHIReconcilesStarted, _ := metrics.Meter().Int64Counter(
+ CHIReconcilesStarted, _ := operator.Meter().Int64Counter(
"clickhouse_operator_chi_reconciles_started",
metric.WithDescription("number of CHI reconciles started"),
metric.WithUnit("items"),
)
- CHIReconcilesCompleted, _ := metrics.Meter().Int64Counter(
+ CHIReconcilesCompleted, _ := operator.Meter().Int64Counter(
"clickhouse_operator_chi_reconciles_completed",
metric.WithDescription("number of CHI reconciles completed successfully"),
metric.WithUnit("items"),
)
- CHIReconcilesAborted, _ := metrics.Meter().Int64Counter(
+ CHIReconcilesAborted, _ := operator.Meter().Int64Counter(
"clickhouse_operator_chi_reconciles_aborted",
metric.WithDescription("number of CHI reconciles aborted"),
metric.WithUnit("items"),
)
- CHIReconcilesTimings, _ := metrics.Meter().Float64Histogram(
+ CHIReconcilesTimings, _ := operator.Meter().Float64Histogram(
"clickhouse_operator_chi_reconciles_timings",
metric.WithDescription("timings of CHI reconciles completed successfully"),
metric.WithUnit("s"),
)
- HostReconcilesStarted, _ := metrics.Meter().Int64Counter(
+ HostReconcilesStarted, _ := operator.Meter().Int64Counter(
"clickhouse_operator_host_reconciles_started",
metric.WithDescription("number of host reconciles started"),
metric.WithUnit("items"),
)
- HostReconcilesCompleted, _ := metrics.Meter().Int64Counter(
+ HostReconcilesCompleted, _ := operator.Meter().Int64Counter(
"clickhouse_operator_host_reconciles_completed",
metric.WithDescription("number of host reconciles completed successfully"),
metric.WithUnit("items"),
)
- HostReconcilesRestarts, _ := metrics.Meter().Int64Counter(
+ HostReconcilesRestarts, _ := operator.Meter().Int64Counter(
"clickhouse_operator_host_reconciles_restarts",
metric.WithDescription("number of host restarts during reconciles"),
metric.WithUnit("items"),
)
- HostReconcilesErrors, _ := metrics.Meter().Int64Counter(
+ HostReconcilesErrors, _ := operator.Meter().Int64Counter(
"clickhouse_operator_host_reconciles_errors",
metric.WithDescription("number of host reconciles errors"),
metric.WithUnit("items"),
)
- HostReconcilesTimings, _ := metrics.Meter().Float64Histogram(
+ HostReconcilesTimings, _ := operator.Meter().Float64Histogram(
"clickhouse_operator_host_reconciles_timings",
metric.WithDescription("timings of host reconciles completed successfully"),
metric.WithUnit("s"),
)
- PodAddEvents, _ := metrics.Meter().Int64Counter(
+ PodAddEvents, _ := operator.Meter().Int64Counter(
"clickhouse_operator_pod_add_events",
metric.WithDescription("number PodAdd events"),
metric.WithUnit("items"),
)
- PodUpdateEvents, _ := metrics.Meter().Int64Counter(
+ PodUpdateEvents, _ := operator.Meter().Int64Counter(
"clickhouse_operator_pod_update_events",
metric.WithDescription("number PodUpdate events"),
metric.WithUnit("items"),
)
- PodDeleteEvents, _ := metrics.Meter().Int64Counter(
+ PodDeleteEvents, _ := operator.Meter().Int64Counter(
"clickhouse_operator_pod_delete_events",
metric.WithDescription("number PodDelete events"),
metric.WithUnit("items"),
@@ -146,8 +145,15 @@ func ensureMetrics() *Metrics {
return m
}
-func prepareLabels(chi *api.ClickHouseInstallation) (attributes []attribute.KeyValue) {
- labels, values := metrics.GetMandatoryLabelsAndValues(chi)
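+// BaseInfoGetter provides the minimal object meta - name, namespace, labels and annotations - used to build metric label sets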
+type BaseInfoGetter interface {
+ GetName() string
+ GetNamespace() string
+ GetLabels() map[string]string
+ GetAnnotations() map[string]string
+}
+
+func prepareLabels(cr BaseInfoGetter) (attributes []attribute.KeyValue) {
+ labels, values := operator.GetMandatoryLabelsAndValues(cr)
for i := range labels {
label := labels[i]
value := values[i]
@@ -157,41 +163,56 @@ func prepareLabels(chi *api.ClickHouseInstallation) (attributes []attribute.KeyV
return attributes
}
-func metricsCHIReconcilesStarted(ctx context.Context, chi *api.ClickHouseInstallation) {
+// CHIInitZeroValues initializes all CHI metrics to zero values with the appropriate labels, if they are not already present
+//
+// This works around a limitation of the Prometheus `rate` function: it expects a metric to be 0-initialized with all
+// possible labels and does not default to 0 when the metric is absent.
+func CHIInitZeroValues(ctx context.Context, chi BaseInfoGetter) {
+ ensureMetrics().CHIReconcilesStarted.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+ ensureMetrics().CHIReconcilesCompleted.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+ ensureMetrics().CHIReconcilesAborted.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+
+ ensureMetrics().HostReconcilesStarted.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+ ensureMetrics().HostReconcilesCompleted.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+ ensureMetrics().HostReconcilesRestarts.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+ ensureMetrics().HostReconcilesErrors.Add(ctx, 0, metric.WithAttributes(prepareLabels(chi)...))
+}
+
+func CHIReconcilesStarted(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().CHIReconcilesStarted.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsCHIReconcilesCompleted(ctx context.Context, chi *api.ClickHouseInstallation) {
+func CHIReconcilesCompleted(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().CHIReconcilesCompleted.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsCHIReconcilesAborted(ctx context.Context, chi *api.ClickHouseInstallation) {
+func CHIReconcilesAborted(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().CHIReconcilesAborted.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsCHIReconcilesTimings(ctx context.Context, chi *api.ClickHouseInstallation, seconds float64) {
+func CHIReconcilesTimings(ctx context.Context, chi BaseInfoGetter, seconds float64) {
ensureMetrics().CHIReconcilesTimings.Record(ctx, seconds, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsHostReconcilesStarted(ctx context.Context, chi *api.ClickHouseInstallation) {
+func HostReconcilesStarted(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().HostReconcilesStarted.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsHostReconcilesCompleted(ctx context.Context, chi *api.ClickHouseInstallation) {
+func HostReconcilesCompleted(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().HostReconcilesCompleted.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsHostReconcilesRestart(ctx context.Context, chi *api.ClickHouseInstallation) {
+func HostReconcilesRestart(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().HostReconcilesRestarts.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsHostReconcilesErrors(ctx context.Context, chi *api.ClickHouseInstallation) {
+func HostReconcilesErrors(ctx context.Context, chi BaseInfoGetter) {
ensureMetrics().HostReconcilesErrors.Add(ctx, 1, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsHostReconcilesTimings(ctx context.Context, chi *api.ClickHouseInstallation, seconds float64) {
+func HostReconcilesTimings(ctx context.Context, chi BaseInfoGetter, seconds float64) {
ensureMetrics().HostReconcilesTimings.Record(ctx, seconds, metric.WithAttributes(prepareLabels(chi)...))
}
-func metricsPodAdd(ctx context.Context) {
+func PodAdd(ctx context.Context) {
ensureMetrics().PodAddEvents.Add(ctx, 1)
}
func metricsPodUpdate(ctx context.Context) {
ensureMetrics().PodUpdateEvents.Add(ctx, 1)
}
-func metricsPodDelete(ctx context.Context) {
+func PodDelete(ctx context.Context) {
ensureMetrics().PodDeleteEvents.Add(ctx, 1)
}
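
A minimal sketch of how the now-exported metrics API above is intended to wrap a reconcile cycle. The reconcileWithMetrics helper is hypothetical, and it simplifies the real worker's behavior, which counts an abort only for CRUD-abort errors.

import (
	"context"
	"time"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics"
)

// Sketch only: wraps an arbitrary reconcile function with the metrics calls defined above.
func reconcileWithMetrics(ctx context.Context, chi *api.ClickHouseInstallation, reconcile func() error) error {
	// Zero-initialize all counters for this CHI's label set, so PromQL rate() has a baseline.
	metrics.CHIInitZeroValues(ctx, chi)
	metrics.CHIReconcilesStarted(ctx, chi)
	start := time.Now()

	if err := reconcile(); err != nil {
		metrics.CHIReconcilesAborted(ctx, chi) // simplified: the real worker checks for ErrCRUDAbort
		return err
	}

	metrics.CHIReconcilesCompleted(ctx, chi)
	metrics.CHIReconcilesTimings(ctx, chi, time.Since(start).Seconds())
	return nil
}
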
diff --git a/pkg/controller/chi/poller.go b/pkg/controller/chi/poller.go
deleted file mode 100644
index 32b505136..000000000
--- a/pkg/controller/chi/poller.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "context"
- "time"
-
- apps "k8s.io/api/apps/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- "github.com/altinity/clickhouse-operator/pkg/model/k8s"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// waitHostNotReady polls host's StatefulSet for not exists or not ready
-func (c *Controller) waitHostNotReady(ctx context.Context, host *api.ChiHost) error {
- err := c.pollHostStatefulSet(
- ctx,
- host,
- // Since we are waiting for the host to be not ready, let's assume that it should exist already
- // and thus set GetErrorTimeout to zero, since we are not expecting the getter function
- // to return any errors
- controller.NewPollerOptions().
- FromConfig(chop.Config()).
- SetGetErrorTimeout(0),
- func(_ context.Context, sts *apps.StatefulSet) bool {
- return k8s.IsStatefulSetNotReady(sts)
- },
- nil,
- )
- if apiErrors.IsNotFound(err) {
- err = nil
- }
-
- return err
-}
-
-// waitHostReady polls host's StatefulSet until it is ready
-func (c *Controller) waitHostReady(ctx context.Context, host *api.ChiHost) error {
- // Wait for StatefulSet to reach generation
- err := c.pollHostStatefulSet(
- ctx,
- host,
- nil, // rely on default options
- func(_ctx context.Context, sts *apps.StatefulSet) bool {
- if sts == nil {
- return false
- }
- _ = c.deleteLabelReadyPod(_ctx, host)
- _ = c.deleteAnnotationReadyService(_ctx, host)
- return k8s.IsStatefulSetGeneration(sts, sts.Generation)
- },
- func(_ctx context.Context) {
- _ = c.deleteLabelReadyPod(_ctx, host)
- _ = c.deleteAnnotationReadyService(_ctx, host)
- },
- )
- if err != nil {
- return err
- }
-
- // Wait StatefulSet to reach ready status
- err = c.pollHostStatefulSet(
- ctx,
- host,
- nil, // rely on default options
- func(_ctx context.Context, sts *apps.StatefulSet) bool {
- _ = c.deleteLabelReadyPod(_ctx, host)
- _ = c.deleteAnnotationReadyService(_ctx, host)
- return k8s.IsStatefulSetReady(sts)
- },
- func(_ctx context.Context) {
- _ = c.deleteLabelReadyPod(_ctx, host)
- _ = c.deleteAnnotationReadyService(_ctx, host)
- },
- )
-
- return err
-}
-
-// waitHostDeleted polls host's StatefulSet until it is not available
-func (c *Controller) waitHostDeleted(host *api.ChiHost) {
- for {
- // TODO
- // Probably there would be better way to wait until k8s reported StatefulSet deleted
- if _, err := c.getStatefulSet(host); err == nil {
- log.V(2).Info("cache NOT yet synced")
- time.Sleep(15 * time.Second)
- } else {
- log.V(1).Info("cache synced")
- return
- }
- }
-}
-
-// pollHost polls host
-func (c *Controller) pollHost(
- ctx context.Context,
- host *api.ChiHost,
- opts *controller.PollerOptions,
- isDoneFn func(ctx context.Context, host *api.ChiHost) bool,
-) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- opts = opts.Ensure().FromConfig(chop.Config())
- namespace := host.Runtime.Address.Namespace
- name := host.Runtime.Address.HostName
-
- return controller.Poll(
- ctx,
- namespace, name,
- opts,
- &controller.PollerFunctions{
- IsDone: func(_ctx context.Context, _ any) bool {
- return isDoneFn(_ctx, host)
- },
- },
- nil,
- )
-}
-
-// pollHostStatefulSet polls host's StatefulSet
-func (c *Controller) pollHostStatefulSet(
- ctx context.Context,
- host *api.ChiHost,
- opts *controller.PollerOptions,
- isDoneFn func(context.Context, *apps.StatefulSet) bool,
- backFn func(context.Context),
-) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- if opts == nil {
- opts = controller.NewPollerOptions().FromConfig(chop.Config())
- }
-
- namespace := host.Runtime.Address.Namespace
- name := host.Runtime.Address.StatefulSet
-
- return controller.Poll(
- ctx,
- namespace, name,
- opts,
- &controller.PollerFunctions{
- Get: func(_ctx context.Context) (any, error) {
- return c.getStatefulSet(host)
- },
- IsDone: func(_ctx context.Context, a any) bool {
- return isDoneFn(_ctx, a.(*apps.StatefulSet))
- },
- ShouldContinue: func(_ctx context.Context, _ any, e error) bool {
- return apiErrors.IsNotFound(e)
- },
- },
- &controller.PollerBackgroundFunctions{
- F: backFn,
- },
- )
-}
diff --git a/pkg/controller/chi/type_controller.go b/pkg/controller/chi/type_controller.go
deleted file mode 100644
index 52a8fc629..000000000
--- a/pkg/controller/chi/type_controller.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "time"
-
- kube "k8s.io/client-go/kubernetes"
- appsListers "k8s.io/client-go/listers/apps/v1"
- coreListers "k8s.io/client-go/listers/core/v1"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/tools/record"
- //"k8s.io/client-go/util/workqueue"
- apiExtensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-
- "github.com/altinity/queue"
-
- chopClientSet "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
- chopListers "github.com/altinity/clickhouse-operator/pkg/client/listers/clickhouse.altinity.com/v1"
-)
-
-// Controller defines CRO controller
-type Controller struct {
- // kubeClient used to Create() k8s resources as c.kubeClient.AppsV1().StatefulSets(namespace).Create(name)
- kubeClient kube.Interface
- extClient apiExtensions.Interface
- // chopClient used to Update() CRD k8s resource as c.chopClient.ClickhouseV1().ClickHouseInstallations(chi.Namespace).Update(chiCopy)
- chopClient chopClientSet.Interface
-
- // chiLister used as chiLister.ClickHouseInstallations(namespace).Get(name)
- chiLister chopListers.ClickHouseInstallationLister
- // chiListerSynced used in waitForCacheSync()
- chiListerSynced cache.InformerSynced
-
- chitLister chopListers.ClickHouseInstallationTemplateLister
- chitListerSynced cache.InformerSynced
-
- // serviceLister used as serviceLister.Services(namespace).Get(name)
- serviceLister coreListers.ServiceLister
- // serviceListerSynced used in waitForCacheSync()
- serviceListerSynced cache.InformerSynced
- // endpointsLister used as endpointsLister.Endpoints(namespace).Get(name)
- endpointsLister coreListers.EndpointsLister
- // endpointsListerSynced used in waitForCacheSync()
- endpointsListerSynced cache.InformerSynced
- // configMapLister used as configMapLister.ConfigMaps(namespace).Get(name)
- configMapLister coreListers.ConfigMapLister
- // configMapListerSynced used in waitForCacheSync()
- configMapListerSynced cache.InformerSynced
- // statefulSetLister used as statefulSetLister.StatefulSets(namespace).Get(name)
- statefulSetLister appsListers.StatefulSetLister
- // statefulSetListerSynced used in waitForCacheSync()
- statefulSetListerSynced cache.InformerSynced
- // podLister used as statefulSetLister.StatefulSets(namespace).Get(name)
- podLister coreListers.PodLister
- // podListerSynced used in waitForCacheSync()
- podListerSynced cache.InformerSynced
-
- // queues used to organize events queue processed by operator
- queues []queue.PriorityQueue
- // not used explicitly
- recorder record.EventRecorder
-}
-
-const (
- componentName = "clickhouse-operator"
- runWorkerPeriod = time.Second
-)
-
-const (
- messageUnableToDecode = "unable to decode object (invalid type)"
- messageUnableToSync = "unable to sync caches for %s controller"
-)
diff --git a/pkg/controller/chi/version-options.go b/pkg/controller/chi/version-options.go
new file mode 100644
index 000000000..a9e50a82d
--- /dev/null
+++ b/pkg/controller/chi/version-options.go
@@ -0,0 +1,49 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+const unknownVersion = "failed to query"
+
+type versionOptions struct {
+ skipNew bool
+ skipStopped bool
+ skipStoppedAncestor bool
+}
+
+func (opts versionOptions) shouldSkip(host *api.Host) (bool, string) {
+ if opts.skipNew {
+ if !host.HasAncestor() {
+ return true, "host is a new one, version is not not applicable"
+ }
+ }
+
+ if opts.skipStopped {
+ if host.IsStopped() {
+ return true, "host is stopped, version is not applicable"
+ }
+ }
+
+ if opts.skipStoppedAncestor {
+ if host.HasAncestor() && host.GetAncestor().IsStopped() {
+ return true, "host ancestor is stopped, version is not applicable"
+ }
+ }
+
+ return false, ""
+}
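
A minimal usage sketch for these options; logHostVersion is a hypothetical helper, not part of this patch. It illustrates the contract: for skipped hosts, getHostClickHouseVersion (defined in worker-app-version.go below) returns the skip reason in place of a version string.

// Sketch only: logHostVersion is a hypothetical helper.
func (w *worker) logHostVersion(ctx context.Context, host *api.Host) {
	opts := versionOptions{skipNew: true, skipStoppedAncestor: true}
	version, err := w.getHostClickHouseVersion(ctx, host, opts)
	if err != nil {
		// unknownVersion ("failed to query") was returned as the version string.
		w.a.V(1).M(host).F().Warning("version unknown for host: %s", host.GetName())
		return
	}
	// For skipped hosts, `version` carries the skip reason rather than a real version.
	w.a.V(1).M(host).F().Info("host: %s version/status: %s", host.GetName(), version)
}
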
diff --git a/pkg/controller/chi/volumes.go b/pkg/controller/chi/volumes.go
deleted file mode 100644
index 30033fc95..000000000
--- a/pkg/controller/chi/volumes.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- core "k8s.io/api/core/v1"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
-)
-
-func (c *Controller) walkPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) {
- namespace := host.Runtime.Address.Namespace
- name := model.CreatePodName(host)
- pod, err := c.kubeClient.CoreV1().Pods(namespace).Get(controller.NewContext(), name, controller.NewGetOptions())
- if err != nil {
- log.M(host).F().Error("FAIL get pod for host %s/%s err:%v", namespace, host.GetName(), err)
- return
- }
-
- for i := range pod.Spec.Volumes {
- volume := &pod.Spec.Volumes[i]
- if volume.PersistentVolumeClaim == nil {
- continue
- }
-
- pvcName := volume.PersistentVolumeClaim.ClaimName
- pvc, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(controller.NewContext(), pvcName, controller.NewGetOptions())
- if err != nil {
- log.M(host).F().Error("FAIL get PVC %s/%s for the host %s/%s with err:%v", namespace, pvcName, namespace, host.GetName(), err)
- continue
- }
-
- f(pvc)
- }
-}
-
-func (c *Controller) walkDiscoveredPVCs(host *api.ChiHost, f func(pvc *core.PersistentVolumeClaim)) {
- namespace := host.Runtime.Address.Namespace
-
- pvcList, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(controller.NewContext(), controller.NewListOptions(model.GetSelectorHostScope(host)))
- if err != nil {
- log.M(host).F().Error("FAIL get list of PVCs for the host %s/%s err:%v", namespace, host.GetName(), err)
- return
- }
-
- for i := range pvcList.Items {
- // Convenience wrapper
- pvc := &pvcList.Items[i]
-
- f(pvc)
- }
-}
-
-// Comment out PV
-//func (c *Controller) walkPVs(host *api.ChiHost, f func(pv *core.PersistentVolume)) {
-// c.walkPVCs(host, func(pvc *core.PersistentVolumeClaim) {
-// pv, err := c.kubeClient.CoreV1().PersistentVolumes().Get(newContext(), pvc.Spec.VolumeName, newGetOptions())
-// if err != nil {
-// log.M(host).F().Error("FAIL get PV %s err:%v", pvc.Spec.VolumeName, err)
-// return
-// }
-// f(pv)
-// })
-//}
diff --git a/pkg/controller/chi/worker-app-version.go b/pkg/controller/chi/worker-app-version.go
new file mode 100644
index 000000000..ccd0e94a9
--- /dev/null
+++ b/pkg/controller/chi/worker-app-version.go
@@ -0,0 +1,58 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain"
+)
+
+// getHostClickHouseVersion gets host ClickHouse version
+func (w *worker) getHostClickHouseVersion(ctx context.Context, host *api.Host, opts versionOptions) (string, error) {
+ if skip, description := opts.shouldSkip(host); skip {
+ return description, nil
+ }
+
+ version, err := w.ensureClusterSchemer(host).HostClickHouseVersion(ctx, host)
+ if err != nil {
+ w.a.V(1).M(host).F().Warning("Failed to get ClickHouse version on host: %s", host.GetName())
+ return unknownVersion, err
+ }
+
+ w.a.V(1).M(host).F().Info("Get ClickHouse version on host: %s version: %s", host.GetName(), version)
+ host.Runtime.Version = swversion.NewSoftWareVersion(version)
+
+ return version, nil
+}
+
+func (w *worker) pollHostForClickHouseVersion(ctx context.Context, host *api.Host) (version string, err error) {
+ err = domain.PollHost(
+ ctx,
+ host,
+ func(_ctx context.Context, _host *api.Host) bool {
+ var e error
+ version, e = w.getHostClickHouseVersion(_ctx, _host, versionOptions{skipStopped: true})
+ if e == nil {
+ return true
+ }
+ w.a.V(1).M(host).F().Warning("Host is NOT alive: %s ", host.GetName())
+ return false
+ },
+ )
+ return
+}
diff --git a/pkg/controller/chi/worker-boilerplate.go b/pkg/controller/chi/worker-boilerplate.go
new file mode 100644
index 000000000..929a46479
--- /dev/null
+++ b/pkg/controller/chi/worker-boilerplate.go
@@ -0,0 +1,192 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ "fmt"
+
+ utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/cmd_queue"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics"
+ normalizerCommon "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// run is an endless work loop, expected to be run in a thread
+func (w *worker) run() {
+ w.a.V(2).S().P()
+ defer w.a.V(2).E().P()
+
+ // For the system thread, wait until its 'official start time', giving it time to bootstrap
+ util.WaitContextDoneUntil(context.Background(), w.start)
+
+ // Events loop
+ for {
+ // Get() blocks until it can return an item
+ item, ctx, ok := w.queue.Get()
+ if !ok {
+ w.a.Info("shutdown request")
+ return
+ }
+
+ //item, shut := w.queue.Get()
+ //task := context.Background()
+ //if shut {
+ // w.a.Info("shutdown request")
+ // return
+ //}
+
+ if err := w.processItem(ctx, item); err != nil {
+ // Item was not processed.
+ // This code cannot return an error, so indicate that the error has been ignored
+ utilRuntime.HandleError(err)
+ }
+
+ // Forget indicates that an item is finished being retried. Whether it failed permanently or
+ // succeeded, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`; you
+ // still have to call `Done` on the queue.
+ //w.queue.Forget(item)
+
+ // Remove item from processing set when processing completed
+ w.queue.Done(item)
+ }
+}
+
+func (w *worker) processReconcileCHI(ctx context.Context, cmd *cmd_queue.ReconcileCHI) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileAdd:
+ return w.updateCHI(ctx, nil, cmd.New)
+ case cmd_queue.ReconcileUpdate:
+ return w.updateCHI(ctx, cmd.Old, cmd.New)
+ case cmd_queue.ReconcileDelete:
+ return w.discoveryAndDeleteCR(ctx, cmd.Old)
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processReconcileCHIT(cmd *cmd_queue.ReconcileCHIT) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileAdd:
+ return w.addChit(cmd.New)
+ case cmd_queue.ReconcileUpdate:
+ return w.updateChit(cmd.Old, cmd.New)
+ case cmd_queue.ReconcileDelete:
+ return w.deleteChit(cmd.Old)
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processReconcileChopConfig(cmd *cmd_queue.ReconcileChopConfig) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileAdd:
+ return w.c.addChopConfig(cmd.New)
+ case cmd_queue.ReconcileUpdate:
+ return w.c.updateChopConfig(cmd.Old, cmd.New)
+ case cmd_queue.ReconcileDelete:
+ return w.c.deleteChopConfig(cmd.Old)
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processReconcileEndpoints(ctx context.Context, cmd *cmd_queue.ReconcileEndpoints) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileUpdate:
+ return w.updateEndpoints(ctx, cmd.Old, cmd.New)
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processReconcilePod(ctx context.Context, cmd *cmd_queue.ReconcilePod) error {
+ switch cmd.Cmd {
+ case cmd_queue.ReconcileAdd:
+ w.a.V(1).M(cmd.New).F().Info("Add Pod. %s/%s", cmd.New.Namespace, cmd.New.Name)
+ metrics.PodAdd(ctx)
+ return nil
+ case cmd_queue.ReconcileUpdate:
+ //ignore
+ //w.a.V(1).M(cmd.new).F().Info("Update Pod. %s/%s", cmd.new.Namespace, cmd.new.Name)
+ //metricsPodUpdate(ctx)
+ return nil
+ case cmd_queue.ReconcileDelete:
+ w.a.V(1).M(cmd.Old).F().Info("Delete Pod. %s/%s", cmd.Old.Namespace, cmd.Old.Name)
+ metrics.PodDelete(ctx)
+ return nil
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
+ return nil
+}
+
+func (w *worker) processDropDns(ctx context.Context, cmd *cmd_queue.DropDns) error {
+ if chi, err := w.createCRFromObjectMeta(cmd.Initiator, false, normalizerCommon.NewOptions()); err == nil {
+ w.a.V(2).M(cmd.Initiator).Info("flushing DNS for CHI %s", chi.Name)
+ _ = w.ensureClusterSchemer(chi.FirstHost()).CHIDropDnsCache(ctx, chi)
+ } else {
+ w.a.M(cmd.Initiator).F().Error("unable to find CHI by %v err: %v", cmd.Initiator.GetLabels(), err)
+ }
+ return nil
+}
+
+// processItem processes one work item according to its type
+func (w *worker) processItem(ctx context.Context, item interface{}) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(3).S().P()
+ defer w.a.V(3).E().P()
+
+ switch cmd := item.(type) {
+ case *cmd_queue.ReconcileCHI:
+ return w.processReconcileCHI(ctx, cmd)
+ case *cmd_queue.ReconcileCHIT:
+ return w.processReconcileCHIT(cmd)
+ case *cmd_queue.ReconcileChopConfig:
+ return w.processReconcileChopConfig(cmd)
+ case *cmd_queue.ReconcileEndpoints:
+ return w.processReconcileEndpoints(ctx, cmd)
+ case *cmd_queue.ReconcilePod:
+ return w.processReconcilePod(ctx, cmd)
+ case *cmd_queue.DropDns:
+ return w.processDropDns(ctx, cmd)
+ }
+
+ // Unknown item type, don't know what to do with it
+ // Just skip it and behave like it never existed
+ utilRuntime.HandleError(fmt.Errorf("unexpected item in the queue - %#v", item))
+ return nil
+}
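
A minimal sketch of the consumer contract that run() above follows: Get blocks until an item (with its context) is available, processing errors are handed to the runtime error handler, and Done must always be called to remove the item from the processing set. The workQueue interface here is hypothetical, written only to mirror the calls observed in run().

import (
	"context"

	utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
)

// workQueue is a hypothetical interface mirroring how run() uses w.queue.
type workQueue interface {
	Get() (item interface{}, ctx context.Context, ok bool)
	Done(item interface{})
}

// drain is a sketch of the generic loop: block on Get, process, always mark Done.
func drain(q workQueue, process func(ctx context.Context, item interface{}) error) {
	for {
		item, ctx, ok := q.Get()
		if !ok {
			return // queue has been shut down
		}
		if err := process(ctx, item); err != nil {
			// Processing cannot fail the loop; report the error and move on.
			utilRuntime.HandleError(err)
		}
		q.Done(item)
	}
}
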
diff --git a/pkg/controller/chi/worker-chi-reconciler.go b/pkg/controller/chi/worker-chi-reconciler.go
index bde05f053..bb9850518 100644
--- a/pkg/controller/chi/worker-chi-reconciler.go
+++ b/pkg/controller/chi/worker-chi-reconciler.go
@@ -17,36 +17,32 @@ package chi
import (
"context"
"errors"
- "fmt"
"math"
"sync"
"time"
- "gopkg.in/d4l3k/messagediff.v1"
-
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- policy "k8s.io/api/policy/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
-
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/apis/swversion"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
- "github.com/altinity/clickhouse-operator/pkg/model/chi/creator"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/config"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
"github.com/altinity/clickhouse-operator/pkg/util"
)
-// reconcileCHI run reconcile cycle for a CHI
-func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInstallation) error {
+// reconcileCR runs reconcile cycle for a Custom Resource
+func (w *worker) reconcileCR(ctx context.Context, old, new *api.ClickHouseInstallation) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- w.logOldAndNew("non-normalized yet (native)", old, new)
+ common.LogOldAndNew("non-normalized yet (native)", old, new)
switch {
case w.isAfterFinalizerInstalled(old, new):
@@ -59,30 +55,31 @@ func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInsta
w.a.M(new).S().P()
defer w.a.M(new).E().P()
- metricsCHIReconcilesStarted(ctx, new)
+ metrics.CHIInitZeroValues(ctx, new)
+ metrics.CHIReconcilesStarted(ctx, new)
startTime := time.Now()
- w.a.M(new).F().Info("Changing OLD to Normalized COMPLETED: %s/%s", new.Namespace, new.Name)
+ w.a.M(new).F().Info("Changing OLD to Normalized COMPLETED: %s", util.NamespaceNameString(new))
if new.HasAncestor() {
- w.a.M(new).F().Info("has ancestor, use it as a base for reconcile. CHI: %s/%s", new.Namespace, new.Name)
- old = new.GetAncestor()
+ w.a.M(new).F().Info("has ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(new))
+ old = new.GetAncestorT()
} else {
- w.a.M(new).F().Info("has NO ancestor, use empty CHI as a base for reconcile. CHI: %s/%s", new.Namespace, new.Name)
+ w.a.M(new).F().Info("has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(new))
old = nil
}
- w.a.M(new).F().Info("Normalized OLD CHI: %s/%s", new.Namespace, new.Name)
+ w.a.M(new).F().Info("Normalized OLD: %s", util.NamespaceNameString(new))
old = w.normalize(old)
- w.a.M(new).F().Info("Normalized NEW CHI: %s/%s", new.Namespace, new.Name)
+ w.a.M(new).F().Info("Normalized NEW: %s", util.NamespaceNameString(new))
new = w.normalize(new)
new.SetAncestor(old)
- w.logOldAndNew("normalized", old, new)
+ common.LogOldAndNew("normalized", old, new)
- actionPlan := model.NewActionPlan(old, new)
- w.logActionPlan(actionPlan)
+ actionPlan := action_plan.NewActionPlan(old, new)
+ common.LogActionPlan(actionPlan)
switch {
case actionPlan.HasActionsToDo():
@@ -90,7 +87,7 @@ func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInsta
case w.isAfterFinalizerInstalled(old, new):
w.a.M(new).F().Info("isAfterFinalizerInstalled - continue reconcile-2")
default:
- w.a.M(new).F().Info("ActionPlan has no actions and not finalizer - nothing to do")
+ w.a.M(new).F().Info("ActionPlan has no actions and no need to install finalizer - nothing to do")
return nil
}
@@ -106,13 +103,13 @@ func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInsta
if err := w.reconcile(ctx, new); err != nil {
// Something went wrong
- w.a.WithEvent(new, eventActionReconcile, eventReasonReconcileFailed).
+ w.a.WithEvent(new, common.EventActionReconcile, common.EventReasonReconcileFailed).
WithStatusError(new).
M(new).F().
- Error("FAILED to reconcile CHI err: %v", err)
+ Error("FAILED to reconcile CR %s, err: %v", util.NamespaceNameString(new), err)
w.markReconcileCompletedUnsuccessfully(ctx, new, err)
- if errors.Is(err, errCRUDAbort) {
- metricsCHIReconcilesAborted(ctx, new)
+ if errors.Is(err, common.ErrCRUDAbort) {
+ metrics.CHIReconcilesAborted(ctx, new)
}
} else {
// Reconcile successful
@@ -127,131 +124,121 @@ func (w *worker) reconcileCHI(ctx context.Context, old, new *api.ClickHouseInsta
w.waitForIPAddresses(ctx, new)
w.finalizeReconcileAndMarkCompleted(ctx, new)
- metricsCHIReconcilesCompleted(ctx, new)
- metricsCHIReconcilesTimings(ctx, new, time.Now().Sub(startTime).Seconds())
+ metrics.CHIReconcilesCompleted(ctx, new)
+ metrics.CHIReconcilesTimings(ctx, new, time.Now().Sub(startTime).Seconds())
}
return nil
}
-// ReconcileShardsAndHostsOptionsCtxKeyType specifies type for ReconcileShardsAndHostsOptionsCtxKey
-// More details here on why do we need special type
-// https://stackoverflow.com/questions/40891345/fix-should-not-use-basic-type-string-as-key-in-context-withvalue-golint
-type ReconcileShardsAndHostsOptionsCtxKeyType string
-
-// ReconcileShardsAndHostsOptionsCtxKey specifies name of the key to be used for ReconcileShardsAndHostsOptions
-const ReconcileShardsAndHostsOptionsCtxKey ReconcileShardsAndHostsOptionsCtxKeyType = "ReconcileShardsAndHostsOptions"
-
-// reconcile reconciles ClickHouseInstallation
-func (w *worker) reconcile(ctx context.Context, chi *api.ClickHouseInstallation) error {
+// reconcile reconciles Custom Resource
+func (w *worker) reconcile(ctx context.Context, cr *api.ClickHouseInstallation) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- w.a.V(2).M(chi).S().P()
- defer w.a.V(2).M(chi).E().P()
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
- counters := api.NewChiHostReconcileAttributesCounters()
- chi.WalkHosts(func(host *api.ChiHost) error {
+ counters := api.NewHostReconcileAttributesCounters()
+ cr.WalkHosts(func(host *api.Host) error {
counters.Add(host.GetReconcileAttributes())
return nil
})
- if counters.GetAdd() > 0 && counters.GetFound() == 0 && counters.GetModify() == 0 && counters.GetRemove() == 0 {
- w.a.V(1).M(chi).Info(
- "Looks like we are just adding hosts to a new CHI. Enabling full fan-out mode. CHI: %s/%s",
- chi.Namespace, chi.Name)
- ctx = context.WithValue(ctx, ReconcileShardsAndHostsOptionsCtxKey, &ReconcileShardsAndHostsOptions{
- fullFanOut: true,
+ if counters.AddOnly() {
+ w.a.V(1).M(cr).Info("Enabling full fan-out mode. CHI: %s", util.NamespaceNameString(cr))
+ ctx = context.WithValue(ctx, common.ReconcileShardsAndHostsOptionsCtxKey, &common.ReconcileShardsAndHostsOptions{
+ FullFanOut: true,
})
}
- return chi.WalkTillError(
+ return cr.WalkTillError(
ctx,
- w.reconcileCHIAuxObjectsPreliminary,
+ w.reconcileCRAuxObjectsPreliminary,
w.reconcileCluster,
w.reconcileShardsAndHosts,
- w.reconcileCHIAuxObjectsFinal,
+ w.reconcileCRAuxObjectsFinal,
)
}
-// reconcileCHIAuxObjectsPreliminary reconciles CHI preliminary in order to ensure that ConfigMaps are in place
-func (w *worker) reconcileCHIAuxObjectsPreliminary(ctx context.Context, chi *api.ClickHouseInstallation) error {
+// reconcileCRAuxObjectsPreliminary reconciles CR preliminary in order to ensure that ConfigMaps are in place
+func (w *worker) reconcileCRAuxObjectsPreliminary(ctx context.Context, cr *api.ClickHouseInstallation) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- w.a.V(2).M(chi).S().P()
- defer w.a.V(2).M(chi).E().P()
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
- // CHI common ConfigMap without added hosts
- chi.EnsureRuntime().LockCommonConfig()
- if err := w.reconcileCHIConfigMapCommon(ctx, chi, w.options()); err != nil {
+ // CR common ConfigMap without added hosts
+ cr.GetRuntime().LockCommonConfig()
+ if err := w.reconcileConfigMapCommon(ctx, cr, w.options()); err != nil {
w.a.F().Error("failed to reconcile config map common. err: %v", err)
}
- chi.EnsureRuntime().UnlockCommonConfig()
+ cr.GetRuntime().UnlockCommonConfig()
- // 3. CHI users ConfigMap
- if err := w.reconcileCHIConfigMapUsers(ctx, chi); err != nil {
+ // CR users ConfigMap - common for all hosts
+ if err := w.reconcileConfigMapCommonUsers(ctx, cr); err != nil {
w.a.F().Error("failed to reconcile config map users. err: %v", err)
}
return nil
}
-// reconcileCHIServicePreliminary runs first stage of CHI reconcile process
-func (w *worker) reconcileCHIServicePreliminary(ctx context.Context, chi *api.ClickHouseInstallation) error {
- if chi.IsStopped() {
- // Stopped CHI must have no entry point
- _ = w.c.deleteServiceCHI(ctx, chi)
+// reconcileCRServicePreliminary runs first stage of CR reconcile process
+func (w *worker) reconcileCRServicePreliminary(ctx context.Context, cr api.ICustomResource) error {
+ if cr.IsStopped() {
+ // Stopped CR must have no entry point
+ _ = w.c.deleteServiceCR(ctx, cr)
}
return nil
}
-// reconcileCHIServiceFinal runs second stage of CHI reconcile process
-func (w *worker) reconcileCHIServiceFinal(ctx context.Context, chi *api.ClickHouseInstallation) error {
- if chi.IsStopped() {
+// reconcileCRServiceFinal runs second stage of CR reconcile process
+func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomResource) error {
+ if cr.IsStopped() {
// Stopped CHI must have no entry point
return nil
}
// Create entry point for the whole CHI
- if service := w.task.creator.CreateServiceCHI(); service != nil {
- if err := w.reconcileService(ctx, chi, service); err != nil {
+ if service := w.task.Creator().CreateService(interfaces.ServiceCR); service != nil {
+ if err := w.reconcileService(ctx, cr, service); err != nil {
// Service not reconciled
- w.task.registryFailed.RegisterService(service.ObjectMeta)
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
return err
}
- w.task.registryReconciled.RegisterService(service.ObjectMeta)
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
}
return nil
}
-// reconcileCHIAuxObjectsFinal reconciles CHI global objects
-func (w *worker) reconcileCHIAuxObjectsFinal(ctx context.Context, chi *api.ClickHouseInstallation) (err error) {
+// reconcileCRAuxObjectsFinal reconciles CR global objects
+func (w *worker) reconcileCRAuxObjectsFinal(ctx context.Context, cr *api.ClickHouseInstallation) (err error) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- w.a.V(2).M(chi).S().P()
- defer w.a.V(2).M(chi).E().P()
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
- // CHI ConfigMaps with update
- chi.EnsureRuntime().LockCommonConfig()
- err = w.reconcileCHIConfigMapCommon(ctx, chi, nil)
- chi.EnsureRuntime().UnlockCommonConfig()
+ // CR ConfigMaps with update
+ cr.GetRuntime().LockCommonConfig()
+ err = w.reconcileConfigMapCommon(ctx, cr, nil)
+ cr.GetRuntime().UnlockCommonConfig()
return err
}
-// reconcileCHIConfigMapCommon reconciles all CHI's common ConfigMap
-func (w *worker) reconcileCHIConfigMapCommon(
+// reconcileConfigMapCommon reconciles common ConfigMap
+func (w *worker) reconcileConfigMapCommon(
ctx context.Context,
- chi *api.ClickHouseInstallation,
- options *model.ClickHouseConfigFilesGeneratorOptions,
+ cr api.ICustomResource,
+ options *config.FilesGeneratorOptions,
) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
@@ -261,143 +248,57 @@ func (w *worker) reconcileCHIConfigMapCommon(
// ConfigMap common for all resources in CHI
// contains several sections, mapped as separated chopConfig files,
// such as remote servers, zookeeper setup, etc
- configMapCommon := w.task.creator.CreateConfigMapCHICommon(options)
- err := w.reconcileConfigMap(ctx, chi, configMapCommon)
+ configMapCommon := w.task.Creator().CreateConfigMap(interfaces.ConfigMapCommon, options)
+ err := w.reconcileConfigMap(ctx, cr, configMapCommon)
if err == nil {
- w.task.registryReconciled.RegisterConfigMap(configMapCommon.ObjectMeta)
+ w.task.RegistryReconciled().RegisterConfigMap(configMapCommon.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterConfigMap(configMapCommon.ObjectMeta)
+ w.task.RegistryFailed().RegisterConfigMap(configMapCommon.GetObjectMeta())
}
return err
}
-// reconcileCHIConfigMapUsers reconciles all CHI's users ConfigMap
+// reconcileConfigMapCommonUsers reconciles all CHI's users ConfigMap
// ConfigMap common for all users resources in CHI
-func (w *worker) reconcileCHIConfigMapUsers(ctx context.Context, chi *api.ClickHouseInstallation) error {
+func (w *worker) reconcileConfigMapCommonUsers(ctx context.Context, cr api.ICustomResource) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
// ConfigMap common for all users resources in CHI
- configMapUsers := w.task.creator.CreateConfigMapCHICommonUsers()
- err := w.reconcileConfigMap(ctx, chi, configMapUsers)
+ configMapUsers := w.task.Creator().CreateConfigMap(interfaces.ConfigMapCommonUsers)
+ err := w.reconcileConfigMap(ctx, cr, configMapUsers)
if err == nil {
- w.task.registryReconciled.RegisterConfigMap(configMapUsers.ObjectMeta)
+ w.task.RegistryReconciled().RegisterConfigMap(configMapUsers.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterConfigMap(configMapUsers.ObjectMeta)
+ w.task.RegistryFailed().RegisterConfigMap(configMapUsers.GetObjectMeta())
}
return err
}
-// reconcileHostConfigMap reconciles host's personal ConfigMap
-func (w *worker) reconcileHostConfigMap(ctx context.Context, host *api.ChiHost) error {
+// reconcileConfigMapHost reconciles host's personal ConfigMap
+func (w *worker) reconcileConfigMapHost(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
// ConfigMap for a host
- configMap := w.task.creator.CreateConfigMapHost(host)
- err := w.reconcileConfigMap(ctx, host.GetCHI(), configMap)
+ configMap := w.task.Creator().CreateConfigMap(interfaces.ConfigMapHost, host)
+ err := w.reconcileConfigMap(ctx, host.GetCR(), configMap)
if err == nil {
- w.task.registryReconciled.RegisterConfigMap(configMap.ObjectMeta)
+ w.task.RegistryReconciled().RegisterConfigMap(configMap.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterConfigMap(configMap.ObjectMeta)
+ w.task.RegistryFailed().RegisterConfigMap(configMap.GetObjectMeta())
return err
}
return nil
}
-const unknownVersion = "failed to query"
-
-type versionOptions struct {
- skipNew bool
- skipStopped bool
- skipStoppedAncestor bool
-}
-
-func (opts versionOptions) shouldSkip(host *api.ChiHost) (bool, string) {
- if opts.skipNew && (host.IsNewOne()) {
- return true, "host is a new one, version is not not applicable"
- }
-
- if opts.skipStopped && host.IsStopped() {
- return true, "host is stopped, version is not applicable"
- }
-
- if opts.skipStoppedAncestor && host.GetAncestor().IsStopped() {
- return true, "host ancestor is stopped, version is not applicable"
- }
-
- return false, ""
-}
-
-// getHostClickHouseVersion gets host ClickHouse version
-func (w *worker) getHostClickHouseVersion(ctx context.Context, host *api.ChiHost, opts versionOptions) (string, error) {
- if skip, description := opts.shouldSkip(host); skip {
- return description, nil
- }
-
- version, err := w.ensureClusterSchemer(host).HostClickHouseVersion(ctx, host)
- if err != nil {
- w.a.V(1).M(host).F().Warning("Failed to get ClickHouse version on host: %s", host.GetName())
- return unknownVersion, err
- }
-
- w.a.V(1).M(host).F().Info("Get ClickHouse version on host: %s version: %s", host.GetName(), version)
- host.Runtime.Version = swversion.NewSoftWareVersion(version)
-
- return version, nil
-}
-
-func (w *worker) pollHostForClickHouseVersion(ctx context.Context, host *api.ChiHost) (version string, err error) {
- err = w.c.pollHost(
- ctx,
- host,
- nil,
- func(_ctx context.Context, _host *api.ChiHost) bool {
- var e error
- version, e = w.getHostClickHouseVersion(_ctx, _host, versionOptions{skipStopped: true})
- if e == nil {
- return true
- }
- w.a.V(1).M(host).F().Warning("Host is NOT alive: %s ", host.GetName())
- return false
- },
- )
- return
-}
-
-type reconcileHostStatefulSetOptions struct {
- forceRecreate bool
-}
-
-func (o *reconcileHostStatefulSetOptions) ForceRecreate() bool {
- if o == nil {
- return false
- }
- return o.forceRecreate
-}
-
-type reconcileHostStatefulSetOptionsArr []*reconcileHostStatefulSetOptions
-
-// NewReconcileHostStatefulSetOptionsArr creates new reconcileHostStatefulSetOptions array
-func NewReconcileHostStatefulSetOptionsArr(opts ...*reconcileHostStatefulSetOptions) (res reconcileHostStatefulSetOptionsArr) {
- return append(res, opts...)
-}
-
-// First gets first option
-func (a reconcileHostStatefulSetOptionsArr) First() *reconcileHostStatefulSetOptions {
- if len(a) > 0 {
- return a[0]
- }
- return nil
-}
-
// reconcileHostStatefulSet reconciles host's StatefulSet
-func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.ChiHost, opts ...*reconcileHostStatefulSetOptions) error {
+func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -406,38 +307,38 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.ChiHost
log.V(1).M(host).F().S().Info("reconcile StatefulSet start")
defer log.V(1).M(host).F().E().Info("reconcile StatefulSet end")
- version, _ := w.getHostClickHouseVersion(ctx, host, versionOptions{skipNew: true, skipStoppedAncestor: true})
- host.Runtime.CurStatefulSet, _ = w.c.getStatefulSet(host, false)
+ version := w.getHostSoftwareVersion(ctx, host)
+ host.Runtime.CurStatefulSet, _ = w.c.kube.STS().Get(ctx, host)
- w.a.V(1).M(host).F().Info("Reconcile host: %s. ClickHouse version: %s", host.GetName(), version)
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version)
// In case we have to force-restart host
// We'll do it via replicas: 0 in StatefulSet.
if w.shouldForceRestartHost(host) {
w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName())
- w.prepareHostStatefulSetWithStatus(ctx, host, true)
- _ = w.reconcileStatefulSet(ctx, host, false)
- metricsHostReconcilesRestart(ctx, host.GetCHI())
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true)
+ _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts)
+ metrics.HostReconcilesRestart(ctx, host.GetCR())
// At this moment StatefulSet has 0 replicas.
// First stage of RollingUpdate completed.
}
// We are in place, where we can reconcile StatefulSet to desired configuration.
w.a.V(1).M(host).F().Info("Reconcile host: %s. Reconcile StatefulSet", host.GetName())
- w.prepareHostStatefulSetWithStatus(ctx, host, false)
- err := w.reconcileStatefulSet(ctx, host, true, opts...)
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+ err := w.stsReconciler.ReconcileStatefulSet(ctx, host, true, opts)
if err == nil {
- w.task.registryReconciled.RegisterStatefulSet(host.Runtime.DesiredStatefulSet.ObjectMeta)
+ w.task.RegistryReconciled().RegisterStatefulSet(host.Runtime.DesiredStatefulSet.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterStatefulSet(host.Runtime.DesiredStatefulSet.ObjectMeta)
- if err == errCRUDIgnore {
+ w.task.RegistryFailed().RegisterStatefulSet(host.Runtime.DesiredStatefulSet.GetObjectMeta())
+ if err == common.ErrCRUDIgnore {
// Pretend nothing happened in case of ignore
err = nil
}
- host.GetCHI().EnsureStatus().HostFailed()
- w.a.WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileFailed).
- WithStatusAction(host.GetCHI()).
- WithStatusError(host.GetCHI()).
+ host.GetCR().IEnsureStatus().HostFailed()
+ w.a.WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(host.GetCR()).
+ WithStatusError(host.GetCR()).
M(host).F().
Error("FAILED to reconcile StatefulSet for host: %s", host.GetName())
}
@@ -445,24 +346,36 @@ func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.ChiHost
return err
}
+func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) string {
+ version, _ := w.getHostClickHouseVersion(
+ ctx,
+ host,
+ versionOptions{
+ skipNew: true,
+ skipStoppedAncestor: true,
+ },
+ )
+ return version
+}
+
// reconcileHostService reconciles host's Service
-func (w *worker) reconcileHostService(ctx context.Context, host *api.ChiHost) error {
+func (w *worker) reconcileHostService(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- service := w.task.creator.CreateServiceHost(host)
+ service := w.task.Creator().CreateService(interfaces.ServiceHost, host)
if service == nil {
// This is not a problem, service may be omitted
return nil
}
- err := w.reconcileService(ctx, host.GetCHI(), service)
+ err := w.reconcileService(ctx, host.GetCR(), service)
if err == nil {
w.a.V(1).M(host).F().Info("DONE Reconcile service of the host: %s", host.GetName())
- w.task.registryReconciled.RegisterService(service.ObjectMeta)
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
} else {
w.a.V(1).M(host).F().Warning("FAILED Reconcile service of the host: %s", host.GetName())
- w.task.registryFailed.RegisterService(service.ObjectMeta)
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
}
return err
}
@@ -477,44 +390,49 @@ func (w *worker) reconcileCluster(ctx context.Context, cluster *api.Cluster) err
w.a.V(2).M(cluster).S().P()
defer w.a.V(2).M(cluster).E().P()
- // Add ChkCluster's Service
- if service := w.task.creator.CreateServiceCluster(cluster); service != nil {
- if err := w.reconcileService(ctx, cluster.Runtime.CHI, service); err == nil {
- w.task.registryReconciled.RegisterService(service.ObjectMeta)
+ // Add Cluster Service
+ if service := w.task.Creator().CreateService(interfaces.ServiceCluster, cluster); service != nil {
+ if err := w.reconcileService(ctx, cluster.GetRuntime().GetCR(), service); err == nil {
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterService(service.ObjectMeta)
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
}
}
- // Add ChkCluster's Auto Secret
- if cluster.Secret.Source() == api.ClusterSecretSourceAuto {
- if secret := w.task.creator.CreateClusterSecret(model.CreateClusterAutoSecretName(cluster)); secret != nil {
- if err := w.reconcileSecret(ctx, cluster.Runtime.CHI, secret); err == nil {
- w.task.registryReconciled.RegisterSecret(secret.ObjectMeta)
- } else {
- w.task.registryFailed.RegisterSecret(secret.ObjectMeta)
- }
- }
- }
+ w.reconcileClusterSecret(ctx, cluster)
- pdb := w.task.creator.NewPodDisruptionBudget(cluster)
+ pdb := w.task.Creator().CreatePodDisruptionBudget(cluster)
if err := w.reconcilePDB(ctx, cluster, pdb); err == nil {
- w.task.registryReconciled.RegisterPDB(pdb.ObjectMeta)
+ w.task.RegistryReconciled().RegisterPDB(pdb.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterPDB(pdb.ObjectMeta)
+ w.task.RegistryFailed().RegisterPDB(pdb.GetObjectMeta())
}
+ reconcileZookeeperRootPath(cluster)
return nil
}
+func (w *worker) reconcileClusterSecret(ctx context.Context, cluster *api.Cluster) {
+ // Add cluster's Auto Secret
+ if cluster.Secret.Source() == api.ClusterSecretSourceAuto {
+ if secret := w.task.Creator().CreateClusterSecret(w.c.namer.Name(interfaces.NameClusterAutoSecret, cluster)); secret != nil {
+ if err := w.reconcileSecret(ctx, cluster.Runtime.CHI, secret); err == nil {
+ w.task.RegistryReconciled().RegisterSecret(secret.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterSecret(secret.GetObjectMeta())
+ }
+ }
+ }
+}
+
// getReconcileShardsWorkersNum calculates how many workers are allowed to be used for concurrent shard reconcile
-func (w *worker) getReconcileShardsWorkersNum(shards []*api.ChiShard, opts *ReconcileShardsAndHostsOptions) int {
+func (w *worker) getReconcileShardsWorkersNum(shards []*api.ChiShard, opts *common.ReconcileShardsAndHostsOptions) int {
availableWorkers := float64(chop.Config().Reconcile.Runtime.ReconcileShardsThreadsNumber)
maxConcurrencyPercent := float64(chop.Config().Reconcile.Runtime.ReconcileShardsMaxConcurrencyPercent)
_100Percent := float64(100)
shardsNum := float64(len(shards))
- if opts.FullFanOut() {
+ if opts.FullFanOut {
// For full fan-out scenarios use all available workers.
// Always allow at least 1 worker.
return int(math.Max(availableWorkers, 1))
@@ -526,38 +444,28 @@ func (w *worker) getReconcileShardsWorkersNum(shards []*api.ChiShard, opts *Reco
return int(math.Min(availableWorkers, maxAllowedWorkers))
}
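
The lines elided from this hunk compute `maxAllowedWorkers`; the sketch below assumes it is the concurrency-percent cap applied to the shard count, rounded up, which is consistent with the visible `math.Min`/`math.Max` calls. A runnable restatement:

```go
package main

import (
	"fmt"
	"math"
)

// workersFor mirrors the shape of getReconcileShardsWorkersNum. It is a
// sketch: the maxAllowedWorkers derivation is an assumption based on the
// percent cap, since those lines are elided from the hunk above.
func workersFor(shards int, available, maxConcurrencyPercent float64, fullFanOut bool) int {
	if fullFanOut {
		// Full fan-out: use all available workers, but always at least one.
		return int(math.Max(available, 1))
	}
	// Cap concurrency at a percentage of the shard count.
	maxAllowedWorkers := math.Max(math.Ceil(float64(shards)*maxConcurrencyPercent/100), 1)
	return int(math.Min(available, maxAllowedWorkers))
}

func main() {
	fmt.Println(workersFor(10, 8, 50, false)) // 5: half of 10 shards, under the 8 available
	fmt.Println(workersFor(10, 8, 50, true))  // 8: full fan-out uses everything
}
```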
-// ReconcileShardsAndHostsOptions is and options for reconciler
-type ReconcileShardsAndHostsOptions struct {
- fullFanOut bool
-}
-
-// FullFanOut gets value
-func (o *ReconcileShardsAndHostsOptions) FullFanOut() bool {
- if o == nil {
- return false
- }
- return o.fullFanOut
-}
-
// reconcileShardsAndHosts reconciles shards and hosts of each shard
func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*api.ChiShard) error {
- // Sanity check - CHI has to have shard(s)
+ // Sanity check - has to have shard(s)
if len(shards) == 0 {
return nil
}
+ log.V(1).F().S().Info("reconcileShardsAndHosts start")
+ defer log.V(1).F().E().Info("reconcileShardsAndHosts end")
+
// Try to fetch options
- opts, ok := ctx.Value(ReconcileShardsAndHostsOptionsCtxKey).(*ReconcileShardsAndHostsOptions)
+ opts, ok := ctx.Value(common.ReconcileShardsAndHostsOptionsCtxKey).(*common.ReconcileShardsAndHostsOptions)
if ok {
w.a.V(1).Info("found ReconcileShardsAndHostsOptionsCtxKey")
} else {
w.a.V(1).Info("not found ReconcileShardsAndHostsOptionsCtxKey, use empty opts")
- opts = &ReconcileShardsAndHostsOptions{}
+ opts = &common.ReconcileShardsAndHostsOptions{}
}
// Which shard to start concurrent processing with
var startShard int
- if opts.FullFanOut() {
+ if opts.FullFanOut {
// For full fan-out scenarios we'll start shards processing from the very beginning
startShard = 0
w.a.V(1).Info("full fan-out requested")
@@ -614,21 +522,17 @@ func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*api.ChiS
return nil
}
-func (w *worker) reconcileShardWithHosts(ctx context.Context, shard *api.ChiShard) error {
+func (w *worker) reconcileShardWithHosts(ctx context.Context, shard api.IShard) error {
if err := w.reconcileShard(ctx, shard); err != nil {
return err
}
- for replicaIndex := range shard.Hosts {
- host := shard.Hosts[replicaIndex]
- if err := w.reconcileHost(ctx, host); err != nil {
- return err
- }
- }
- return nil
+ return shard.WalkHostsAbortOnError(func(host *api.Host) error {
+ return w.reconcileHost(ctx, host)
+ })
}
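
`reconcileShardWithHosts` now delegates iteration to `WalkHostsAbortOnError` instead of indexing `shard.Hosts` directly. A sketch of that walker contract, assuming it simply stops at the first callback error:

```go
package main

import (
	"errors"
	"fmt"
)

type host struct{ name string }

type shard struct{ hosts []*host }

// walkHostsAbortOnError visits hosts in order and stops at the first error --
// a sketch of the contract the diff relies on, not the operator's implementation.
func (s *shard) walkHostsAbortOnError(f func(*host) error) error {
	for _, h := range s.hosts {
		if err := f(h); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	s := &shard{hosts: []*host{{"host-0"}, {"host-1"}, {"host-2"}}}
	err := s.walkHostsAbortOnError(func(h *host) error {
		fmt.Println("reconciling", h.name)
		if h.name == "host-1" {
			return errors.New("reconcile failed")
		}
		return nil
	})
	fmt.Println("result:", err) // host-2 is never visited
}
```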
// reconcileShard reconciles specified shard, excluding nested replicas
-func (w *worker) reconcileShard(ctx context.Context, shard *api.ChiShard) error {
+func (w *worker) reconcileShard(ctx context.Context, shard api.IShard) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -637,28 +541,29 @@ func (w *worker) reconcileShard(ctx context.Context, shard *api.ChiShard) error
w.a.V(2).M(shard).S().P()
defer w.a.V(2).M(shard).E().P()
+ err := w.reconcileShardService(ctx, shard)
+
+ return err
+}
+
+func (w *worker) reconcileShardService(ctx context.Context, shard api.IShard) error {
// Add Shard's Service
- service := w.task.creator.CreateServiceShard(shard)
+ service := w.task.Creator().CreateService(interfaces.ServiceShard, shard)
if service == nil {
// This is not a problem, ServiceShard may be omitted
return nil
}
- err := w.reconcileService(ctx, shard.Runtime.CHI, service)
+ err := w.reconcileService(ctx, shard.GetRuntime().GetCR(), service)
if err == nil {
- w.task.registryReconciled.RegisterService(service.ObjectMeta)
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
} else {
- w.task.registryFailed.RegisterService(service.ObjectMeta)
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
}
return err
}
// reconcileHost reconciles specified ClickHouse host
-func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
- var (
- reconcileHostStatefulSetOpts *reconcileHostStatefulSetOptions
- migrateTableOpts *migrateTableOptions
- )
-
+func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -667,60 +572,113 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
w.a.V(2).M(host).S().P()
defer w.a.V(2).M(host).E().P()
- metricsHostReconcilesStarted(ctx, host.GetCHI())
+ metrics.HostReconcilesStarted(ctx, host.GetCR())
startTime := time.Now()
if host.IsFirst() {
- w.reconcileCHIServicePreliminary(ctx, host.GetCHI())
- defer w.reconcileCHIServiceFinal(ctx, host.GetCHI())
+ _ = w.reconcileCRServicePreliminary(ctx, host.GetCR())
+ defer w.reconcileCRServiceFinal(ctx, host.GetCR())
+ }
+
+ // Create artifacts
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+
+ if err := w.reconcileHostPrepare(ctx, host); err != nil {
+ return err
+ }
+ if err := w.reconcileHostMain(ctx, host); err != nil {
+ return err
+ }
+ // Host is now added and functional
+ host.GetReconcileAttributes().UnsetAdd()
+ if err := w.reconcileHostBootstrap(ctx, host); err != nil {
+ return err
+ }
+
+ now := time.Now()
+ hostsCompleted := 0
+ hostsCount := 0
+ host.GetCR().IEnsureStatus().HostCompleted()
+ if host.GetCR() != nil && host.GetCR().GetStatus() != nil {
+ hostsCompleted = host.GetCR().GetStatus().GetHostsCompletedCount()
+ hostsCount = host.GetCR().GetStatus().GetHostsCount()
}
+ w.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionProgress, common.EventReasonProgressHostsCompleted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("[now: %s] %s: %d of %d", now, common.EventReasonProgressHostsCompleted, hostsCompleted, hostsCount)
+ _ = w.c.updateCRObjectStatus(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+
+ metrics.HostReconcilesCompleted(ctx, host.GetCR())
+ metrics.HostReconcilesTimings(ctx, host.GetCR(), time.Since(startTime).Seconds())
+
+ return nil
+}
+
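`reconcileHost` is now split into prepare/main/bootstrap phases that abort on the first error. A sketch of that control flow (the phase names mirror the functions below; the pipeline helper itself is illustrative):

```go
package main

import (
	"context"
	"fmt"
)

type phase struct {
	name string
	run  func(ctx context.Context) error
}

// runPhases executes reconcile phases in order, aborting on the first error --
// the same control flow reconcileHost uses for prepare/main/bootstrap.
func runPhases(ctx context.Context, phases []phase) error {
	for _, p := range phases {
		if err := p.run(ctx); err != nil {
			return fmt.Errorf("phase %s: %w", p.name, err)
		}
	}
	return nil
}

func main() {
	err := runPhases(context.Background(), []phase{
		{"prepare", func(context.Context) error { fmt.Println("exclude host, wait for queries"); return nil }},
		{"main", func(context.Context) error { fmt.Println("configmap, PVCs, statefulset, tables"); return nil }},
		{"bootstrap", func(context.Context) error { fmt.Println("include host back"); return nil }},
	})
	fmt.Println("reconcile result:", err)
}
```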
+// reconcileHostPrepare prepares the specified ClickHouse host for reconciliation: checks the running version, excludes the host from the cluster and waits for in-flight queries to complete
+func (w *worker) reconcileHostPrepare(ctx context.Context, host *api.Host) error {
// Check whether ClickHouse is running and accessible and what version is available
if version, err := w.getHostClickHouseVersion(ctx, host, versionOptions{skipNew: true, skipStoppedAncestor: true}); err == nil {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileStarted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileStarted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Reconcile Host start. Host: %s ClickHouse version running: %s", host.GetName(), version)
} else {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileStarted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileStarted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Warning("Reconcile Host start. Host: %s Failed to get ClickHouse version: %s", host.GetName(), version)
}
- // Create artifacts
- w.prepareHostStatefulSetWithStatus(ctx, host, false)
-
- if err := w.excludeHost(ctx, host); err != nil {
- metricsHostReconcilesErrors(ctx, host.GetCHI())
- w.a.V(1).
- M(host).F().
- Warning("Reconcile Host interrupted with an error 1. Host: %s Err: %v", host.GetName(), err)
- return err
+ if w.excludeHost(ctx, host) {
+ // We need to wait for queries to complete only when the host is excluded from the cluster.
+ // If the host is not excluded, new queries keep being started on it,
+ // so there is no point waiting for completion - we could wait endlessly.
+ _ = w.completeQueries(ctx, host)
}
- _ = w.completeQueries(ctx, host)
+ return nil
+}
- if err := w.reconcileHostConfigMap(ctx, host); err != nil {
- metricsHostReconcilesErrors(ctx, host.GetCHI())
+// reconcileHostMain reconciles the main resources of the specified ClickHouse host: ConfigMap, PVCs, StatefulSet, Service and table migration
+func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
+ var (
+ reconcileStatefulSetOpts *statefulset.ReconcileOptions
+ migrateTableOpts *migrateTableOptions
+ )
+
+ if err := w.reconcileConfigMapHost(ctx, host); err != nil {
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
w.a.V(1).
M(host).F().
Warning("Reconcile Host interrupted with an error 2. Host: %s Err: %v", host.GetName(), err)
return err
}
+ w.setHasData(host)
+
w.a.V(1).
M(host).F().
Info("Reconcile PVCs and check possible data loss for host: %s", host.GetName())
- if errIsDataLoss(w.reconcilePVCs(ctx, host, api.DesiredStatefulSet)) {
+ if storage.ErrIsDataLoss(
+ storage.NewStorageReconciler(
+ w.task,
+ w.c.namer,
+ storage.NewStoragePVC(w.c.kube.Storage()),
+ ).ReconcilePVCs(ctx, host, api.DesiredStatefulSet),
+ ) {
// In case of data loss detection on existing volumes, we need to:
// 1. recreate StatefulSet
// 2. run tables migration again
- reconcileHostStatefulSetOpts = &reconcileHostStatefulSetOptions{
- forceRecreate: true,
- }
+ reconcileStatefulSetOpts = reconcileStatefulSetOpts.SetForceRecreate()
migrateTableOpts = &migrateTableOptions{
forceMigrate: true,
dropReplica: true,
@@ -730,20 +688,22 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
Info("Data loss detected for host: %s. Will do force migrate", host.GetName())
}
- if err := w.reconcileHostStatefulSet(ctx, host, reconcileHostStatefulSetOpts); err != nil {
- metricsHostReconcilesErrors(ctx, host.GetCHI())
+ if err := w.reconcileHostStatefulSet(ctx, host, reconcileStatefulSetOpts); err != nil {
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
w.a.V(1).
M(host).F().
Warning("Reconcile Host interrupted with an error 3. Host: %s Err: %v", host.GetName(), err)
return err
}
// Polish all new volumes that operator has to create
- _ = w.reconcilePVCs(ctx, host, api.DesiredStatefulSet)
+ _ = storage.NewStorageReconciler(
+ w.task,
+ w.c.namer,
+ storage.NewStoragePVC(w.c.kube.Storage()),
+ ).ReconcilePVCs(ctx, host, api.DesiredStatefulSet)
_ = w.reconcileHostService(ctx, host)
- host.GetReconcileAttributes().UnsetAdd()
-
// Prepare for tables migration.
// Sometimes service needs some time to start after creation|modification before being accessible for usage
// Check whether ClickHouse is running and accessible and what version is available.
@@ -758,8 +718,13 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
}
_ = w.migrateTables(ctx, host, migrateTableOpts)
+ return nil
+}
+
+// reconcileHostBootstrap reconciles specified ClickHouse host
+func (w *worker) reconcileHostBootstrap(ctx context.Context, host *api.Host) error {
if err := w.includeHost(ctx, host); err != nil {
- metricsHostReconcilesErrors(ctx, host.GetCHI())
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
w.a.V(1).
M(host).F().
Warning("Reconcile Host interrupted with an error 4. Host: %s Err: %v", host.GetName(), err)
@@ -770,511 +735,17 @@ func (w *worker) reconcileHost(ctx context.Context, host *api.ChiHost) error {
// Sometimes service needs some time to start after creation|modification before being accessible for usage
if version, err := w.pollHostForClickHouseVersion(ctx, host); err == nil {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileCompleted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileCompleted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Reconcile Host completed. Host: %s ClickHouse version running: %s", host.GetName(), version)
} else {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionReconcile, eventReasonReconcileCompleted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileCompleted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Warning("Reconcile Host completed. Host: %s Failed to get ClickHouse version: %s", host.GetName(), version)
}
- now := time.Now()
- hostsCompleted := 0
- hostsCount := 0
- host.GetCHI().EnsureStatus().HostCompleted()
- if host.GetCHI() != nil && host.GetCHI().Status != nil {
- hostsCompleted = host.GetCHI().Status.GetHostsCompletedCount()
- hostsCount = host.GetCHI().Status.GetHostsCount()
- }
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionProgress, eventReasonProgressHostsCompleted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("[now: %s] %s: %d of %d", now, eventReasonProgressHostsCompleted, hostsCompleted, hostsCount)
-
- _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
- MainFields: true,
- },
- })
-
- metricsHostReconcilesCompleted(ctx, host.GetCHI())
- metricsHostReconcilesTimings(ctx, host.GetCHI(), time.Now().Sub(startTime).Seconds())
-
- return nil
-}
-
-// reconcilePDB reconciles PodDisruptionBudget
-func (w *worker) reconcilePDB(ctx context.Context, cluster *api.Cluster, pdb *policy.PodDisruptionBudget) error {
- cur, err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Get(ctx, pdb.Name, controller.NewGetOptions())
- switch {
- case err == nil:
- pdb.ResourceVersion = cur.ResourceVersion
- _, err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Update(ctx, pdb, controller.NewUpdateOptions())
- if err == nil {
- log.V(1).Info("PDB updated: %s/%s", pdb.Namespace, pdb.Name)
- } else {
- log.Error("FAILED to update PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
- return nil
- }
- case apiErrors.IsNotFound(err):
- _, err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, controller.NewCreateOptions())
- if err == nil {
- log.V(1).Info("PDB created: %s/%s", pdb.Namespace, pdb.Name)
- } else {
- log.Error("FAILED create PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
- return err
- }
- default:
- log.Error("FAILED get PDB: %s/%s err: %v", pdb.Namespace, pdb.Name, err)
- return err
- }
-
return nil
}
-
-// reconcileConfigMap reconciles core.ConfigMap which belongs to specified CHI
-func (w *worker) reconcileConfigMap(
- ctx context.Context,
- chi *api.ClickHouseInstallation,
- configMap *core.ConfigMap,
-) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- w.a.V(2).M(chi).S().P()
- defer w.a.V(2).M(chi).E().P()
-
- // Check whether this object already exists in k8s
- curConfigMap, err := w.c.getConfigMap(&configMap.ObjectMeta, true)
-
- if curConfigMap != nil {
- // We have ConfigMap - try to update it
- err = w.updateConfigMap(ctx, chi, configMap)
- }
-
- if apiErrors.IsNotFound(err) {
- // ConfigMap not found - even during Update process - try to create it
- err = w.createConfigMap(ctx, chi, configMap)
- }
-
- if err != nil {
- w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("FAILED to reconcile ConfigMap: %s CHI: %s ", configMap.Name, chi.Name)
- }
-
- return err
-}
-
-// hasService checks whether specified service exists
-func (w *worker) hasService(ctx context.Context, chi *api.ClickHouseInstallation, service *core.Service) bool {
- // Check whether this object already exists
- curService, _ := w.c.getService(service)
- return curService != nil
-}
-
-// reconcileService reconciles core.Service
-func (w *worker) reconcileService(ctx context.Context, chi *api.ClickHouseInstallation, service *core.Service) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- w.a.V(2).M(chi).S().Info(service.Name)
- defer w.a.V(2).M(chi).E().Info(service.Name)
-
- // Check whether this object already exists
- curService, err := w.c.getService(service)
-
- if curService != nil {
- // We have the Service - try to update it
- w.a.V(1).M(chi).F().Info("Service found: %s/%s. Will try to update", service.Namespace, service.Name)
- err = w.updateService(ctx, chi, curService, service)
- }
-
- if err != nil {
- if apiErrors.IsNotFound(err) {
- // The Service is either not found or not updated. Try to recreate it
- w.a.V(1).M(chi).F().Info("Service: %s/%s not found. err: %v", service.Namespace, service.Name, err)
- } else {
- // The Service is either not found or not updated. Try to recreate it
- w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("Update Service: %s/%s failed with error: %v", service.Namespace, service.Name, err)
- }
-
- _ = w.c.deleteServiceIfExists(ctx, service.Namespace, service.Name)
- err = w.createService(ctx, chi, service)
- }
-
- if err == nil {
- w.a.V(1).M(chi).F().Info("Service reconcile successful: %s/%s", service.Namespace, service.Name)
- } else {
- w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("FAILED to reconcile Service: %s/%s CHI: %s ", service.Namespace, service.Name, chi.Name)
- }
-
- return err
-}
-
-// reconcileSecret reconciles core.Secret
-func (w *worker) reconcileSecret(ctx context.Context, chi *api.ClickHouseInstallation, secret *core.Secret) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- w.a.V(2).M(chi).S().Info(secret.Name)
- defer w.a.V(2).M(chi).E().Info(secret.Name)
-
- // Check whether this object already exists
- if _, err := w.c.getSecret(secret); err == nil {
- // We have Secret - try to update it
- return nil
- }
-
- // Secret not found or broken. Try to recreate
- _ = w.c.deleteSecretIfExists(ctx, secret.Namespace, secret.Name)
- err := w.createSecret(ctx, chi, secret)
- if err != nil {
- w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("FAILED to reconcile Secret: %s CHI: %s ", secret.Name, chi.Name)
- }
-
- return err
-}
-
-func (w *worker) dumpStatefulSetDiff(host *api.ChiHost, cur, new *apps.StatefulSet) {
- if cur == nil {
- w.a.V(1).M(host).Info("Cur StatefulSet is not available, nothing to compare to")
- return
- }
- if new == nil {
- w.a.V(1).M(host).Info("New StatefulSet is not available, nothing to compare to")
- return
- }
-
- if diff, equal := messagediff.DeepDiff(cur.Spec, new.Spec); equal {
- w.a.V(1).M(host).Info("StatefulSet.Spec ARE EQUAL")
- } else {
- w.a.V(1).Info(
- "StatefulSet.Spec ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
- util.MessageDiffItemString("added .spec items", "none", "", diff.Added),
- util.MessageDiffItemString("modified .spec items", "none", "", diff.Modified),
- util.MessageDiffItemString("removed .spec items", "none", "", diff.Removed),
- )
- }
- if diff, equal := messagediff.DeepDiff(cur.Labels, new.Labels); equal {
- w.a.V(1).M(host).Info("StatefulSet.Labels ARE EQUAL")
- } else {
- if len(cur.Labels)+len(new.Labels) > 0 {
- w.a.V(1).Info(
- "StatefulSet.Labels ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
- util.MessageDiffItemString("added .labels items", "none", "", diff.Added),
- util.MessageDiffItemString("modified .labels items", "none", "", diff.Modified),
- util.MessageDiffItemString("removed .labels items", "none", "", diff.Removed),
- )
- }
- }
- if diff, equal := messagediff.DeepDiff(cur.Annotations, new.Annotations); equal {
- w.a.V(1).M(host).Info("StatefulSet.Annotations ARE EQUAL")
- } else {
- if len(cur.Annotations)+len(new.Annotations) > 0 {
- w.a.V(1).Info(
- "StatefulSet.Annotations ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
- util.MessageDiffItemString("added .annotations items", "none", "", diff.Added),
- util.MessageDiffItemString("modified .annotations items", "none", "", diff.Modified),
- util.MessageDiffItemString("removed .annotations items", "none", "", diff.Removed),
- )
- }
- }
-}
-
-// reconcileStatefulSet reconciles StatefulSet of a host
-func (w *worker) reconcileStatefulSet(
- ctx context.Context,
- host *api.ChiHost,
- register bool,
- opts ...*reconcileHostStatefulSetOptions,
-) (err error) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- newStatefulSet := host.Runtime.DesiredStatefulSet
-
- w.a.V(2).M(host).S().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta))
- defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(newStatefulSet.ObjectMeta))
-
- if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame {
- w.a.V(2).M(host).F().Info("No need to reconcile THE SAME StatefulSet: %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
- if register {
- host.GetCHI().EnsureStatus().HostUnchanged()
- _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
- MainFields: true,
- },
- })
- }
- return nil
- }
-
- // Check whether this object already exists in k8s
- host.Runtime.CurStatefulSet, err = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
-
- // Report diff to trace
- if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusModified {
- w.a.V(1).M(host).F().Info("Need to reconcile MODIFIED StatefulSet: %s", util.NamespaceNameString(newStatefulSet.ObjectMeta))
- w.dumpStatefulSetDiff(host, host.Runtime.CurStatefulSet, newStatefulSet)
- }
-
- opt := NewReconcileHostStatefulSetOptionsArr(opts...).First()
- switch {
- case opt.ForceRecreate():
- // Force recreate prevails over all other requests
- w.recreateStatefulSet(ctx, host, register)
- default:
- // We have (or had in the past) StatefulSet - try to update|recreate it
- err = w.updateStatefulSet(ctx, host, register)
- }
-
- if apiErrors.IsNotFound(err) {
- // StatefulSet not found - even during Update process - try to create it
- err = w.createStatefulSet(ctx, host, register)
- }
-
- // Host has to know current StatefulSet and Pod
- host.Runtime.CurStatefulSet, _ = w.c.getStatefulSet(&newStatefulSet.ObjectMeta, false)
-
- return err
-}
-
-// Comment out PV
-// reconcilePersistentVolumes reconciles all PVs of a host
-//func (w *worker) reconcilePersistentVolumes(ctx context.Context, host *api.ChiHost) {
-// if util.IsContextDone(ctx) {
-// return
-// }
-//
-// w.c.walkPVs(host, func(pv *core.PersistentVolume) {
-// pv = w.task.creator.PreparePersistentVolume(pv, host)
-// _, _ = w.c.updatePersistentVolume(ctx, pv)
-// })
-//}
-
-// reconcilePVCs reconciles all PVCs of a host
-func (w *worker) reconcilePVCs(ctx context.Context, host *api.ChiHost, which api.WhichStatefulSet) (res ErrorDataPersistence) {
- if util.IsContextDone(ctx) {
- return nil
- }
-
- namespace := host.Runtime.Address.Namespace
- w.a.V(2).M(host).S().Info("host %s/%s", namespace, host.GetName())
- defer w.a.V(2).M(host).E().Info("host %s/%s", namespace, host.GetName())
-
- host.WalkVolumeMounts(which, func(volumeMount *core.VolumeMount) {
- if util.IsContextDone(ctx) {
- return
- }
- if e := w.reconcilePVCFromVolumeMount(ctx, host, volumeMount); e != nil {
- if res == nil {
- res = e
- }
- }
- })
-
- return
-}
-
-func isLostPVC(pvc *core.PersistentVolumeClaim, isJustCreated bool, host *api.ChiHost) bool {
- if !model.HostHasTablesCreated(host) {
- // No data to loose
- return false
- }
-
- // Now we assume that this PVC has had some data in the past, since tables were created on it
-
- if pvc == nil {
- // No PVC available at all, was it deleted?
- // Lost PVC
- return true
- }
-
- if isJustCreated {
- // PVC was just created by the operator, not fetched
- // Lost PVC
- return true
- }
-
- // PVC is in place
- return false
-}
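
The removed `isLostPVC` encodes a small decision table: loss is only possible if tables existed on the host, and then a missing or freshly modeled PVC means the data is gone. A runnable restatement:

```go
package main

import "fmt"

// pvcLost restates the isLostPVC decision as a sketch: data loss is only
// possible when tables were created on the host before, and the PVC is either
// missing entirely or was just modeled by the operator instead of fetched.
func pvcLost(hostHadTables, pvcFound, pvcJustCreated bool) bool {
	if !hostHadTables {
		return false // nothing to lose
	}
	if !pvcFound {
		return true // PVC is gone entirely, was it deleted?
	}
	return pvcJustCreated // a fresh PVC cannot hold the old data
}

func main() {
	fmt.Println(pvcLost(true, false, false)) // true: tables existed, PVC vanished
	fmt.Println(pvcLost(true, true, true))   // true: PVC was just recreated from template
	fmt.Println(pvcLost(true, true, false))  // false: PVC is in place
}
```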
-
-func (w *worker) reconcilePVCFromVolumeMount(
- ctx context.Context,
- host *api.ChiHost,
- volumeMount *core.VolumeMount,
-) (
- res ErrorDataPersistence,
-) {
- // Which PVC are we going to reconcile
- pvc, volumeClaimTemplate, isModelCreated, err := w.fetchPVC(ctx, host, volumeMount)
- if err != nil {
- // Unable to fetch or model PVC correctly.
- // May be volume is not built from VolumeClaimTemplate, it may be reference to ConfigMap
- return nil
- }
-
- // PVC available. Either fetched or not found and model created (from templates)
-
- pvcName := "pvc-name-unknown-pvc-not-exist"
- namespace := host.Runtime.Address.Namespace
-
- if pvc != nil {
- pvcName = pvc.Name
- }
-
- w.a.V(2).M(host).S().Info("reconcile volumeMount (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
- defer w.a.V(2).M(host).E().Info("reconcile volumeMount (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
-
- // Check scenario 1 - no PVC available
- // Such a PVC should be re-created
- if isLostPVC(pvc, isModelCreated, host) {
- // Looks like data loss detected
- w.a.V(1).M(host).Warning("PVC is either newly added to the host or was lost earlier (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
- res = errPVCIsLost
- }
-
- // Check scenario 2 - PVC exists, but no PV available
- // Such a PVC should be deleted and re-created
- if w.isLostPV(pvc) {
- // This PVC has no PV available
- // Looks like data loss detected
- w.deletePVC(ctx, pvc)
- w.a.V(1).M(host).Info("deleted PVC with lost PV (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
-
- // Refresh PVC model. Since PVC is just deleted refreshed model may not be fetched from the k8s,
- // but can be provided by the operator still
- pvc, volumeClaimTemplate, _, _ = w.fetchPVC(ctx, host, volumeMount)
- res = errPVCWithLostPVDeleted
- }
-
- // In any case - be PVC available or not - need to reconcile it
-
- switch pvcReconciled, err := w.reconcilePVC(ctx, pvc, host, volumeClaimTemplate); err {
- case errNilPVC:
- w.a.M(host).F().Error("Unable to reconcile nil PVC: %s/%s", namespace, pvcName)
- case nil:
- w.task.registryReconciled.RegisterPVC(pvcReconciled.ObjectMeta)
- default:
- w.task.registryFailed.RegisterPVC(pvc.ObjectMeta)
- w.a.M(host).F().Error("Unable to reconcile PVC: %s/%s err: %v", pvc.Namespace, pvc.Name, err)
- }
-
- // It still may return data loss errors
- return res
-}
-
-func (w *worker) fetchPVC(
- ctx context.Context,
- host *api.ChiHost,
- volumeMount *core.VolumeMount,
-) (
- pvc *core.PersistentVolumeClaim,
- vct *api.VolumeClaimTemplate,
- isModelCreated bool,
- err error,
-) {
- namespace := host.Runtime.Address.Namespace
-
- // Try to find volumeClaimTemplate that is used to build this mounted volume
- // Volume mount can point not only to volume claim, but also to other entities, such as ConfigMap, for example.
- pvcName, ok := model.CreatePVCNameByVolumeMount(host, volumeMount)
- if !ok {
- // No this is not a reference to VolumeClaimTemplate, it may be reference to ConfigMap
- return nil, nil, false, fmt.Errorf("unable to make PVC name from volume mount")
- }
- volumeClaimTemplate, ok := model.GetVolumeClaimTemplate(host, volumeMount)
- if !ok {
- // No this is not a reference to VolumeClaimTemplate, it may be reference to ConfigMap
- return nil, nil, false, fmt.Errorf("unable to find VolumeClaimTemplate from volume mount")
- }
-
- // We have a VolumeClaimTemplate for this VolumeMount
- // Treat it as persistent storage mount
-
- _pvc, e := w.c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, controller.NewGetOptions())
- if e == nil {
- w.a.V(2).M(host).Info("PVC (%s/%s/%s/%s) found", namespace, host.GetName(), volumeMount.Name, pvcName)
- return _pvc, volumeClaimTemplate, false, nil
- }
-
- // We have an error. PVC not fetched
-
- if !apiErrors.IsNotFound(e) {
- // In case of any non-NotFound API error - unable to proceed
- w.a.M(host).F().Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, e)
- return nil, nil, false, e
- }
-
- // We have NotFound error - PVC not found
- // This is not an error per se, means PVC is not created (yet)?
- w.a.V(2).M(host).Info("PVC (%s/%s/%s/%s) not found", namespace, host.GetName(), volumeMount.Name, pvcName)
-
- if creator.OperatorShouldCreatePVC(host, volumeClaimTemplate) {
- // Operator is in charge of PVCs
- // Create PVC model.
- pvc = w.task.creator.CreatePVC(pvcName, host, &volumeClaimTemplate.Spec)
- w.a.V(1).M(host).Info("PVC (%s/%s/%s/%s) model provided by the operator", namespace, host.GetName(), volumeMount.Name, pvcName)
- return pvc, volumeClaimTemplate, true, nil
- }
-
- // PVC is not available and the operator is not expected to create PVC
- w.a.V(1).M(host).Info("PVC (%s/%s/%s/%s) not found and model will not be provided by the operator", namespace, host.GetName(), volumeMount.Name, pvcName)
- return nil, volumeClaimTemplate, false, nil
-}
-
-var errNilPVC = fmt.Errorf("nil PVC, nothing to reconcile")
-
-// reconcilePVC reconciles specified PVC
-func (w *worker) reconcilePVC(
- ctx context.Context,
- pvc *core.PersistentVolumeClaim,
- host *api.ChiHost,
- template *api.VolumeClaimTemplate,
-) (*core.PersistentVolumeClaim, error) {
- if pvc == nil {
- w.a.V(2).M(host).F().Info("nil PVC, nothing to reconcile")
- return nil, errNilPVC
- }
-
- w.a.V(2).M(host).S().Info("reconcile PVC (%s/%s/%s)", pvc.Namespace, pvc.Name, host.GetName())
- defer w.a.V(2).M(host).E().Info("reconcile PVC (%s/%s/%s)", pvc.Namespace, pvc.Name, host.GetName())
-
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil, fmt.Errorf("task is done")
- }
-
- w.applyPVCResourcesRequests(pvc, template)
- pvc = w.task.creator.PreparePersistentVolumeClaim(pvc, host, template)
- return w.c.updatePersistentVolumeClaim(ctx, pvc)
-}
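
Elsewhere in this change (`reconcileStatefulSetOpts.SetForceRecreate()` in `reconcileHostMain`), options are extended fluently even when the pointer is still nil. A sketch of that nil-safe setter idiom, assuming `statefulset.ReconcileOptions` follows it:

```go
package main

import "fmt"

// ReconcileOptions sketch: setters allocate on first use, so a nil pointer
// can be extended fluently -- the idiom behind opts.SetForceRecreate().
type ReconcileOptions struct {
	forceRecreate bool
}

func (o *ReconcileOptions) SetForceRecreate() *ReconcileOptions {
	if o == nil {
		o = &ReconcileOptions{}
	}
	o.forceRecreate = true
	return o
}

func (o *ReconcileOptions) IsForceRecreate() bool {
	return o != nil && o.forceRecreate
}

func main() {
	var opts *ReconcileOptions          // nil until some condition requires a recreate
	fmt.Println(opts.IsForceRecreate()) // false: safe on a nil receiver
	opts = opts.SetForceRecreate()
	fmt.Println(opts.IsForceRecreate()) // true
}
```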
diff --git a/pkg/controller/chi/worker-chit-reconciler.go b/pkg/controller/chi/worker-chit-reconciler.go
index 34db29af6..caa8d96a5 100644
--- a/pkg/controller/chi/worker-chit-reconciler.go
+++ b/pkg/controller/chi/worker-chit-reconciler.go
@@ -36,28 +36,28 @@ func (w *worker) shouldUpdateCHITList() bool {
// addChit sync new CHIT - creates all its resources
func (w *worker) addChit(chit *api.ClickHouseInstallationTemplate) error {
if w.shouldUpdateCHITList() {
- log.V(1).M(chit).F().Info("Add CHIT: %s/%s", chit.Namespace, chit.Name)
+ log.V(1).M(chit).F().Info("Add CHIT: %s/%s", chit.GetNamespace(), chit.GetName())
chop.Config().AddCHITemplate((*api.ClickHouseInstallation)(chit))
} else {
- log.V(1).M(chit).F().Info("CHIT will not be added: %s/%s", chit.Namespace, chit.Name)
+ log.V(1).M(chit).F().Info("CHIT will not be added: %s/%s", chit.GetNamespace(), chit.GetName())
}
return nil
}
// updateChit sync CHIT which was already created earlier
func (w *worker) updateChit(old, new *api.ClickHouseInstallationTemplate) error {
- if old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion {
- log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.ObjectMeta.ResourceVersion)
+ if old.GetObjectMeta().GetResourceVersion() == new.GetObjectMeta().GetResourceVersion() {
+ log.V(2).M(old).F().Info("ResourceVersion did not change: %s", old.GetObjectMeta().GetResourceVersion())
// No need to react
return nil
}
- log.V(1).M(new).F().Info("ResourceVersion change: %s to %s", old.ObjectMeta.ResourceVersion, new.ObjectMeta.ResourceVersion)
+ log.V(1).M(new).F().Info("ResourceVersion change: %s to %s", old.GetObjectMeta().GetResourceVersion(), new.GetObjectMeta().GetResourceVersion())
if w.shouldUpdateCHITList() {
- log.V(1).M(new).F().Info("Update CHIT: %s/%s", new.Namespace, new.Name)
+ log.V(1).M(new).F().Info("Update CHIT: %s/%s", new.GetNamespace(), new.GetName())
chop.Config().UpdateCHITemplate((*api.ClickHouseInstallation)(new))
} else {
- log.V(1).M(new).F().Info("CHIT will not be updated: %s/%s", new.Namespace, new.Name)
+ log.V(1).M(new).F().Info("CHIT will not be updated: %s/%s", new.GetNamespace(), new.GetName())
}
return nil
}
@@ -67,10 +67,10 @@ func (w *worker) deleteChit(chit *api.ClickHouseInstallationTemplate) error {
log.V(1).M(chit).F().P()
if w.shouldUpdateCHITList() {
- log.V(1).M(chit).F().Info("Delete CHIT: %s/%s", chit.Namespace, chit.Name)
+ log.V(1).M(chit).F().Info("Delete CHIT: %s/%s", chit.GetNamespace(), chit.GetName())
chop.Config().DeleteCHITemplate((*api.ClickHouseInstallation)(chit))
} else {
- log.V(1).M(chit).F().Info("CHIT will not be deleted: %s/%s", chit.Namespace, chit.Name)
+ log.V(1).M(chit).F().Info("CHIT will not be deleted: %s/%s", chit.GetNamespace(), chit.GetName())
}
return nil
}
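
`updateChit` short-circuits when the ResourceVersion did not move, since informers periodically re-deliver unchanged objects. A minimal sketch of that guard:

```go
package main

import (
	"fmt"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shouldReactToUpdate mirrors the updateChit guard: identical ResourceVersions
// mean the informer re-delivered the same object and no reconcile is needed.
func shouldReactToUpdate(old, new *meta.ObjectMeta) bool {
	return old.GetResourceVersion() != new.GetResourceVersion()
}

func main() {
	a := &meta.ObjectMeta{ResourceVersion: "100"}
	b := &meta.ObjectMeta{ResourceVersion: "100"}
	c := &meta.ObjectMeta{ResourceVersion: "101"}
	fmt.Println(shouldReactToUpdate(a, b)) // false: nothing changed
	fmt.Println(shouldReactToUpdate(a, c)) // true: spec or status moved
}
```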
diff --git a/pkg/controller/chi/worker-config-map.go b/pkg/controller/chi/worker-config-map.go
new file mode 100644
index 000000000..1817a7ec2
--- /dev/null
+++ b/pkg/controller/chi/worker-config-map.go
@@ -0,0 +1,119 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ "time"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcileConfigMap reconciles core.ConfigMap which belongs to specified CHI
+func (w *worker) reconcileConfigMap(
+ ctx context.Context,
+ cr api.ICustomResource,
+ configMap *core.ConfigMap,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
+
+ // Check whether this object already exists in k8s
+ curConfigMap, err := w.c.getConfigMap(ctx, configMap.GetObjectMeta(), true)
+
+ if curConfigMap != nil {
+ // We have ConfigMap - try to update it
+ err = w.updateConfigMap(ctx, cr, configMap)
+ }
+
+ if apiErrors.IsNotFound(err) {
+ // ConfigMap not found - even during Update process - try to create it
+ err = w.createConfigMap(ctx, cr, configMap)
+ }
+
+ if err != nil {
+ w.a.WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED to reconcile ConfigMap: %s CHI: %s ", configMap.GetName(), cr.GetName())
+ }
+
+ return err
+}
+
+// updateConfigMap updates an existing core.ConfigMap
+func (w *worker) updateConfigMap(ctx context.Context, cr api.ICustomResource, configMap *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ updatedConfigMap, err := w.c.updateConfigMap(ctx, configMap)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Update ConfigMap %s/%s", configMap.Namespace, configMap.Name)
+ if updatedConfigMap.ResourceVersion != configMap.ResourceVersion {
+ w.task.SetCmUpdate(time.Now())
+ }
+ } else {
+ w.a.WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Update ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err)
+ }
+
+ return err
+}
+
+// createConfigMap creates a new core.ConfigMap
+func (w *worker) createConfigMap(ctx context.Context, cr api.ICustomResource, configMap *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ err := w.c.createConfigMap(ctx, configMap)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Create ConfigMap %s", util.NamespaceNameString(configMap))
+ } else {
+ w.a.WithEvent(cr, common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Create ConfigMap %s failed with error %v", util.NamespaceNameString(configMap), err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chi/worker-deleter.go b/pkg/controller/chi/worker-deleter.go
index d908bd384..003c3104e 100644
--- a/pkg/controller/chi/worker-deleter.go
+++ b/pkg/controller/chi/worker-deleter.go
@@ -19,116 +19,113 @@ import (
"time"
core "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
- "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/cmd_queue"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
"github.com/altinity/clickhouse-operator/pkg/util"
)
-func (w *worker) clean(ctx context.Context, chi *api.ClickHouseInstallation) {
+func (w *worker) clean(ctx context.Context, cr api.ICustomResource) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return
}
w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileInProgress).
- WithStatusAction(chi).
- M(chi).F().
+ WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileInProgress).
+ WithStatusAction(cr).
+ M(cr).F().
Info("remove items scheduled for deletion")
// Remove deleted items
- w.a.V(1).M(chi).F().Info("List of objects which have failed to reconcile:\n%s", w.task.registryFailed)
- w.a.V(1).M(chi).F().Info("List of successfully reconciled objects:\n%s", w.task.registryReconciled)
- objs := w.c.discovery(ctx, chi)
- need := w.task.registryReconciled
- w.a.V(1).M(chi).F().Info("Existing objects:\n%s", objs)
+ w.a.V(1).M(cr).F().Info("List of objects which have failed to reconcile:\n%s", w.task.RegistryFailed)
+ w.a.V(1).M(cr).F().Info("List of successfully reconciled objects:\n%s", w.task.RegistryReconciled)
+ objs := w.c.discovery(ctx, cr)
+ need := w.task.RegistryReconciled()
+ w.a.V(1).M(cr).F().Info("Existing objects:\n%s", objs)
objs.Subtract(need)
- w.a.V(1).M(chi).F().Info("Non-reconciled objects:\n%s", objs)
- if w.purge(ctx, chi, objs, w.task.registryFailed) > 0 {
- w.c.enqueueObject(NewDropDns(&chi.ObjectMeta))
+ w.a.V(1).M(cr).F().Info("Non-reconciled objects:\n%s", objs)
+ if w.purge(ctx, cr, objs, w.task.RegistryFailed()) > 0 {
+ w.c.enqueueObject(cmd_queue.NewDropDns(cr))
util.WaitContextDoneOrTimeout(ctx, 1*time.Minute)
}
- chi.EnsureStatus().SyncHostTablesCreated()
+ cr.(*api.ClickHouseInstallation).EnsureStatus().SyncHostTablesCreated()
}
// dropReplicas cleans Zookeeper for replicas that are properly deleted - via AP
-func (w *worker) dropReplicas(ctx context.Context, chi *api.ClickHouseInstallation, ap *model.ActionPlan) {
+func (w *worker) dropReplicas(ctx context.Context, cr api.ICustomResource, ap *action_plan.ActionPlan) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return
}
- w.a.V(1).M(chi).F().S().Info("drop replicas based on AP")
+ w.a.V(1).M(cr).F().S().Info("drop replicas based on AP")
cnt := 0
ap.WalkRemoved(
- func(cluster *api.Cluster) {
+ func(cluster api.ICluster) {
},
- func(shard *api.ChiShard) {
+ func(shard api.IShard) {
},
- func(host *api.ChiHost) {
+ func(host *api.Host) {
_ = w.dropReplica(ctx, host)
cnt++
},
)
- w.a.V(1).M(chi).F().E().Info("processed replicas: %d", cnt)
-}
-
-func shouldPurgeStatefulSet(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- if reconcileFailedObjs.HasStatefulSet(m) {
- return chi.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
- }
- return chi.GetReconciling().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
-}
-
-func shouldPurgePVC(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- if reconcileFailedObjs.HasPVC(m) {
- return chi.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
- }
- return chi.GetReconciling().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
-}
-
-func shouldPurgeConfigMap(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- if reconcileFailedObjs.HasConfigMap(m) {
- return chi.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
- }
- return chi.GetReconciling().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
-}
-
-func shouldPurgeService(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- if reconcileFailedObjs.HasService(m) {
- return chi.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
- }
- return chi.GetReconciling().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
+ w.a.V(1).M(cr).F().E().Info("processed replicas: %d", cnt)
}
-func shouldPurgeSecret(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- if reconcileFailedObjs.HasSecret(m) {
- return chi.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
+// purge deletes leftover objects according to the cleanup policy and returns the number of deleted StatefulSets
+func (w *worker) purge(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reg *model.Registry,
+ reconcileFailedObjs *model.Registry,
+) (cnt int) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return cnt
}
- return chi.GetReconciling().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
-}
-func shouldPurgePDB(chi *api.ClickHouseInstallation, reconcileFailedObjs *model.Registry, m meta.ObjectMeta) bool {
- return true
+ reg.Walk(func(entityType model.EntityType, m meta.Object) {
+ switch entityType {
+ case model.StatefulSet:
+ cnt += w.purgeStatefulSet(ctx, cr, reconcileFailedObjs, m)
+ case model.PVC:
+ w.purgePVC(ctx, cr, reconcileFailedObjs, m)
+ case model.ConfigMap:
+ w.purgeConfigMap(ctx, cr, reconcileFailedObjs, m)
+ case model.Service:
+ w.purgeService(ctx, cr, reconcileFailedObjs, m)
+ case model.Secret:
+ w.purgeSecret(ctx, cr, reconcileFailedObjs, m)
+ case model.PDB:
+ w.purgePDB(ctx, cr, reconcileFailedObjs, m)
+ }
+ })
+ return cnt
}
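
`purge` walks a typed registry and dispatches per entity type. A sketch of that registry-walk shape (a toy registry, not `model.Registry`):

```go
package main

import "fmt"

type entityType string

const (
	statefulSet entityType = "StatefulSet"
	pvc         entityType = "PVC"
)

// registry sketch: entity type mapped to object names, walked with a
// callback the same way purge walks model.Registry.
type registry map[entityType][]string

func (r registry) walk(f func(t entityType, name string)) {
	for t, names := range r {
		for _, name := range names {
			f(t, name)
		}
	}
}

func main() {
	leftovers := registry{
		statefulSet: {"chi-demo-0-0"},
		pvc:         {"data-chi-demo-0-0"},
	}
	purged := 0
	leftovers.walk(func(t entityType, name string) {
		fmt.Printf("delete %s %s\n", t, name)
		purged++
	})
	fmt.Println("purged:", purged)
}
```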
func (w *worker) purgeStatefulSet(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) int {
- if shouldPurgeStatefulSet(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete StatefulSet: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.AppsV1().StatefulSets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete StatefulSet: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgeStatefulSet(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete StatefulSet: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.STS().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete StatefulSet: %s, err: %v", util.NamespaceNameString(m), err)
}
return 1
}
@@ -137,15 +134,15 @@ func (w *worker) purgeStatefulSet(
func (w *worker) purgePVC(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) {
- if shouldPurgePVC(chi, reconcileFailedObjs, m) {
- if model.GetReclaimPolicy(m) == api.PVCReclaimPolicyDelete {
- w.a.V(1).M(m).F().Info("Delete PVC: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete PVC: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgePVC(cr, reconcileFailedObjs, m) {
+ if chiLabeler.New(nil).GetReclaimPolicy(m) == api.PVCReclaimPolicyDelete {
+ w.a.V(1).M(m).F().Info("Delete PVC: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Storage().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete PVC: %s, err: %v", util.NamespaceNameString(m), err)
}
}
}
@@ -153,106 +150,114 @@ func (w *worker) purgePVC(
func (w *worker) purgeConfigMap(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) {
- if shouldPurgeConfigMap(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete ConfigMap: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.CoreV1().ConfigMaps(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete ConfigMap: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgeConfigMap(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete ConfigMap: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.ConfigMap().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete ConfigMap: %s, err: %v", util.NamespaceNameString(m), err)
}
}
}
func (w *worker) purgeService(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) {
- if shouldPurgeService(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete Service: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.CoreV1().Services(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete Service: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgeService(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete Service: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Service().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete Service: %s, err: %v", util.NamespaceNameString(m), err)
}
}
}
func (w *worker) purgeSecret(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) {
- if shouldPurgeSecret(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete Secret: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.CoreV1().Secrets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete Secret: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgeSecret(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete Secret: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Secret().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete Secret: %s, err: %v", util.NamespaceNameString(m), err)
}
}
}
func (w *worker) purgePDB(
ctx context.Context,
- chi *api.ClickHouseInstallation,
+ cr api.ICustomResource,
reconcileFailedObjs *model.Registry,
- m meta.ObjectMeta,
+ m meta.Object,
) {
- if shouldPurgePDB(chi, reconcileFailedObjs, m) {
- w.a.V(1).M(m).F().Info("Delete PDB: %s/%s", m.Namespace, m.Name)
- if err := w.c.kubeClient.PolicyV1().PodDisruptionBudgets(m.Namespace).Delete(ctx, m.Name, controller.NewDeleteOptions()); err != nil {
- w.a.V(1).M(m).F().Error("FAILED to delete PDB: %s/%s, err: %v", m.Namespace, m.Name, err)
+ if shouldPurgePDB(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete PDB: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.PDB().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete PDB: %s, err: %v", util.NamespaceNameString(m), err)
}
}
}
-// purge
-func (w *worker) purge(
- ctx context.Context,
- chi *api.ClickHouseInstallation,
- reg *model.Registry,
- reconcileFailedObjs *model.Registry,
-) (cnt int) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return cnt
+func shouldPurgeStatefulSet(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasStatefulSet(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
}
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+}
- reg.Walk(func(entityType model.EntityType, m meta.ObjectMeta) {
- switch entityType {
- case model.StatefulSet:
- cnt += w.purgeStatefulSet(ctx, chi, reconcileFailedObjs, m)
- case model.PVC:
- w.purgePVC(ctx, chi, reconcileFailedObjs, m)
- case model.ConfigMap:
- w.purgeConfigMap(ctx, chi, reconcileFailedObjs, m)
- case model.Service:
- w.purgeService(ctx, chi, reconcileFailedObjs, m)
- case model.Secret:
- w.purgeSecret(ctx, chi, reconcileFailedObjs, m)
- case model.PDB:
- w.purgePDB(ctx, chi, reconcileFailedObjs, m)
- }
- })
- return cnt
+func shouldPurgePVC(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasPVC(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgeConfigMap(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasConfigMap(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgeService(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasService(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
}
-// discoveryAndDeleteCHI deletes all kubernetes resources related to chi *chop.ClickHouseInstallation
-func (w *worker) discoveryAndDeleteCHI(ctx context.Context, chi *api.ClickHouseInstallation) error {
+func shouldPurgeSecret(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasSecret(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgePDB(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ return true
+}
+
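All the `shouldPurge*` helpers share one rule: objects that failed reconcile use the `reconcileFailedObjects` cleanup policy, everything else falls under the `unknownObjects` policy, and purge happens only on `Delete`. A runnable restatement:

```go
package main

import "fmt"

type cleanupPolicy string

const (
	cleanupDelete   cleanupPolicy = "Delete"
	cleanupPreserve cleanupPolicy = "Preserve"
)

// shouldPurge restates the shared shouldPurge* rule: pick the policy bucket by
// whether the object failed reconcile or is simply unknown, then compare
// against Delete. A sketch, assuming just these two policy buckets.
func shouldPurge(failedReconcile bool, failedPolicy, unknownPolicy cleanupPolicy) bool {
	if failedReconcile {
		return failedPolicy == cleanupDelete
	}
	return unknownPolicy == cleanupDelete
}

func main() {
	// Unknown leftovers get deleted; failed objects are kept for debugging.
	fmt.Println(shouldPurge(false, cleanupPreserve, cleanupDelete)) // true
	fmt.Println(shouldPurge(true, cleanupPreserve, cleanupDelete))  // false
}
```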
+// discoveryAndDeleteCR deletes all kubernetes resources related to the given custom resource
+func (w *worker) discoveryAndDeleteCR(ctx context.Context, cr api.ICustomResource) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- objs := w.c.discovery(ctx, chi)
+ objs := w.c.discovery(ctx, cr)
if objs.NumStatefulSet() > 0 {
- chi.WalkHosts(func(host *api.ChiHost) error {
+ cr.WalkHosts(func(host *api.Host) error {
_ = w.ensureClusterSchemer(host).HostSyncTables(ctx, host)
return nil
})
}
- w.purge(ctx, chi, objs, nil)
+ w.purge(ctx, cr, objs, nil)
return nil
}
@@ -267,9 +272,9 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
defer w.a.V(2).M(chi).E().P()
var err error
- chi, err = w.normalizer.CreateTemplatedCHI(chi, normalizer.NewOptions())
+ chi, err = w.normalizer.CreateTemplated(chi, normalizer.NewOptions())
if err != nil {
- w.a.WithEvent(chi, eventActionDelete, eventReasonDeleteFailed).
+ w.a.WithEvent(chi, common.EventActionDelete, common.EventReasonDeleteFailed).
WithStatusError(chi).
M(chi).F().
Error("Delete CHI failed - unable to normalize: %q", err)
@@ -278,15 +283,15 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
// Announce delete procedure
w.a.V(1).
- WithEvent(chi, eventActionDelete, eventReasonDeleteStarted).
+ WithEvent(chi, common.EventActionDelete, common.EventReasonDeleteStarted).
WithStatusAction(chi).
M(chi).F().
Info("Delete CHI started")
chi.EnsureStatus().DeleteStart()
- if err := w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
+ if err := w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
TolerateAbsence: true,
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
MainFields: true,
},
}); err != nil {
@@ -300,16 +305,16 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
w.c.deleteWatch(chi)
// Delete Service
- _ = w.c.deleteServiceCHI(ctx, chi)
+ _ = w.c.deleteServiceCR(ctx, chi)
- chi.WalkHosts(func(host *api.ChiHost) error {
+ chi.WalkHosts(func(host *api.Host) error {
_ = w.ensureClusterSchemer(host).HostSyncTables(ctx, host)
return nil
})
// Delete all clusters
- chi.WalkClusters(func(cluster *api.Cluster) error {
- return w.deleteCluster(ctx, chi, cluster)
+ chi.WalkClusters(func(cluster api.ICluster) error {
+ return w.deleteCluster(ctx, chi, cluster.(*api.Cluster))
})
if util.IsContextDone(ctx) {
@@ -321,7 +326,7 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
_ = w.c.deleteConfigMapsCHI(ctx, chi)
w.a.V(1).
- WithEvent(chi, eventActionDelete, eventReasonDeleteCompleted).
+ WithEvent(chi, common.EventActionDelete, common.EventReasonDeleteCompleted).
WithStatusAction(chi).
M(chi).F().
Info("Delete CHI completed")
@@ -330,7 +335,7 @@ func (w *worker) deleteCHIProtocol(ctx context.Context, chi *api.ClickHouseInsta
}
// canDropReplica
-func (w *worker) canDropReplica(host *api.ChiHost, opts ...*dropReplicaOptions) (can bool) {
+func (w *worker) canDropReplica(ctx context.Context, host *api.Host, opts ...*dropReplicaOptions) (can bool) {
o := NewDropReplicaOptionsArr(opts...).First()
if o.ForceDrop() {
@@ -338,10 +343,10 @@ func (w *worker) canDropReplica(host *api.ChiHost, opts ...*dropReplicaOptions)
}
can = true
- w.c.walkDiscoveredPVCs(host, func(pvc *core.PersistentVolumeClaim) {
+ storage.NewStoragePVC(w.c.kube.Storage()).WalkDiscoveredPVCs(ctx, host, func(pvc *core.PersistentVolumeClaim) {
// Replica's state has to be kept in Zookeeper for retained volumes.
// ClickHouse expects to have state of the non-empty replica in-place when replica rejoins.
- if model.GetReclaimPolicy(pvc.ObjectMeta) == api.PVCReclaimPolicyRetain {
+ if chiLabeler.New(nil).GetReclaimPolicy(pvc.GetObjectMeta()) == api.PVCReclaimPolicyRetain {
			w.a.V(1).F().Info("PVC: %s/%s blocks drop replica. Reclaim policy: %s", pvc.Namespace, pvc.Name, api.PVCReclaimPolicyRetain.String())
can = false
}
@@ -377,7 +382,7 @@ func (a dropReplicaOptionsArr) First() *dropReplicaOptions {
}
// dropReplica drops replica's info from Zookeeper
-func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts ...*dropReplicaOptions) error {
+func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.Host, opts ...*dropReplicaOptions) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -388,13 +393,13 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
return nil
}
- if !w.canDropReplica(hostToDrop, opts...) {
+ if !w.canDropReplica(ctx, hostToDrop, opts...) {
w.a.V(1).F().Warning("CAN NOT drop replica. hostToDrop: %s", hostToDrop.GetName())
return nil
}
// Sometimes host to drop is already unavailable, so let's run SQL statement of the first replica in the shard
- var hostToRunOn *api.ChiHost
+ var hostToRunOn *api.Host
if shard := hostToDrop.GetShard(); shard != nil {
hostToRunOn = shard.FirstHost()
}
@@ -408,13 +413,13 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
if err == nil {
w.a.V(1).
- WithEvent(hostToRunOn.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(hostToRunOn.GetCHI()).
+ WithEvent(hostToRunOn.GetCR(), common.EventActionDelete, common.EventReasonDeleteCompleted).
+ WithStatusAction(hostToRunOn.GetCR()).
M(hostToRunOn).F().
Info("Drop replica host: %s in cluster: %s", hostToDrop.GetName(), hostToDrop.Runtime.Address.ClusterName)
} else {
- w.a.WithEvent(hostToRunOn.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(hostToRunOn.GetCHI()).
+ w.a.WithEvent(hostToRunOn.GetCR(), common.EventActionDelete, common.EventReasonDeleteFailed).
+ WithStatusError(hostToRunOn.GetCR()).
M(hostToRunOn).F().
Error("FAILED to drop replica on host: %s with error: %v", hostToDrop.GetName(), err)
}
@@ -423,27 +428,27 @@ func (w *worker) dropReplica(ctx context.Context, hostToDrop *api.ChiHost, opts
}
// deleteTables
-func (w *worker) deleteTables(ctx context.Context, host *api.ChiHost) error {
+func (w *worker) deleteTables(ctx context.Context, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
}
- if !model.HostCanDeleteAllPVCs(host) {
+ if !w.c.pvcDeleter.HostCanDeleteAllPVCs(host) {
return nil
}
err := w.ensureClusterSchemer(host).HostDropTables(ctx, host)
if err == nil {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteCompleted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Deleted tables on host: %s replica: %d to shard: %d in cluster: %s",
host.GetName(), host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
} else {
- w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(host.GetCHI()).
+ w.a.WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteFailed).
+ WithStatusError(host.GetCR()).
M(host).F().
Error("FAILED to delete tables on host: %s with error: %v", host.GetName(), err)
}
@@ -453,7 +458,7 @@ func (w *worker) deleteTables(ctx context.Context, host *api.ChiHost) error {
// deleteHost deletes all kubernetes resources related to a host
// chi is the new CHI in which there will be no more this host
-func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation, host *api.ChiHost) error {
+func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation, host *api.Host) error {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return nil
@@ -463,15 +468,15 @@ func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation
defer w.a.V(2).M(host).E().Info(host.Runtime.Address.HostName)
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteStarted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteStarted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Delete host: %s/%s - started", host.Runtime.Address.ClusterName, host.GetName())
var err error
- if host.Runtime.CurStatefulSet, err = w.c.getStatefulSet(host); err != nil {
- w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.GetCHI()).
+ if host.Runtime.CurStatefulSet, err = w.c.kube.STS().Get(ctx, host); err != nil {
+ w.a.WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteCompleted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Delete host: %s/%s - completed StatefulSet not found - already deleted? err: %v",
host.Runtime.Address.ClusterName, host.GetName(), err)
@@ -490,22 +495,22 @@ func (w *worker) deleteHost(ctx context.Context, chi *api.ClickHouseInstallation
// When deleting the whole CHI (not particular host), CHI may already be unavailable, so update CHI tolerantly
chi.EnsureStatus().HostDeleted()
- _ = w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
+ _ = w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
TolerateAbsence: true,
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
MainFields: true,
},
})
if err == nil {
w.a.V(1).
- WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteCompleted).
- WithStatusAction(host.GetCHI()).
+ WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteCompleted).
+ WithStatusAction(host.GetCR()).
M(host).F().
Info("Delete host: %s/%s - completed", host.Runtime.Address.ClusterName, host.GetName())
} else {
- w.a.WithEvent(host.GetCHI(), eventActionDelete, eventReasonDeleteFailed).
- WithStatusError(host.GetCHI()).
+ w.a.WithEvent(host.GetCR(), common.EventActionDelete, common.EventReasonDeleteFailed).
+ WithStatusError(host.GetCR()).
M(host).F().
Error("FAILED Delete host: %s/%s - completed", host.Runtime.Address.ClusterName, host.GetName())
}
@@ -525,7 +530,7 @@ func (w *worker) deleteShard(ctx context.Context, chi *api.ClickHouseInstallatio
defer w.a.V(2).M(shard).E().P()
w.a.V(1).
- WithEvent(shard.Runtime.CHI, eventActionDelete, eventReasonDeleteStarted).
+ WithEvent(shard.Runtime.CHI, common.EventActionDelete, common.EventReasonDeleteStarted).
WithStatusAction(shard.Runtime.CHI).
M(shard).F().
Info("Delete shard: %s/%s - started", shard.Runtime.Address.Namespace, shard.Name)
@@ -534,12 +539,12 @@ func (w *worker) deleteShard(ctx context.Context, chi *api.ClickHouseInstallatio
_ = w.c.deleteServiceShard(ctx, shard)
// Delete all replicas
- shard.WalkHosts(func(host *api.ChiHost) error {
+ shard.WalkHosts(func(host *api.Host) error {
return w.deleteHost(ctx, chi, host)
})
w.a.V(1).
- WithEvent(shard.Runtime.CHI, eventActionDelete, eventReasonDeleteCompleted).
+ WithEvent(shard.Runtime.CHI, common.EventActionDelete, common.EventReasonDeleteCompleted).
WithStatusAction(shard.Runtime.CHI).
M(shard).F().
Info("Delete shard: %s/%s - completed", shard.Runtime.Address.Namespace, shard.Name)
@@ -559,7 +564,7 @@ func (w *worker) deleteCluster(ctx context.Context, chi *api.ClickHouseInstallat
defer w.a.V(2).M(cluster).E().P()
w.a.V(1).
- WithEvent(cluster.Runtime.CHI, eventActionDelete, eventReasonDeleteStarted).
+ WithEvent(cluster.Runtime.CHI, common.EventActionDelete, common.EventReasonDeleteStarted).
WithStatusAction(cluster.Runtime.CHI).
M(cluster).F().
Info("Delete cluster: %s/%s - started", cluster.Runtime.Address.Namespace, cluster.Name)
@@ -574,12 +579,12 @@ func (w *worker) deleteCluster(ctx context.Context, chi *api.ClickHouseInstallat
}
// Delete all shards
- cluster.WalkShards(func(index int, shard *api.ChiShard) error {
- return w.deleteShard(ctx, chi, shard)
+ cluster.WalkShards(func(index int, shard api.IShard) error {
+ return w.deleteShard(ctx, chi, shard.(*api.ChiShard))
})
w.a.V(1).
- WithEvent(cluster.Runtime.CHI, eventActionDelete, eventReasonDeleteCompleted).
+ WithEvent(cluster.Runtime.CHI, common.EventActionDelete, common.EventReasonDeleteCompleted).
WithStatusAction(cluster.Runtime.CHI).
M(cluster).F().
Info("Delete cluster: %s/%s - completed", cluster.Runtime.Address.Namespace, cluster.Name)
@@ -595,7 +600,7 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
}
// Do we have pending request for CHI to be deleted?
- if new.ObjectMeta.DeletionTimestamp.IsZero() {
+ if new.GetDeletionTimestamp().IsZero() {
// CHI is not being deleted and operator has not deleted anything.
return false
}
@@ -613,7 +618,7 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
crd, err := w.c.extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, "clickhouseinstallations.clickhouse.altinity.com", controller.NewGetOptions())
if err == nil {
// CRD is in place
- if crd.ObjectMeta.DeletionTimestamp.IsZero() {
+ if crd.GetObjectMeta().GetDeletionTimestamp().IsZero() {
// CRD is not being deleted. It is standard request to delete a CHI.
// Operator can delete all child resources.
w.a.V(1).M(new).F().Info("CRD: %s/%s is not being deleted, operator will delete child resources", crd.Namespace, crd.Name)
@@ -641,15 +646,15 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
return false
}
- if !util.InArray(FinalizerName, new.ObjectMeta.Finalizers) {
+ if !util.InArray(FinalizerName, new.GetFinalizers()) {
// No finalizer found, unexpected behavior
return false
}
_ = w.deleteCHIProtocol(ctx, new)
} else {
- new.EnsureRuntime().GetAttributes().SkipOwnerRef = true
- _ = w.reconcileCHI(ctx, old, new)
+ new.GetRuntime().GetAttributes().SetSkipOwnerRef(true)
+ _ = w.reconcileCR(ctx, old, new)
}
// We need to uninstall finalizer in order to allow k8s to delete CHI resource
@@ -661,43 +666,3 @@ func (w *worker) deleteCHI(ctx context.Context, old, new *api.ClickHouseInstalla
// CHI delete completed
return true
}
-
-func (w *worker) isLostPV(pvc *core.PersistentVolumeClaim) bool {
- if pvc == nil {
- return false
- }
-
- return pvc.Status.Phase == core.ClaimLost
-}
-
-func (w *worker) deletePVC(ctx context.Context, pvc *core.PersistentVolumeClaim) bool {
- w.a.V(1).M(pvc).F().S().Info("delete PVC with lost PV start: %s/%s", pvc.Namespace, pvc.Name)
- defer w.a.V(1).M(pvc).F().E().Info("delete PVC with lost PV end: %s/%s", pvc.Namespace, pvc.Name)
-
- w.a.V(2).M(pvc).F().Info("PVC with lost PV about to be deleted: %s/%s", pvc.Namespace, pvc.Name)
- w.c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, controller.NewDeleteOptions())
-
- for i := 0; i < 360; i++ {
-
- // Check availability
- w.a.V(2).M(pvc).F().Info("check PVC with lost PV availability: %s/%s", pvc.Namespace, pvc.Name)
- curPVC, err := w.c.kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, controller.NewGetOptions())
- if err != nil {
- if apiErrors.IsNotFound(err) {
-				// Not available - consider it to be deleted
- w.a.V(1).M(pvc).F().Warning("PVC with lost PV was deleted: %s/%s", pvc.Namespace, pvc.Name)
- return true
- }
- }
-
- // PVC is not deleted (yet?). May be it has finalizers installed. Need to clean them.
- if len(curPVC.Finalizers) > 0 {
- w.a.V(2).M(pvc).F().Info("clean finalizers for PVC with lost PV: %s/%s", pvc.Namespace, pvc.Name)
- curPVC.Finalizers = nil
- w.c.updatePersistentVolumeClaim(ctx, curPVC)
- }
- time.Sleep(10 * time.Second)
- }
-
- return false
-}
diff --git a/pkg/controller/chi/worker-exclude-include-wait.go b/pkg/controller/chi/worker-exclude-include-wait.go
new file mode 100644
index 000000000..16edf6830
--- /dev/null
+++ b/pkg/controller/chi/worker-exclude-include-wait.go
@@ -0,0 +1,356 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInstallation) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+ if chi.IsStopped() {
+ // No need to wait for stopped CHI
+ return
+ }
+ w.a.V(1).M(chi).F().S().Info("wait for IP addresses to be assigned to all pods")
+ start := time.Now()
+ w.c.poll(ctx, chi, func(c *api.ClickHouseInstallation, e error) bool {
+		// TODO fix later:
+		// the status IPs list can be empty, so instead of fetching pod IPs here,
+		// they should be set into the status during reconcile:
+		//   podIPs := c.getPodsIPs(chi)
+		//   cur.EnsureStatus().SetPodIPs(podIPs)
+		// and then read here via:
+		//   c.Status.GetPodIPs()
+ podIPs := w.c.getPodsIPs(chi)
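+		// Consider IP assignment complete once every pod known to the CHI status has an IP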
+ if len(podIPs) >= len(c.Status.GetPods()) {
+ // Stop polling
+ w.a.V(1).M(c).Info("all IP addresses are in place")
+ return false
+ }
+		if time.Since(start) > 1*time.Minute {
+ // Stop polling
+ w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed")
+ return false
+ }
+ // Continue polling
+ w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet")
+ return true
+ })
+}
+
+// excludeHost excludes host from ClickHouse clusters if required
+func (w *worker) excludeHost(ctx context.Context, host *api.Host) bool {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return false
+ }
+
+ log.V(1).M(host).F().S().Info("exclude host start")
+ defer log.V(1).M(host).F().E().Info("exclude host end")
+
+ if !w.shouldExcludeHost(host) {
+ w.a.V(1).
+ M(host).F().
+ Info("No need to exclude host from cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("Exclude host from cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ _ = w.excludeHostFromService(ctx, host)
+ w.excludeHostFromClickHouseCluster(ctx, host)
+ return true
+}
+
+// completeQueries wait for running queries to complete
+func (w *worker) completeQueries(ctx context.Context, host *api.Host) error {
+ log.V(1).M(host).F().S().Info("complete queries start")
+ defer log.V(1).M(host).F().E().Info("complete queries end")
+
+ if w.shouldWaitQueries(host) {
+ return w.waitHostNoActiveQueries(ctx, host)
+ }
+
+ return nil
+}
+
+// shouldIncludeHost determines whether the host is to be included into the cluster after reconciling
+func (w *worker) shouldIncludeHost(host *api.Host) bool {
+ switch {
+ case host.IsStopped():
+ // No need to include stopped host
+ return false
+ }
+ return true
+}
+
+// includeHost includes host back into ClickHouse clusters
+func (w *worker) includeHost(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ if !w.shouldIncludeHost(host) {
+ w.a.V(1).
+ M(host).F().
+ Info("No need to include host into cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return nil
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("Include host into cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ w.includeHostIntoClickHouseCluster(ctx, host)
+ _ = w.includeHostIntoService(ctx, host)
+
+ return nil
+}
+
+// excludeHostFromService
+func (w *worker) excludeHostFromService(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _ = w.c.ctrlLabeler.DeleteReadyMarkOnPodAndService(ctx, host)
+ return nil
+}
+
+// includeHostIntoService
+func (w *worker) includeHostIntoService(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _ = w.c.ctrlLabeler.SetReadyMarkOnPodAndService(ctx, host)
+ return nil
+}
+
+// excludeHostFromClickHouseCluster excludes host from ClickHouse configuration
+func (w *worker) excludeHostFromClickHouseCluster(ctx context.Context, host *api.Host) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("going to exclude host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ // Specify in options to exclude this host from ClickHouse config file
+ host.GetCR().GetRuntime().LockCommonConfig()
+ host.GetReconcileAttributes().SetExclude()
+ _ = w.reconcileConfigMapCommon(ctx, host.GetCR(), w.options())
+ host.GetCR().GetRuntime().UnlockCommonConfig()
+
+ if !w.shouldWaitExcludeHost(host) {
+ return
+ }
+ // Wait for ClickHouse to pick-up the change
+ _ = w.waitHostNotInCluster(ctx, host)
+}
+
+// includeHostIntoClickHouseCluster includes host into ClickHouse configuration
+func (w *worker) includeHostIntoClickHouseCluster(ctx context.Context, host *api.Host) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("going to include host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ // Specify in options to add this host into ClickHouse config file
+ host.GetCR().GetRuntime().LockCommonConfig()
+ host.GetReconcileAttributes().UnsetExclude()
+ _ = w.reconcileConfigMapCommon(ctx, host.GetCR(), w.options())
+ host.GetCR().GetRuntime().UnlockCommonConfig()
+
+ if !w.shouldWaitIncludeHost(host) {
+ return
+ }
+ // Wait for ClickHouse to pick-up the change
+ _ = w.waitHostInCluster(ctx, host)
+}
+
+// shouldExcludeHost determines whether the host is to be excluded from the cluster before reconciling
+func (w *worker) shouldExcludeHost(host *api.Host) bool {
+ switch {
+ case host.IsStopped():
+ w.a.V(1).
+ M(host).F().
+ Info("Host is stopped, no need to exclude stopped host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ case host.GetShard().HostsCount() == 1:
+ w.a.V(1).
+ M(host).F().
+ Info("Host is the only host in the shard (means no replication), no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ case w.shouldForceRestartHost(host):
+ w.a.V(1).
+ M(host).F().
+ Info("Host should be restarted, need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return true
+ case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
+ w.a.V(1).
+ M(host).F().
+ Info("Host is new, no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame:
+ w.a.V(1).
+ M(host).F().
+ Info("Host is the same, would not be updated, no need to exclude. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("Host should be excluded. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ return true
+}
+
+// shouldWaitExcludeHost determines whether reconciler should wait for the host to be excluded from cluster
+func (w *worker) shouldWaitExcludeHost(host *api.Host) bool {
+ // Check CHI settings
+ switch {
+ case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ w.a.V(1).
+ M(host).F().
+ Info("IsReconcilingPolicyWait() need to wait to exclude host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return true
+ case host.GetCR().GetReconciling().IsReconcilingPolicyNoWait():
+ w.a.V(1).
+ M(host).F().
+ Info("IsReconcilingPolicyNoWait() need NOT to wait to exclude host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("wait to exclude host fallback to operator's settings. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return chop.Config().Reconcile.Host.Wait.Exclude.Value()
+}
+
+// shouldWaitQueries determines whether reconciler should wait for the host to complete running queries
+func (w *worker) shouldWaitQueries(host *api.Host) bool {
+ switch {
+ case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
+ w.a.V(1).
+ M(host).F().
+ Info("No need to wait for queries to complete on a host, host is a new one. "+
+ "Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+ case chop.Config().Reconcile.Host.Wait.Queries.Value():
+ w.a.V(1).
+ M(host).F().
+ Info("Will wait for queries to complete on a host according to CHOp config '.reconcile.host.wait.queries' setting. "+
+ "Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return true
+ case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ w.a.V(1).
+ M(host).F().
+ Info("Will wait for queries to complete on a host according to CHI 'reconciling.policy' setting. "+
+ "Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return true
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("Will NOT wait for queries to complete on a host. "+
+ "Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return false
+}
+
+// shouldWaitIncludeHost determines whether reconciler should wait for the host to be included into cluster
+func (w *worker) shouldWaitIncludeHost(host *api.Host) bool {
+ status := host.GetReconcileAttributes().GetStatus()
+ switch {
+ case status == api.ObjectStatusNew:
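+		// New host is not yet in the cluster - there is nothing to wait for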
+ return false
+ case status == api.ObjectStatusSame:
+		// The host was not modified, no need to wait for it to be included - it already is
+ return false
+ case host.GetShard().HostsCount() == 1:
+		// No need to wait for a one-host shard
+ return false
+ case host.GetCR().GetReconciling().IsReconcilingPolicyWait():
+ // Check CHI settings - explicitly requested to wait
+ return true
+ case host.GetCR().GetReconciling().IsReconcilingPolicyNoWait():
+ // Check CHI settings - explicitly requested to not wait
+ return false
+ }
+
+ // Fallback to operator's settings
+ return chop.Config().Reconcile.Host.Wait.Include.Value()
+}
+
+// waitHostInCluster
+func (w *worker) waitHostInCluster(ctx context.Context, host *api.Host) error {
+ return domain.PollHost(ctx, host, w.ensureClusterSchemer(host).IsHostInCluster)
+}
+
+// waitHostNotInCluster
+func (w *worker) waitHostNotInCluster(ctx context.Context, host *api.Host) error {
+ return domain.PollHost(ctx, host, func(ctx context.Context, host *api.Host) bool {
+ return !w.ensureClusterSchemer(host).IsHostInCluster(ctx, host)
+ })
+}
+
+// waitHostNoActiveQueries
+func (w *worker) waitHostNoActiveQueries(ctx context.Context, host *api.Host) error {
+ return domain.PollHost(ctx, host, func(ctx context.Context, host *api.Host) bool {
+ n, _ := w.ensureClusterSchemer(host).HostActiveQueriesNum(ctx, host)
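+		// Note: the threshold is 1, not 0 - presumably the counting query itself shows up as an active query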
+ return n <= 1
+ })
+}
diff --git a/pkg/controller/chi/worker-migrator.go b/pkg/controller/chi/worker-migrator.go
new file mode 100644
index 000000000..f563b324d
--- /dev/null
+++ b/pkg/controller/chi/worker-migrator.go
@@ -0,0 +1,189 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/schemer"
+ "github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type migrateTableOptions struct {
+ forceMigrate bool
+ dropReplica bool
+}
+
+func (o *migrateTableOptions) ForceMigrate() bool {
+ if o == nil {
+ return false
+ }
+ return o.forceMigrate
+}
+
+func (o *migrateTableOptions) DropReplica() bool {
+ if o == nil {
+ return false
+ }
+ return o.dropReplica
+}
+
+type migrateTableOptionsArr []*migrateTableOptions
+
+// NewMigrateTableOptionsArr creates new migrateTableOptions array
+func NewMigrateTableOptionsArr(opts ...*migrateTableOptions) (res migrateTableOptionsArr) {
+ return append(res, opts...)
+}
+
+// First gets first option
+func (a migrateTableOptionsArr) First() *migrateTableOptions {
+ if len(a) > 0 {
+ return a[0]
+ }
+ return nil
+}
+
+// migrateTables
+func (w *worker) migrateTables(ctx context.Context, host *api.Host, opts ...*migrateTableOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ if !w.shouldMigrateTables(host, opts...) {
+ w.a.V(1).
+ M(host).F().
+ Info(
+				"No need to add tables on host %d in shard %d in cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return nil
+ }
+
+ // Need to migrate tables
+
+ if w.shouldDropReplica(host, opts...) {
+ w.a.V(1).
+ M(host).F().
+ Info(
+				"Need to drop replica on host %d in shard %d in cluster %s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ w.dropReplica(ctx, host, &dropReplicaOptions{forceDrop: true})
+ }
+
+ w.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateStarted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info(
+ "Adding tables on shard/host:%d/%d cluster:%s",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
+
+ err := w.ensureClusterSchemer(host).HostCreateTables(ctx, host)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Tables added successfully on shard/host:%d/%d cluster:%s",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
+ host.GetCR().IEnsureStatus().PushHostTablesCreated(w.c.namer.Name(interfaces.NameFQDN, host))
+ } else {
+ w.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+			Error("ERROR failed to add tables on shard/host:%d/%d cluster:%s err:%v",
+ host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName, err)
+ }
+ return err
+}
+
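+// setHasData marks whether the host already has data - its FQDN is listed among hosts with created tables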
+func (w *worker) setHasData(host *api.Host) {
+ host.SetHasData(host.HasListedTablesCreated(w.c.namer.Name(interfaces.NameFQDN, host)))
+}
+
+// shouldMigrateTables
+func (w *worker) shouldMigrateTables(host *api.Host, opts ...*migrateTableOptions) bool {
+ o := NewMigrateTableOptionsArr(opts...).First()
+
+ // Deal with special cases in order of priority
+ switch {
+ case host.IsStopped():
+ // Stopped host is not able to receive any data, migration is inapplicable
+ return false
+
+ case o.ForceMigrate():
+ // Force migration requested
+ return true
+
+ case host.HasData():
+ // This host is listed as having tables created already, no need to migrate again
+ return false
+
+ case host.IsInNewCluster():
+ // CHI is new, all hosts were added
+ return false
+ }
+
+	// In all other cases - perform migration
+ return true
+}
+
+// shouldDropReplica
+func (w *worker) shouldDropReplica(host *api.Host, opts ...*migrateTableOptions) bool {
+ o := NewMigrateTableOptionsArr(opts...).First()
+
+ // Deal with special cases
+	switch {
+	case o.DropReplica():
+		return true
+	}
+
+ return false
+}
+
+func (w *worker) ensureClusterSchemer(host *api.Host) *schemer.ClusterSchemer {
+ if w == nil {
+ return nil
+ }
+ // Make base cluster connection params
+ clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config())
+ // Adjust base cluster connection params with per-host props
+ switch clusterConnectionParams.Scheme {
+ case api.ChSchemeAuto:
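+		// Auto scheme: prefer plain HTTP when the host exposes an HTTP port, otherwise fall back to HTTPS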
+ switch {
+ case host.HTTPPort.HasValue():
+ clusterConnectionParams.Scheme = "http"
+ clusterConnectionParams.Port = host.HTTPPort.IntValue()
+ case host.HTTPSPort.HasValue():
+ clusterConnectionParams.Scheme = "https"
+ clusterConnectionParams.Port = host.HTTPSPort.IntValue()
+ }
+ case api.ChSchemeHTTP:
+ clusterConnectionParams.Port = host.HTTPPort.IntValue()
+ case api.ChSchemeHTTPS:
+ clusterConnectionParams.Port = host.HTTPSPort.IntValue()
+ }
+ w.schemer = schemer.NewClusterSchemer(clusterConnectionParams, host.Runtime.Version)
+
+ return w.schemer
+}
diff --git a/pkg/controller/chi/worker-pdb.go b/pkg/controller/chi/worker-pdb.go
new file mode 100644
index 000000000..515e0f932
--- /dev/null
+++ b/pkg/controller/chi/worker-pdb.go
@@ -0,0 +1,55 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ policy "k8s.io/api/policy/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcilePDB reconciles PodDisruptionBudget
+func (w *worker) reconcilePDB(ctx context.Context, cluster api.ICluster, pdb *policy.PodDisruptionBudget) error {
+ cur, err := w.c.getPDB(ctx, pdb)
+ switch {
+ case err == nil:
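+		// PDB exists - update it in place, carrying over the current resourceVersion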
+ pdb.ResourceVersion = cur.ResourceVersion
+ err := w.c.updatePDB(ctx, pdb)
+ if err == nil {
+ log.V(1).Info("PDB updated: %s", util.NamespaceNameString(pdb))
+ } else {
+ log.Error("FAILED to update PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return nil
+ }
+ case apiErrors.IsNotFound(err):
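+		// PDB is missing - create it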
+ err := w.c.createPDB(ctx, pdb)
+ if err == nil {
+ log.V(1).Info("PDB created: %s", util.NamespaceNameString(pdb))
+ } else {
+ log.Error("FAILED create PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return err
+ }
+ default:
+ log.Error("FAILED get PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/controller/chi/worker-secret.go b/pkg/controller/chi/worker-secret.go
new file mode 100644
index 000000000..0ba50de27
--- /dev/null
+++ b/pkg/controller/chi/worker-secret.go
@@ -0,0 +1,81 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcileSecret reconciles core.Secret
+func (w *worker) reconcileSecret(ctx context.Context, cr api.ICustomResource, secret *core.Secret) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().Info(secret.Name)
+ defer w.a.V(2).M(cr).E().Info(secret.Name)
+
+ // Check whether this object already exists
+ if _, err := w.c.getSecret(ctx, secret); err == nil {
+		// We have the Secret - keep it as-is, Secrets are not updated in place
+ return nil
+ }
+
+ // Secret not found or broken. Try to recreate
+ _ = w.c.deleteSecretIfExists(ctx, secret.Namespace, secret.Name)
+ err := w.createSecret(ctx, cr, secret)
+ if err != nil {
+ w.a.WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED to reconcile Secret: %s CHI: %s ", secret.Name, cr.GetName())
+ }
+
+ return err
+}
+
+// createSecret
+func (w *worker) createSecret(ctx context.Context, cr api.ICustomResource, secret *core.Secret) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ err := w.c.createSecret(ctx, secret)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Create Secret %s/%s", secret.Namespace, secret.Name)
+ } else {
+ w.a.WithEvent(cr, common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Create Secret %s/%s failed with error %v", secret.Namespace, secret.Name, err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chi/worker-service.go b/pkg/controller/chi/worker-service.go
new file mode 100644
index 000000000..89655ec92
--- /dev/null
+++ b/pkg/controller/chi/worker-service.go
@@ -0,0 +1,214 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcileService reconciles core.Service
+func (w *worker) reconcileService(ctx context.Context, cr api.ICustomResource, service *core.Service) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().Info(service.GetName())
+ defer w.a.V(2).M(cr).E().Info(service.GetName())
+
+ // Check whether this object already exists
+ curService, err := w.c.getService(ctx, service)
+
+ if curService != nil {
+ // We have the Service - try to update it
+ w.a.V(1).M(cr).F().Info("Service found: %s. Will try to update", util.NamespaceNameString(service))
+ err = w.updateService(ctx, cr, curService, service)
+ }
+
+ if err != nil {
+ if apiErrors.IsNotFound(err) {
+			// The Service is not found. Try to recreate it
+ w.a.V(1).M(cr).F().Info("Service: %s not found. err: %v", util.NamespaceNameString(service), err)
+ } else {
+			// The Service update failed. Try to recreate it
+ w.a.WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Update Service: %s failed with error: %v", util.NamespaceNameString(service), err)
+ }
+
+ _ = w.c.deleteServiceIfExists(ctx, service.GetNamespace(), service.GetName())
+ err = w.createService(ctx, cr, service)
+ }
+
+ if err == nil {
+ w.a.V(1).M(cr).F().Info("Service reconcile successful: %s", util.NamespaceNameString(service))
+ } else {
+ w.a.WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED to reconcile Service: %s CHI: %s ", util.NamespaceNameString(service), cr.GetName())
+ }
+
+ return err
+}
+
+// updateService
+func (w *worker) updateService(
+ ctx context.Context,
+ cr api.ICustomResource,
+ curService *core.Service,
+ targetService *core.Service,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ if curService.Spec.Type != targetService.Spec.Type {
+ return fmt.Errorf(
+ "just recreate the service in case of service type change '%s'=>'%s'",
+ curService.Spec.Type, targetService.Spec.Type)
+ }
+
+ // Updating a Service is a complicated business
+
+ newService := targetService.DeepCopy()
+
+ // spec.resourceVersion is required in order to update an object
+ newService.ResourceVersion = curService.ResourceVersion
+
+ //
+ // Migrate ClusterIP to the new service
+ //
+ // spec.clusterIP field is immutable, need to use already assigned value
+ // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
+ // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
+ newService.Spec.ClusterIP = curService.Spec.ClusterIP
+
+ //
+ // Migrate existing ports to the new service for NodePort and LoadBalancer services
+ //
+ // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+ // Usually assigned by the system. If specified, it will be allocated to the service if unused
+ // or else creation of the service will fail.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+
+ // !!! IMPORTANT !!!
+	// No changes in service type are allowed.
+	// Already exposed port details cannot be changed.
+
+ serviceTypeIsNodePort := (curService.Spec.Type == core.ServiceTypeNodePort) && (newService.Spec.Type == core.ServiceTypeNodePort)
+ serviceTypeIsLoadBalancer := (curService.Spec.Type == core.ServiceTypeLoadBalancer) && (newService.Spec.Type == core.ServiceTypeLoadBalancer)
+ if serviceTypeIsNodePort || serviceTypeIsLoadBalancer {
+ for i := range newService.Spec.Ports {
+ newPort := &newService.Spec.Ports[i]
+ for j := range curService.Spec.Ports {
+ curPort := &curService.Spec.Ports[j]
+ if newPort.Port == curPort.Port {
+ // Already have this port specified - reuse all internals,
+ // due to limitations with auto-assigned values
+ *newPort = *curPort
+ w.a.M(cr).F().Info("reuse Port %d values", newPort.Port)
+ break
+ }
+ }
+ }
+ }
+
+ //
+ // Migrate HealthCheckNodePort to the new service
+ //
+ // spec.healthCheckNodePort field is used with ExternalTrafficPolicy=Local only and is immutable within ExternalTrafficPolicy=Local
+ // In case ExternalTrafficPolicy is changed it seems to be irrelevant
+ // https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ curExternalTrafficPolicyTypeLocal := curService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
+ newExternalTrafficPolicyTypeLocal := newService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
+ if curExternalTrafficPolicyTypeLocal && newExternalTrafficPolicyTypeLocal {
+ newService.Spec.HealthCheckNodePort = curService.Spec.HealthCheckNodePort
+ }
+
+ //
+ // Migrate LoadBalancerClass to the new service
+ //
+ // This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ if curService.Spec.LoadBalancerClass != nil {
+ newService.Spec.LoadBalancerClass = curService.Spec.LoadBalancerClass
+ }
+
+ //
+ // Migrate labels, annotations and finalizers to the new service
+ //
+ newService.GetObjectMeta().SetLabels(util.MergeStringMapsPreserve(newService.GetObjectMeta().GetLabels(), curService.GetObjectMeta().GetLabels()))
+ newService.GetObjectMeta().SetAnnotations(util.MergeStringMapsPreserve(newService.GetObjectMeta().GetAnnotations(), curService.GetObjectMeta().GetAnnotations()))
+ newService.GetObjectMeta().SetFinalizers(util.MergeStringArrays(newService.GetObjectMeta().GetFinalizers(), curService.GetObjectMeta().GetFinalizers()))
+
+ //
+ // And only now we are ready to actually update the service with new version of the service
+ //
+
+ err := w.c.updateService(ctx, newService)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Update Service success: %s", util.NamespaceNameString(newService))
+ } else {
+		w.a.M(cr).F().Error("Update Service fail: %s failed with error: %v", util.NamespaceNameString(newService), err)
+ }
+
+ return err
+}
+
+// createService
+func (w *worker) createService(ctx context.Context, cr api.ICustomResource, service *core.Service) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ err := w.c.createService(ctx, service)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("OK Create Service: %s", util.NamespaceNameString(service))
+ } else {
+ w.a.WithEvent(cr, common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED Create Service: %s err: %v", util.NamespaceNameString(service), err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chi/worker-statefulset-rollback.go b/pkg/controller/chi/worker-statefulset-rollback.go
new file mode 100644
index 000000000..cc122520e
--- /dev/null
+++ b/pkg/controller/chi/worker-statefulset-rollback.go
@@ -0,0 +1,144 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chi
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// OnStatefulSetCreateFailed handles situation when StatefulSet create failed on k8s level
+func (c *Controller) OnStatefulSetCreateFailed(ctx context.Context, host *api.Host) common.ErrorCRUD {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return common.ErrCRUDIgnore
+ }
+
+ // What to do with StatefulSet - look into chop configuration settings
+ switch chop.Config().Reconcile.StatefulSet.Create.OnFailure {
+ case api.OnStatefulSetCreateFailureActionAbort:
+ // Report appropriate error, it will break reconcile loop
+ log.V(1).M(host).F().Info("abort")
+ return common.ErrCRUDAbort
+
+ case api.OnStatefulSetCreateFailureActionDelete:
+ // Delete gracefully failed StatefulSet
+ log.V(1).M(host).F().Info(
+ "going to DELETE FAILED StatefulSet %s",
+ util.NamespaceNameString(host.Runtime.DesiredStatefulSet.GetObjectMeta()))
+ _ = c.deleteHost(ctx, host)
+ return c.shouldContinueOnCreateFailed()
+
+ case api.OnStatefulSetCreateFailureActionIgnore:
+ // Ignore error, continue reconcile loop
+ log.V(1).M(host).F().Info(
+ "going to ignore error %s",
+ util.NamespaceNameString(host.Runtime.DesiredStatefulSet.GetObjectMeta()))
+ return common.ErrCRUDIgnore
+
+ default:
+ log.V(1).M(host).F().Error(
+ "Unknown c.chop.Config().OnStatefulSetCreateFailureAction=%s",
+ chop.Config().Reconcile.StatefulSet.Create.OnFailure)
+ return common.ErrCRUDIgnore
+ }
+
+ return common.ErrCRUDUnexpectedFlow
+}
+
+// OnStatefulSetUpdateFailed handles situation when StatefulSet update failed in k8s level
+// It can try to revert StatefulSet to its previous version, specified in rollbackStatefulSet
+func (c *Controller) OnStatefulSetUpdateFailed(ctx context.Context, rollbackStatefulSet *apps.StatefulSet, host *api.Host, kubeSTS interfaces.IKubeSTS) common.ErrorCRUD {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return common.ErrCRUDIgnore
+ }
+
+ // What to do with StatefulSet - look into chop configuration settings
+ switch chop.Config().Reconcile.StatefulSet.Update.OnFailure {
+ case api.OnStatefulSetUpdateFailureActionAbort:
+ // Report appropriate error, it will break reconcile loop
+ log.V(1).M(host).F().Info("abort StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.GetObjectMeta()))
+ return common.ErrCRUDAbort
+
+ case api.OnStatefulSetUpdateFailureActionRollback:
+ // Need to revert current StatefulSet to oldStatefulSet
+ log.V(1).M(host).F().Info("going to ROLLBACK FAILED StatefulSet %s", util.NamespaceNameString(rollbackStatefulSet.GetObjectMeta()))
+ curStatefulSet, err := kubeSTS.Get(ctx, host)
+ if err != nil {
+ log.V(1).M(host).F().Warning("Unable to fetch current StatefulSet %s. err: %q", util.NamespaceNameString(rollbackStatefulSet.GetObjectMeta()), err)
+ return c.shouldContinueOnUpdateFailed()
+ }
+
+ // Make copy of "rollback to" .Spec just to be sure nothing gets corrupted
+ // Update StatefulSet to its 'rollback to' StatefulSet - this is expected to rollback inapplicable changes
+ // Having StatefulSet .spec in rolled back status we need to delete current Pod - because in case of Pod
+ // being seriously broken, it is the only way to go.
+	// Just delete the Pod and StatefulSet will recreate the Pod with the current .spec
+ // This will rollback Pod to "rollback to" .spec
+ curStatefulSet.Spec = *rollbackStatefulSet.Spec.DeepCopy()
+ curStatefulSet, _ = kubeSTS.Update(ctx, curStatefulSet)
+ _ = c.statefulSetDeletePod(ctx, curStatefulSet, host)
+
+ return c.shouldContinueOnUpdateFailed()
+
+ case api.OnStatefulSetUpdateFailureActionIgnore:
+ // Ignore error, continue reconcile loop
+ log.V(1).M(host).F().Info("going to ignore error %s", util.NamespaceNameString(rollbackStatefulSet.GetObjectMeta()))
+ return common.ErrCRUDIgnore
+
+ default:
+ log.V(1).M(host).F().Error("Unknown c.chop.Config().OnStatefulSetUpdateFailureAction=%s", chop.Config().Reconcile.StatefulSet.Update.OnFailure)
+ return common.ErrCRUDIgnore
+ }
+
+ return common.ErrCRUDUnexpectedFlow
+}
+
+// shouldContinueOnCreateFailed return nil in case 'continue' or error in case 'do not continue'
+func (c *Controller) shouldContinueOnCreateFailed() common.ErrorCRUD {
+	// Check configuration option on whether we should continue when errors are met on the way
+	// c.chopConfig.OnStatefulSetCreateFailureAction
+ var continueUpdate = false
+ if continueUpdate {
+ // Continue update
+ return common.ErrCRUDIgnore
+ }
+
+ // Do not continue update
+ return common.ErrCRUDAbort
+}
+
+// shouldContinueOnUpdateFailed return nil in case 'continue' or error in case 'do not continue'
+func (c *Controller) shouldContinueOnUpdateFailed() common.ErrorCRUD {
+	// Check configuration option on whether we should continue when errors are met on the way
+ // c.chopConfig.OnStatefulSetUpdateFailureAction
+ var continueUpdate = false
+ if continueUpdate {
+ // Continue update
+ return common.ErrCRUDIgnore
+ }
+
+ // Do not continue update
+ return common.ErrCRUDAbort
+}
diff --git a/pkg/model/chk/labeler.go b/pkg/controller/chi/worker-zk-integration.go
similarity index 63%
rename from pkg/model/chk/labeler.go
rename to pkg/controller/chi/worker-zk-integration.go
index 5816ef055..4541fb938 100644
--- a/pkg/model/chk/labeler.go
+++ b/pkg/controller/chi/worker-zk-integration.go
@@ -12,23 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chk
+package chi
import (
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/model/zookeeper"
)
-func GetPodLabels(chk *api.ClickHouseKeeperInstallation) map[string]string {
- // In case Pod template has labels explicitly specified - use them
- labels := getPodTemplateLabels(chk)
- if labels != nil {
- return labels
- }
-
- // Either no pod template or labels specified.
- // Construct default labels
- return map[string]string{
- "app": chk.GetName(),
- "uid": string(chk.UID),
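+// reconcileZookeeperRootPath ensures the Zookeeper root path (chroot) from the cluster config exists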
+func reconcileZookeeperRootPath(cluster *api.Cluster) {
+ if cluster.Zookeeper.IsEmpty() {
+ // Nothing to reconcile
+ return
}
+ conn := zookeeper.NewConnection(cluster.Zookeeper.Nodes)
+ path := zookeeper.NewPathManager(conn)
+ path.Ensure(cluster.Zookeeper.Root)
+ path.Close()
}
diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go
index df7b64998..91b15a7c1 100644
--- a/pkg/controller/chi/worker.go
+++ b/pkg/controller/chi/worker.go
@@ -17,30 +17,36 @@ package chi
import (
"context"
"errors"
- "fmt"
"time"
- "github.com/juliangruber/go-intersect"
core "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
-
- "github.com/altinity/queue"
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
"github.com/altinity/clickhouse-operator/pkg/apis/deployment"
"github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/controller"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
- chiCreator "github.com/altinity/clickhouse-operator/pkg/model/chi/creator"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/config"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/macro"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/namer"
"github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
"github.com/altinity/clickhouse-operator/pkg/model/chi/schemer"
- "github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
- "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+ "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+ commonConfig "github.com/altinity/clickhouse-operator/pkg/model/common/config"
+ commonCreator "github.com/altinity/clickhouse-operator/pkg/model/common/creator"
+ commonMacro "github.com/altinity/clickhouse-operator/pkg/model/common/macro"
+ commonNormalizer "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
+ "github.com/altinity/clickhouse-operator/pkg/model/managers"
"github.com/altinity/clickhouse-operator/pkg/util"
+ "github.com/altinity/queue"
)
// FinalizerName specifies name of the finalizer to be used with CHI
@@ -49,57 +55,94 @@ const FinalizerName = "finalizer.clickhouseinstallation.altinity.com"
// worker represents worker thread which runs reconcile tasks
type worker struct {
c *Controller
- a Announcer
+ a common.Announcer
+
//queue workqueue.RateLimitingInterface
- queue queue.PriorityQueue
- normalizer *normalizer.Normalizer
- schemer *schemer.ClusterSchemer
- start time.Time
- task task
-}
+ queue queue.PriorityQueue
+ schemer *schemer.ClusterSchemer
-// task represents context of a worker. This also can be called "a reconcile task"
-type task struct {
- creator *chiCreator.Creator
- registryReconciled *model.Registry
- registryFailed *model.Registry
- cmUpdate time.Time
- start time.Time
-}
+ normalizer *normalizer.Normalizer
+ task *common.Task
+ stsReconciler *statefulset.Reconciler
-// newTask creates new context
-func newTask(creator *chiCreator.Creator) task {
- return task{
- creator: creator,
- registryReconciled: model.NewRegistry(),
- registryFailed: model.NewRegistry(),
- cmUpdate: time.Time{},
- start: time.Now(),
- }
+ start time.Time
}
// newWorker
-// func (c *Controller) newWorker(q workqueue.RateLimitingInterface) *worker {
func (c *Controller) newWorker(q queue.PriorityQueue, sys bool) *worker {
start := time.Now()
if !sys {
start = start.Add(api.DefaultReconcileThreadsWarmup)
}
+ kind := "ClickHouseInstallation"
+ generateName := "chop-chi-"
+ component := componentName
+
+ announcer := common.NewAnnouncer(
+ common.NewEventEmitter(c.kube.Event(), kind, generateName, component),
+ c.kube.CR(),
+ )
+
return &worker{
- c: c,
- a: NewAnnouncer().WithController(c),
- queue: q,
- normalizer: normalizer.NewNormalizer(func(namespace, name string) (*core.Secret, error) {
- return c.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, controller.NewGetOptions())
- }),
+ c: c,
+ a: announcer,
+
+ queue: q,
schemer: nil,
- start: start,
- }
-}
-// newContext creates new reconcile task
-func (w *worker) newTask(chi *api.ClickHouseInstallation) {
- w.task = newTask(chiCreator.NewCreator(chi))
+ normalizer: normalizer.New(func(namespace, name string) (*core.Secret, error) {
+ return c.kube.Secret().Get(context.TODO(), &core.Secret{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ })
+ }),
+ start: start,
+ task: nil,
+ }
+}
+
+func configGeneratorOptions(cr *api.ClickHouseInstallation) *config.GeneratorOptions {
+ return &config.GeneratorOptions{
+ Users: cr.GetSpecT().Configuration.Users,
+ Profiles: cr.GetSpecT().Configuration.Profiles,
+ Quotas: cr.GetSpecT().Configuration.Quotas,
+ Settings: cr.GetSpecT().Configuration.Settings,
+ Files: cr.GetSpecT().Configuration.Files,
+ DistributedDDL: cr.GetSpecT().Defaults.DistributedDDL,
+ }
+}
+
+func (w *worker) newTask(cr *api.ClickHouseInstallation) {
+ w.task = common.NewTask(
+ commonCreator.NewCreator(
+ cr,
+ managers.NewConfigFilesGenerator(managers.FilesGeneratorTypeClickHouse, cr, configGeneratorOptions(cr)),
+ managers.NewContainerManager(managers.ContainerManagerTypeClickHouse),
+ managers.NewTagManager(managers.TagManagerTypeClickHouse, cr),
+ managers.NewProbeManager(managers.ProbeManagerTypeClickHouse),
+ managers.NewServiceManager(managers.ServiceManagerTypeClickHouse),
+ managers.NewVolumeManager(managers.VolumeManagerTypeClickHouse),
+ managers.NewConfigMapManager(managers.ConfigMapManagerTypeClickHouse),
+ managers.NewNameManager(managers.NameManagerTypeClickHouse),
+ managers.NewOwnerReferencesManager(managers.OwnerReferencesManagerTypeClickHouse),
+ namer.New(),
+ commonMacro.New(macro.List),
+ labeler.New(cr),
+ ),
+ )
+
+ w.stsReconciler = statefulset.NewReconciler(
+ w.a,
+ w.task,
+ domain.NewHostStatefulSetPoller(domain.NewStatefulSetPoller(w.c.kube), w.c.kube, w.c.ctrlLabeler),
+ w.c.namer,
+ labeler.New(cr),
+ storage.NewStorageReconciler(w.task, w.c.namer, w.c.kube.Storage()),
+ w.c.kube,
+ w.c,
+ )
}
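
Note: the reworked newTask above captures the PR's central refactoring move: worker-side construction is now generic, and everything ClickHouse-specific is injected through managers selected by the *TypeClickHouse constants. A stripped-down sketch of the pattern with hypothetical names (the real constructor takes ten managers plus a namer, macro engine, and labeler):

    // Sketch: a flavor-agnostic creator receives its behavior via managers.
    // ContainerManager and ProbeManager here are illustrative stand-ins.
    type ContainerManager interface{ BuildContainer() any }
    type ProbeManager interface{ BuildProbe() any }

    type Creator struct {
        containers ContainerManager
        probes     ProbeManager
    }

    // NewCreator wires flavor-specific managers into one shared construction
    // path, mirroring the managers.New*Manager(...TypeClickHouse) calls above.
    func NewCreator(c ContainerManager, p ProbeManager) *Creator {
        return &Creator{containers: c, probes: p}
    }
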
// timeToStart specifies time that operator does not accept changes
@@ -110,15 +153,11 @@ func (w *worker) isJustStarted() bool {
return time.Since(w.start) < timeToStart
}
-func (w *worker) isConfigurationChangeRequiresReboot(host *api.ChiHost) bool {
- return model.IsConfigurationChangeRequiresReboot(host)
-}
-
// shouldForceRestartHost checks whether cluster requires hosts restart
-func (w *worker) shouldForceRestartHost(host *api.ChiHost) bool {
+func (w *worker) shouldForceRestartHost(host *api.Host) bool {
// RollingUpdate purpose is to always shut the host down.
// It is such an interesting policy.
- if host.GetCHI().IsRollingUpdate() {
+ if host.GetCR().IsRollingUpdate() {
w.a.V(1).M(host).F().Info("RollingUpdate requires force restart. Host: %s", host.GetName())
return true
}
@@ -134,14 +173,14 @@ func (w *worker) shouldForceRestartHost(host *api.ChiHost) bool {
}
// For some configuration changes we have to force restart host
- if w.isConfigurationChangeRequiresReboot(host) {
+ if model.IsConfigurationChangeRequiresReboot(host) {
w.a.V(1).M(host).F().Info("Config change(s) require host restart. Host: %s", host.GetName())
return true
}
podIsCrushed := false
// pod.Status.ContainerStatuses[0].State.Waiting.Reason
- if pod, err := w.c.getPod(host); err == nil {
+ if pod, err := w.c.kube.Pod().Get(host); err == nil {
if len(pod.Status.ContainerStatuses) > 0 {
if pod.Status.ContainerStatuses[0].State.Waiting != nil {
if pod.Status.ContainerStatuses[0].State.Waiting.Reason == "CrashLoopBackOff" {
@@ -160,189 +199,24 @@ func (w *worker) shouldForceRestartHost(host *api.ChiHost) bool {
return false
}
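
For reference, the CrashLoopBackOff check above reduces to a small predicate over the standard pod status. A minimal sketch, reusing the core alias for k8s.io/api/core/v1 that this file already imports (the helper name isPodCrashLooping is ours, not the operator's):

    // isPodCrashLooping reports whether the pod's first container is stuck
    // waiting in CrashLoopBackOff, the signal shouldForceRestartHost uses
    // to force a restart instead of an in-place update.
    func isPodCrashLooping(pod *core.Pod) bool {
        if pod == nil || len(pod.Status.ContainerStatuses) == 0 {
            return false
        }
        waiting := pod.Status.ContainerStatuses[0].State.Waiting
        return waiting != nil && waiting.Reason == "CrashLoopBackOff"
    }
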
-// run is an endless work loop, expected to be run in a thread
-func (w *worker) run() {
- w.a.V(2).S().P()
- defer w.a.V(2).E().P()
-
- // For system thread let's wait its 'official start time', thus giving it time to bootstrap
- util.WaitContextDoneUntil(context.Background(), w.start)
-
- // Events loop
- for {
- // Get() blocks until it can return an item
- item, ctx, ok := w.queue.Get()
- if !ok {
- w.a.Info("shutdown request")
- return
- }
-
- //item, shut := w.queue.Get()
- //task := context.Background()
- //if shut {
- // w.a.Info("shutdown request")
- // return
- //}
-
- if err := w.processItem(ctx, item); err != nil {
- // Item not processed
- // this code cannot return an error and needs to indicate error has been ignored
- utilRuntime.HandleError(err)
- }
-
- // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing
- // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
- // still have to call `Done` on the queue.
- //w.queue.Forget(item)
-
- // Remove item from processing set when processing completed
- w.queue.Done(item)
- }
-}
-
-func (w *worker) processReconcileCHI(ctx context.Context, cmd *ReconcileCHI) error {
- switch cmd.cmd {
- case reconcileAdd:
- return w.updateCHI(ctx, nil, cmd.new)
- case reconcileUpdate:
- return w.updateCHI(ctx, cmd.old, cmd.new)
- case reconcileDelete:
- return w.discoveryAndDeleteCHI(ctx, cmd.old)
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
- return nil
-}
-
-func (w *worker) processReconcileCHIT(cmd *ReconcileCHIT) error {
- switch cmd.cmd {
- case reconcileAdd:
- return w.addChit(cmd.new)
- case reconcileUpdate:
- return w.updateChit(cmd.old, cmd.new)
- case reconcileDelete:
- return w.deleteChit(cmd.old)
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
- return nil
-}
-
-func (w *worker) processReconcileChopConfig(cmd *ReconcileChopConfig) error {
- switch cmd.cmd {
- case reconcileAdd:
- return w.c.addChopConfig(cmd.new)
- case reconcileUpdate:
- return w.c.updateChopConfig(cmd.old, cmd.new)
- case reconcileDelete:
- return w.c.deleteChopConfig(cmd.old)
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
- return nil
-}
-
-func (w *worker) processReconcileEndpoints(ctx context.Context, cmd *ReconcileEndpoints) error {
- switch cmd.cmd {
- case reconcileUpdate:
- return w.updateEndpoints(ctx, cmd.old, cmd.new)
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
- return nil
-}
-
-func (w *worker) processReconcilePod(ctx context.Context, cmd *ReconcilePod) error {
- switch cmd.cmd {
- case reconcileAdd:
- w.a.V(1).M(cmd.new).F().Info("Add Pod. %s/%s", cmd.new.Namespace, cmd.new.Name)
- metricsPodAdd(ctx)
- return nil
- case reconcileUpdate:
- //ignore
- //w.a.V(1).M(cmd.new).F().Info("Update Pod. %s/%s", cmd.new.Namespace, cmd.new.Name)
- //metricsPodUpdate(ctx)
- return nil
- case reconcileDelete:
- w.a.V(1).M(cmd.old).F().Info("Delete Pod. %s/%s", cmd.old.Namespace, cmd.old.Name)
- metricsPodDelete(ctx)
- return nil
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected reconcile - %#v", cmd))
- return nil
-}
-
-func (w *worker) processDropDns(ctx context.Context, cmd *DropDns) error {
- if chi, err := w.createCHIFromObjectMeta(cmd.initiator, false, normalizer.NewOptions()); err == nil {
- w.a.V(2).M(cmd.initiator).Info("flushing DNS for CHI %s", chi.Name)
- _ = w.ensureClusterSchemer(chi.FirstHost()).CHIDropDnsCache(ctx, chi)
- } else {
- w.a.M(cmd.initiator).F().Error("unable to find CHI by %v err: %v", cmd.initiator.Labels, err)
- }
- return nil
-}
-
-// processItem processes one work item according to its type
-func (w *worker) processItem(ctx context.Context, item interface{}) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- w.a.V(3).S().P()
- defer w.a.V(3).E().P()
-
- switch cmd := item.(type) {
- case *ReconcileCHI:
- return w.processReconcileCHI(ctx, cmd)
- case *ReconcileCHIT:
- return w.processReconcileCHIT(cmd)
- case *ReconcileChopConfig:
- return w.processReconcileChopConfig(cmd)
- case *ReconcileEndpoints:
- return w.processReconcileEndpoints(ctx, cmd)
- case *ReconcilePod:
- return w.processReconcilePod(ctx, cmd)
- case *DropDns:
- return w.processDropDns(ctx, cmd)
- }
-
- // Unknown item type, don't know what to do with it
- // Just skip it and behave like it never existed
- utilRuntime.HandleError(fmt.Errorf("unexpected item in the queue - %#v", item))
- return nil
-}
-
// normalize
func (w *worker) normalize(c *api.ClickHouseInstallation) *api.ClickHouseInstallation {
-
- chi, err := w.normalizer.CreateTemplatedCHI(c, normalizer.NewOptions())
+ chi, err := w.normalizer.CreateTemplated(c, commonNormalizer.NewOptions())
if err != nil {
- w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
+ w.a.WithEvent(chi, common.EventActionReconcile, common.EventReasonReconcileFailed).
WithStatusError(chi).
M(chi).F().
- Error("FAILED to normalize CHI 1: %v", err)
+ Error("FAILED to normalize CR 1: %v", err)
}
ips := w.c.getPodsIPs(chi)
w.a.V(1).M(chi).Info("IPs of the CHI normalizer %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := normalizer.NewOptions()
+ opts := commonNormalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
- chi, err = w.normalizer.CreateTemplatedCHI(c, opts)
+ chi, err = w.normalizer.CreateTemplated(c, opts)
if err != nil {
- w.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
+ w.a.WithEvent(chi, common.EventActionReconcile, common.EventReasonReconcileFailed).
WithStatusError(chi).
M(chi).F().
Error("FAILED to normalize CHI 2: %v", err)
@@ -359,12 +233,12 @@ func (w *worker) ensureFinalizer(ctx context.Context, chi *api.ClickHouseInstall
}
// In case CHI is being deleted already, no need to meddle with finalizers
- if !chi.ObjectMeta.DeletionTimestamp.IsZero() {
+ if !chi.GetDeletionTimestamp().IsZero() {
return false
}
// Finalizer can already be listed in CHI, do nothing in this case
- if util.InArray(FinalizerName, chi.ObjectMeta.Finalizers) {
+ if util.InArray(FinalizerName, chi.GetFinalizers()) {
w.a.V(2).M(chi).F().Info("finalizer already installed")
return false
}
@@ -383,29 +257,29 @@ func (w *worker) ensureFinalizer(ctx context.Context, chi *api.ClickHouseInstall
// updateEndpoints updates endpoints
func (w *worker) updateEndpoints(ctx context.Context, old, new *core.Endpoints) error {
- if chi, err := w.createCHIFromObjectMeta(&new.ObjectMeta, false, normalizer.NewOptions()); err == nil {
- w.a.V(1).M(chi).Info("updating endpoints for CHI-1 %s", chi.Name)
+ if chi, err := w.createCRFromObjectMeta(new.GetObjectMeta(), false, commonNormalizer.NewOptions()); err == nil {
+ w.a.V(1).M(chi).Info("updating endpoints for CR-1 %s", chi.Name)
ips := w.c.getPodsIPs(chi)
- w.a.V(1).M(chi).Info("IPs of the CHI-1 update endpoints %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := normalizer.NewOptions()
+ w.a.V(1).M(chi).Info("IPs of the CR-1 update endpoints %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
+ opts := commonNormalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
- if chi, err := w.createCHIFromObjectMeta(&new.ObjectMeta, false, opts); err == nil {
+ if chi, err := w.createCRFromObjectMeta(new.GetObjectMeta(), false, opts); err == nil {
w.a.V(1).M(chi).Info("Update users IPS-1")
// TODO unify with finalize reconcile
w.newTask(chi)
- w.reconcileCHIConfigMapUsers(ctx, chi)
- w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
+ w.reconcileConfigMapCommonUsers(ctx, chi)
+ w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
TolerateAbsence: true,
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
Normalized: true,
},
})
} else {
- w.a.M(&new.ObjectMeta).F().Error("internal unable to find CHI by %v err: %v", new.Labels, err)
+ w.a.M(new.GetObjectMeta()).F().Error("internal unable to find CHI by %v err: %v", new.GetLabels(), err)
}
} else {
- w.a.M(&new.ObjectMeta).F().Error("external unable to find CHI by %v err %v", new.Labels, err)
+ w.a.M(new.GetObjectMeta()).F().Error("external unable to find CHI by %v err %v", new.GetLabels(), err)
}
return nil
}
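
Note the deliberate two-pass normalization in updateEndpoints: the first createCRFromObjectMeta call discovers which pods the CR owns, and the second re-normalizes with those pods' IPs injected, so the default user's allowed-IP list tracks the running cluster. A compressed sketch of the pattern, reusing the api alias this file imports (normalizeTwice and its parameters are illustrative, not the operator's API):

    func normalizeTwice(
        normalize func(extraIPs []string) (*api.ClickHouseInstallation, error),
        podIPs func(*api.ClickHouseInstallation) []string,
    ) (*api.ClickHouseInstallation, error) {
        // Pass 1: normalize with defaults to learn which pods exist.
        chi, err := normalize(nil)
        if err != nil {
            return nil, err
        }
        // Pass 2: re-normalize with the live pod IPs so the generated
        // default-user network restrictions match the actual pods.
        return normalize(podIPs(chi))
    }
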
@@ -422,9 +296,9 @@ func (w *worker) updateCHI(ctx context.Context, old, new *api.ClickHouseInstalla
update := (old != nil) && (new != nil)
- if update && (old.ObjectMeta.ResourceVersion == new.ObjectMeta.ResourceVersion) {
+ if update && (old.GetResourceVersion() == new.GetResourceVersion()) {
// No need to react
- w.a.V(3).M(new).F().Info("ResourceVersion did not change: %s", new.ObjectMeta.ResourceVersion)
+ w.a.V(3).M(new).F().Info("ResourceVersion did not change: %s", new.GetResourceVersion())
return nil
}
@@ -466,7 +340,7 @@ func (w *worker) updateCHI(ctx context.Context, old, new *api.ClickHouseInstalla
}
// CHI is being reconciled
- return w.reconcileCHI(ctx, old, new)
+ return w.reconcileCR(ctx, old, new)
}
// isCHIProcessedOnTheSameIP checks whether it is just a restart of the operator on the same IP
@@ -513,11 +387,11 @@ func (w *worker) isCleanRestart(chi *api.ClickHouseInstallation) bool {
generationIsOk := false
// However, completed CHI still can be missing, for example, in newly requested CHI
if chi.HasAncestor() {
- generationIsOk = chi.Generation == chi.GetAncestor().Generation
+ generationIsOk = chi.Generation == chi.GetAncestor().GetGeneration()
log.V(1).Info(
"CHI %s has ancestor. Generations. Prev: %d Cur: %d Generation is the same: %t",
chi.Name,
- chi.GetAncestor().Generation,
+ chi.GetAncestor().GetGeneration(),
chi.Generation,
generationIsOk,
)
@@ -556,59 +430,7 @@ func (w *worker) isGenerationTheSame(old, new *api.ClickHouseInstallation) bool
return false
}
- return old.Generation == new.Generation
-}
-
-// logCHI writes a CHI into the log
-func (w *worker) logCHI(name string, chi *api.ClickHouseInstallation) {
- w.a.V(1).M(chi).Info(
- "logCHI %s start--------------------------------------------:\n%s\nlogCHI %s end--------------------------------------------",
- name,
- name,
- chi.YAML(api.CopyCHIOptions{SkipStatus: true, SkipManagedFields: true}),
- )
-}
-
-// logActionPlan logs action plan
-func (w *worker) logActionPlan(ap *model.ActionPlan) {
- w.a.Info(
- "ActionPlan start---------------------------------------------:\n%s\nActionPlan end---------------------------------------------",
- ap,
- )
-}
-
-// logOldAndNew writes old and new CHIs into the log
-func (w *worker) logOldAndNew(name string, old, new *api.ClickHouseInstallation) {
- w.logCHI(name+" old", old)
- w.logCHI(name+" new", new)
-}
-
-func (w *worker) waitForIPAddresses(ctx context.Context, chi *api.ClickHouseInstallation) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
- if chi.IsStopped() {
- // No need to wait for stopped CHI
- return
- }
- w.a.V(1).M(chi).F().S().Info("wait for IP addresses to be assigned to all pods")
- start := time.Now()
- w.c.poll(ctx, chi, func(c *api.ClickHouseInstallation, e error) bool {
- if len(c.Status.GetPodIPs()) >= len(c.Status.GetPods()) {
- // Stop polling
- w.a.V(1).M(c).Info("all IP addresses are in place")
- return false
- }
- if time.Now().Sub(start) > 1*time.Minute {
- // Stop polling
- w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed")
- return false
- }
- // Continue polling
- w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet")
- return true
- })
+ return old.GetGeneration() == new.GetGeneration()
}
// excludeStoppedCHIFromMonitoring excludes stopped CHI from monitoring
@@ -619,7 +441,7 @@ func (w *worker) excludeStoppedCHIFromMonitoring(chi *api.ClickHouseInstallation
}
w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileInProgress).
+ WithEvent(chi, common.EventActionReconcile, common.EventReasonReconcileInProgress).
WithStatusAction(chi).
M(chi).F().
Info("exclude CHI from monitoring")
@@ -634,34 +456,34 @@ func (w *worker) addCHIToMonitoring(chi *api.ClickHouseInstallation) {
}
w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileInProgress).
+ WithEvent(chi, common.EventActionReconcile, common.EventReasonReconcileInProgress).
WithStatusAction(chi).
M(chi).F().
Info("add CHI to monitoring")
w.c.updateWatch(chi)
}
-func (w *worker) markReconcileStart(ctx context.Context, chi *api.ClickHouseInstallation, ap *model.ActionPlan) {
+func (w *worker) markReconcileStart(ctx context.Context, cr *api.ClickHouseInstallation, ap *action_plan.ActionPlan) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return
}
// Write desired normalized CHI with initialized .Status, so it would be possible to monitor progress
- chi.EnsureStatus().ReconcileStart(ap.GetRemovedHostsNum())
- _ = w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ cr.EnsureStatus().ReconcileStart(ap.GetRemovedHostsNum())
+ _ = w.c.updateCRObjectStatus(ctx, cr, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
MainFields: true,
},
})
w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileStarted).
- WithStatusAction(chi).
- WithStatusActions(chi).
- M(chi).F().
- Info("reconcile started, task id: %s", chi.Spec.GetTaskID())
- w.a.V(2).M(chi).F().Info("action plan\n%s\n", ap.String())
+ WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileStarted).
+ WithStatusAction(cr).
+ WithStatusActions(cr).
+ M(cr).F().
+ Info("reconcile started, task id: %s", cr.GetSpecT().GetTaskID())
+ w.a.V(2).M(cr).F().Info("action plan\n%s\n", ap.String())
}
func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _chi *api.ClickHouseInstallation) {
@@ -673,38 +495,38 @@ func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _chi *ap
w.a.V(1).M(_chi).F().S().Info("finalize reconcile")
// Update CHI object
- if chi, err := w.createCHIFromObjectMeta(&_chi.ObjectMeta, true, normalizer.NewOptions()); err == nil {
- w.a.V(1).M(chi).Info("updating endpoints for CHI-2 %s", chi.Name)
+ if chi, err := w.createCRFromObjectMeta(_chi, true, commonNormalizer.NewOptions()); err == nil {
+ w.a.V(1).M(chi).Info("updating endpoints for CR-2 %s", chi.Name)
ips := w.c.getPodsIPs(chi)
- w.a.V(1).M(chi).Info("IPs of the CHI-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
- opts := normalizer.NewOptions()
+ w.a.V(1).M(chi).Info("IPs of the CR-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
+ opts := commonNormalizer.NewOptions()
opts.DefaultUserAdditionalIPs = ips
- if chi, err := w.createCHIFromObjectMeta(&_chi.ObjectMeta, true, opts); err == nil {
+ if chi, err := w.createCRFromObjectMeta(_chi, true, opts); err == nil {
w.a.V(1).M(chi).Info("Update users IPS-2")
chi.SetAncestor(chi.GetTarget())
chi.SetTarget(nil)
chi.EnsureStatus().ReconcileComplete()
// TODO unify with update endpoints
w.newTask(chi)
- w.reconcileCHIConfigMapUsers(ctx, chi)
- w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ w.reconcileConfigMapCommonUsers(ctx, chi)
+ w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
WholeStatus: true,
},
})
} else {
- w.a.M(&_chi.ObjectMeta).F().Error("internal unable to find CHI by %v err: %v", _chi.Labels, err)
+ w.a.M(_chi).F().Error("internal unable to find CHI by %v err: %v", _chi.GetLabels(), err)
}
} else {
- w.a.M(&_chi.ObjectMeta).F().Error("external unable to find CHI by %v err %v", _chi.Labels, err)
+ w.a.M(_chi).F().Error("external unable to find CHI by %v err %v", _chi.GetLabels(), err)
}
w.a.V(1).
- WithEvent(_chi, eventActionReconcile, eventReasonReconcileCompleted).
+ WithEvent(_chi, common.EventActionReconcile, common.EventReasonReconcileCompleted).
WithStatusAction(_chi).
WithStatusActions(_chi).
M(_chi).F().
- Info("reconcile completed successfully, task id: %s", _chi.Spec.GetTaskID())
+ Info("reconcile completed successfully, task id: %s", _chi.GetSpecT().GetTaskID())
}
func (w *worker) markReconcileCompletedUnsuccessfully(ctx context.Context, chi *api.ClickHouseInstallation, err error) {
@@ -716,42 +538,46 @@ func (w *worker) markReconcileCompletedUnsuccessfully(ctx context.Context, chi *
switch {
case err == nil:
chi.EnsureStatus().ReconcileComplete()
- case errors.Is(err, errCRUDAbort):
+ case errors.Is(err, common.ErrCRUDAbort):
chi.EnsureStatus().ReconcileAbort()
}
- w.c.updateCHIObjectStatus(ctx, chi, UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
MainFields: true,
},
})
w.a.V(1).
- WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).
+ WithEvent(chi, common.EventActionReconcile, common.EventReasonReconcileFailed).
WithStatusAction(chi).
WithStatusActions(chi).
M(chi).F().
- Warning("reconcile completed UNSUCCESSFULLY, task id: %s", chi.Spec.GetTaskID())
+ Warning("reconcile completed UNSUCCESSFULLY, task id: %s", chi.GetSpecT().GetTaskID())
}
-func (w *worker) walkHosts(ctx context.Context, chi *api.ClickHouseInstallation, ap *model.ActionPlan) {
+func (w *worker) walkHosts(ctx context.Context, chi *api.ClickHouseInstallation, ap *action_plan.ActionPlan) {
if util.IsContextDone(ctx) {
log.V(2).Info("task is done")
return
}
- objs := w.c.discovery(ctx, chi)
+ existingObjects := w.c.discovery(ctx, chi)
ap.WalkAdded(
// Walk over added clusters
- func(cluster *api.Cluster) {
- cluster.WalkHosts(func(host *api.ChiHost) error {
+ func(cluster api.ICluster) {
+ w.a.V(1).M(chi).Info("Walking over AP added clusters. Cluster: %s", cluster.GetName())
+
+ cluster.WalkHosts(func(host *api.Host) error {
+ w.a.V(1).M(chi).Info("Walking over hosts in added clusters. Cluster: %s Host: %s", cluster.GetName(), host.GetName())
// Name of the StatefulSet for this host
- name := model.CreateStatefulSetName(host)
+ name := w.c.namer.Name(interfaces.NameStatefulSet, host)
// Have we found this StatefulSet
found := false
- objs.WalkStatefulSet(func(meta meta.ObjectMeta) {
- if name == meta.Name {
+ existingObjects.WalkStatefulSet(func(meta meta.Object) {
+ w.a.V(3).M(chi).Info("Walking over existing sts list. sts: %s", util.NamespacedName(meta))
+ if name == meta.GetName() {
// StatefulSet of this host already exist
found = true
}
@@ -760,57 +586,68 @@ func (w *worker) walkHosts(ctx context.Context, chi *api.ClickHouseInstallation,
if found {
// StatefulSet of this host already exist, we can't ADD it for sure
// It looks like FOUND is the most correct approach
+ w.a.V(1).M(chi).Info("Add host as FOUND via cluster. Host was found as sts. Host: %s", host.GetName())
host.GetReconcileAttributes().SetFound()
- w.a.V(1).M(chi).Info("Add host as FOUND. Host was found as sts %s", host.GetName())
} else {
// StatefulSet of this host does not exist, looks like we need to ADD it
+ w.a.V(1).M(chi).Info("Add host as ADD via cluster. Host was not found as sts. Host: %s", host.GetName())
host.GetReconcileAttributes().SetAdd()
- w.a.V(1).M(chi).Info("Add host as ADD. Host was not found as sts %s", host.GetName())
}
return nil
})
},
// Walk over added shards
- func(shard *api.ChiShard) {
+ func(shard api.IShard) {
+ w.a.V(1).M(chi).Info("Walking over AP added shards. Shard: %s", shard.GetName())
// Mark all hosts of the shard as newly added
- shard.WalkHosts(func(host *api.ChiHost) error {
+ shard.WalkHosts(func(host *api.Host) error {
+ w.a.V(1).M(chi).Info("Add host as ADD via shard. Shard: %s Host: %s", shard.GetName(), host.GetName())
host.GetReconcileAttributes().SetAdd()
return nil
})
},
// Walk over added hosts
- func(host *api.ChiHost) {
+ func(host *api.Host) {
+ w.a.V(1).M(chi).Info("Walking over AP added hosts. Host: %s", host.GetName())
+ w.a.V(1).M(chi).Info("Add host as ADD via host. Host: %s", host.GetName())
host.GetReconcileAttributes().SetAdd()
},
)
ap.WalkModified(
- func(cluster *api.Cluster) {
+ func(cluster api.ICluster) {
+ w.a.V(1).M(chi).Info("Walking over AP modified clusters. Cluster: %s", cluster.GetName())
},
- func(shard *api.ChiShard) {
+ func(shard api.IShard) {
+ w.a.V(1).M(chi).Info("Walking over AP modified shards. Shard: %s", shard.GetName())
},
- func(host *api.ChiHost) {
+ func(host *api.Host) {
+ w.a.V(1).M(chi).Info("Walking over AP modified hosts. Host: %s", host.GetName())
+ w.a.V(1).M(chi).Info("Add host as MODIFIED via host. Host: %s", host.GetName())
host.GetReconcileAttributes().SetModify()
},
)
- chi.WalkHosts(func(host *api.ChiHost) error {
+ chi.WalkHosts(func(host *api.Host) error {
+ w.a.V(3).M(chi).Info("Walking over CR hosts. Host: %s", host.GetName())
switch {
case host.GetReconcileAttributes().IsAdd():
- // Already added
+			w.a.V(3).M(chi).Info("Walking over CR hosts. Host is already added. Host: %s", host.GetName())
return nil
case host.GetReconcileAttributes().IsModify():
- // Already modified
+			w.a.V(3).M(chi).Info("Walking over CR hosts. Host is already modified. Host: %s", host.GetName())
return nil
default:
- // Not clear yet
+			w.a.V(3).M(chi).Info("Walking over CR hosts. Host is not yet classified (neither added nor modified). Host: %s", host.GetName())
+ w.a.V(1).M(chi).Info("Add host as FOUND via host. Host: %s", host.GetName())
host.GetReconcileAttributes().SetFound()
}
return nil
})
- chi.WalkHosts(func(host *api.ChiHost) error {
+ // Log hosts statuses
+ chi.WalkHosts(func(host *api.Host) error {
switch {
case host.GetReconcileAttributes().IsAdd():
w.a.M(host).Info("ADD host: %s", host.Runtime.Address.CompactString())
@@ -825,1033 +662,39 @@ func (w *worker) walkHosts(ctx context.Context, chi *api.ClickHouseInstallation,
})
}
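
After walkHosts finishes, every host carries exactly one reconcile attribute. A rough reduction of the decision logic above (classifyHost and its arguments are ours; hosts of added shards and individually added hosts take the ADD branch directly):

    func classifyHost(inAddedCluster, stsExists, inModifiedPlan bool) string {
        switch {
        case inAddedCluster && stsExists:
            return "FOUND" // declared in an added cluster, but its StatefulSet already exists
        case inAddedCluster:
            return "ADD" // genuinely new, no StatefulSet yet
        case inModifiedPlan:
            return "MODIFY" // the action plan reports a change
        default:
            return "FOUND" // untouched by the plan
        }
    }
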
-// getRemoteServersGeneratorOptions build base set of RemoteServersGeneratorOptions
-// which are applied on each of `remote_servers` reconfiguration during reconcile cycle
-func (w *worker) getRemoteServersGeneratorOptions() *model.RemoteServersGeneratorOptions {
- // Base model.RemoteServersGeneratorOptions specifies to exclude:
+// getRemoteServersGeneratorOptions builds the base set of RemoteServersOptions
+func (w *worker) getRemoteServersGeneratorOptions() *commonConfig.HostSelector {
+	// The base HostSelector specifies to exclude:
// 1. all newly added hosts
// 2. all explicitly excluded hosts
- return model.NewRemoteServersGeneratorOptions().ExcludeReconcileAttributes(
- api.NewChiHostReconcileAttributes().
+ return commonConfig.NewHostSelector().ExcludeReconcileAttributes(
+ api.NewHostReconcileAttributes().
SetAdd().
SetExclude(),
)
}
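
In plain terms: a host stays in the generated remote_servers section only while it is neither newly added nor explicitly excluded. As a predicate, under the assumption that the attributes expose an IsExclude accessor mirroring the SetExclude/UnsetExclude calls used elsewhere in this file (includeInRemoteServers is our name):

    func includeInRemoteServers(host *api.Host) bool {
        attrs := host.GetReconcileAttributes()
        // IsExclude is assumed here. New hosts are withheld until they are
        // reconciled; excluded hosts are withheld while they restart.
        return !attrs.IsAdd() && !attrs.IsExclude()
    }
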
-// options build ClickHouseConfigFilesGeneratorOptions
-func (w *worker) options() *model.ClickHouseConfigFilesGeneratorOptions {
+// options builds FilesGeneratorOptions
+func (w *worker) options() *config.FilesGeneratorOptions {
opts := w.getRemoteServersGeneratorOptions()
- w.a.Info("RemoteServersGeneratorOptions: %s", opts)
- return model.NewClickHouseConfigFilesGeneratorOptions().SetRemoteServersGeneratorOptions(opts)
-}
-
-// prepareHostStatefulSetWithStatus prepares host's StatefulSet status
-func (w *worker) prepareHostStatefulSetWithStatus(ctx context.Context, host *api.ChiHost, shutdown bool) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
-
- w.prepareDesiredStatefulSet(host, shutdown)
- host.GetReconcileAttributes().SetStatus(w.getStatefulSetStatus(host))
-}
-
-// prepareDesiredStatefulSet prepares desired StatefulSet
-func (w *worker) prepareDesiredStatefulSet(host *api.ChiHost, shutdown bool) {
- host.Runtime.DesiredStatefulSet = w.task.creator.CreateStatefulSet(host, shutdown)
-}
-
-type migrateTableOptions struct {
- forceMigrate bool
- dropReplica bool
-}
-
-func (o *migrateTableOptions) ForceMigrate() bool {
- if o == nil {
- return false
- }
- return o.forceMigrate
-}
-
-func (o *migrateTableOptions) DropReplica() bool {
- if o == nil {
- return false
- }
- return o.dropReplica
-}
-
-type migrateTableOptionsArr []*migrateTableOptions
-
-// NewMigrateTableOptionsArr creates new migrateTableOptions array
-func NewMigrateTableOptionsArr(opts ...*migrateTableOptions) (res migrateTableOptionsArr) {
- return append(res, opts...)
-}
-
-// First gets first option
-func (a migrateTableOptionsArr) First() *migrateTableOptions {
- if len(a) > 0 {
- return a[0]
- }
- return nil
-}
-
-// migrateTables
-func (w *worker) migrateTables(ctx context.Context, host *api.ChiHost, opts ...*migrateTableOptions) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- if !w.shouldMigrateTables(host, opts...) {
- w.a.V(1).
- M(host).F().
- Info(
- "No need to add tables on host %d to shard %d in cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return nil
- }
-
- // Need to migrate tables
-
- if w.shouldDropReplica(host, opts...) {
- w.a.V(1).
- M(host).F().
- Info(
- "Need to drop replica on host %d to shard %d in cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- w.dropReplica(ctx, host, &dropReplicaOptions{forceDrop: true})
- }
-
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info(
- "Adding tables on shard/host:%d/%d cluster:%s",
- host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
-
- err := w.ensureClusterSchemer(host).HostCreateTables(ctx, host)
- if err == nil {
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Tables added successfully on shard/host:%d/%d cluster:%s",
- host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName)
- host.GetCHI().EnsureStatus().PushHostTablesCreated(model.CreateFQDN(host))
- } else {
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Error("ERROR add tables added successfully on shard/host:%d/%d cluster:%s err:%v",
- host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName, err)
- }
- return err
-}
-
-// shouldMigrateTables
-func (w *worker) shouldMigrateTables(host *api.ChiHost, opts ...*migrateTableOptions) bool {
- o := NewMigrateTableOptionsArr(opts...).First()
-
- // Deal with special cases in order of priority
- switch {
- case host.IsStopped():
- // Stopped host is not able to receive any data, migration is inapplicable
- return false
-
- case o.ForceMigrate():
- // Force migration requested
- return true
-
- case model.HostHasTablesCreated(host):
- // This host is listed as having tables created already, no need to migrate again
- return false
-
- case model.HostIsNewOne(host):
- // CHI is new, all hosts were added
- return false
- }
-
- // In all the rest cases - perform migration
- return true
-}
-
-// shouldDropTables
-func (w *worker) shouldDropReplica(host *api.ChiHost, opts ...*migrateTableOptions) bool {
- o := NewMigrateTableOptionsArr(opts...).First()
-
- // Deal with special cases
- switch {
- case o.DropReplica():
- return true
-
- }
-
- return false
-}
-
-// excludeHost excludes host from ClickHouse clusters if required
-func (w *worker) excludeHost(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- log.V(1).M(host).F().S().Info("exclude host start")
- defer log.V(1).M(host).F().E().Info("exclude host end")
-
- if !w.shouldExcludeHost(host) {
- return nil
- }
-
- w.a.V(1).
- M(host).F().
- Info("Exclude from cluster host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
-
- _ = w.excludeHostFromService(ctx, host)
- w.excludeHostFromClickHouseCluster(ctx, host)
- return nil
-}
-
-// completeQueries wait for running queries to complete
-func (w *worker) completeQueries(ctx context.Context, host *api.ChiHost) error {
- log.V(1).M(host).F().S().Info("complete queries start")
- defer log.V(1).M(host).F().E().Info("complete queries end")
-
- if w.shouldWaitQueries(host) {
- return w.waitHostNoActiveQueries(ctx, host)
- }
-
- return nil
-}
-
-// shouldIncludeHost determines whether host to be included into cluster after reconciling
-func (w *worker) shouldIncludeHost(host *api.ChiHost) bool {
- switch {
- case host.IsStopped():
- // No need to include stopped host
- return false
- }
- return true
-}
-
-// includeHost includes host back back into ClickHouse clusters
-func (w *worker) includeHost(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- if !w.shouldIncludeHost(host) {
- w.a.V(1).
- M(host).F().
- Info("No need to include into cluster host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return nil
- }
-
- w.a.V(1).
- M(host).F().
- Info("Include into cluster host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
-
- w.includeHostIntoClickHouseCluster(ctx, host)
- _ = w.includeHostIntoService(ctx, host)
-
- return nil
-}
-
-// excludeHostFromService
-func (w *worker) excludeHostFromService(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _ = w.c.deleteLabelReadyPod(ctx, host)
- _ = w.c.deleteAnnotationReadyService(ctx, host)
- return nil
-}
-
-// includeHostIntoService
-func (w *worker) includeHostIntoService(ctx context.Context, host *api.ChiHost) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _ = w.c.appendLabelReadyOnPod(ctx, host)
- _ = w.c.appendAnnotationReadyOnService(ctx, host)
- return nil
-}
-
-// excludeHostFromClickHouseCluster excludes host from ClickHouse configuration
-func (w *worker) excludeHostFromClickHouseCluster(ctx context.Context, host *api.ChiHost) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
-
- w.a.V(1).
- M(host).F().
- Info("going to exclude host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
-
- // Specify in options to exclude this host from ClickHouse config file
- host.GetCHI().EnsureRuntime().LockCommonConfig()
- host.GetReconcileAttributes().SetExclude()
- _ = w.reconcileCHIConfigMapCommon(ctx, host.GetCHI(), w.options())
- host.GetCHI().EnsureRuntime().UnlockCommonConfig()
-
- if !w.shouldWaitExcludeHost(host) {
- return
- }
- // Wait for ClickHouse to pick-up the change
- _ = w.waitHostNotInCluster(ctx, host)
-}
-
-// includeHostIntoClickHouseCluster includes host into ClickHouse configuration
-func (w *worker) includeHostIntoClickHouseCluster(ctx context.Context, host *api.ChiHost) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return
- }
-
- w.a.V(1).
- M(host).F().
- Info("going to include host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
-
- // Specify in options to add this host into ClickHouse config file
- host.GetCHI().EnsureRuntime().LockCommonConfig()
- host.GetReconcileAttributes().UnsetExclude()
- _ = w.reconcileCHIConfigMapCommon(ctx, host.GetCHI(), w.options())
- host.GetCHI().EnsureRuntime().UnlockCommonConfig()
-
- if !w.shouldWaitIncludeHost(host) {
- return
- }
- // Wait for ClickHouse to pick-up the change
- _ = w.waitHostInCluster(ctx, host)
-}
-
-// shouldExcludeHost determines whether host to be excluded from cluster before reconciling
-func (w *worker) shouldExcludeHost(host *api.ChiHost) bool {
- switch {
- case host.IsStopped():
- w.a.V(1).
- M(host).F().
- Info("Host is stopped, no need to exclude stopped host. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- case host.GetShard().HostsCount() == 1:
- w.a.V(1).
- M(host).F().
- Info("Host is the only host in the shard (means no replication), no need to exclude. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- case w.shouldForceRestartHost(host):
- w.a.V(1).
- M(host).F().
- Info("Host should be restarted, need to exclude. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return true
- case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
- w.a.V(1).
- M(host).F().
- Info("Host is new, no need to exclude. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame:
- w.a.V(1).
- M(host).F().
- Info("Host is the same, would not be updated, no need to exclude. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- }
-
- w.a.V(1).
- M(host).F().
- Info("Host should be excluded. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
-
- return true
-}
-
-// shouldWaitExcludeHost determines whether reconciler should wait for the host to be excluded from cluster
-func (w *worker) shouldWaitExcludeHost(host *api.ChiHost) bool {
- // Check CHI settings
- switch {
- case host.GetCHI().GetReconciling().IsReconcilingPolicyWait():
- w.a.V(1).
- M(host).F().
- Info("IsReconcilingPolicyWait() need to wait to exclude host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return true
- case host.GetCHI().GetReconciling().IsReconcilingPolicyNoWait():
- w.a.V(1).
- M(host).F().
- Info("IsReconcilingPolicyNoWait() need NOT to wait to exclude host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- }
-
- w.a.V(1).
- M(host).F().
- Info("wait to exclude host fallback to operator's settings. host %d shard %d cluster %s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return chop.Config().Reconcile.Host.Wait.Exclude.Value()
-}
-
-// shouldWaitQueries determines whether reconciler should wait for the host to complete running queries
-func (w *worker) shouldWaitQueries(host *api.ChiHost) bool {
- switch {
- case host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew:
- w.a.V(1).
- M(host).F().
- Info("No need to wait for queries to complete, host is a new one. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
- case chop.Config().Reconcile.Host.Wait.Queries.Value():
- w.a.V(1).
- M(host).F().
- Info("Will wait for queries to complete according to CHOp config 'reconcile.host.wait.queries' setting. "+
- "Host is not yet in the cluster. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return true
- case host.GetCHI().GetReconciling().IsReconcilingPolicyWait():
- w.a.V(1).
- M(host).F().
- Info("Will wait for queries to complete according to CHI 'reconciling.policy' setting. "+
- "Host is not yet in the cluster. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return true
- }
-
- w.a.V(1).
- M(host).F().
- Info("Will NOT wait for queries to complete on the host. Host/shard/cluster: %d/%d/%s",
- host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
- return false
-}
-
-// shouldWaitIncludeHost determines whether reconciler should wait for the host to be included into cluster
-func (w *worker) shouldWaitIncludeHost(host *api.ChiHost) bool {
- status := host.GetReconcileAttributes().GetStatus()
- switch {
- case status == api.ObjectStatusNew:
- return false
- case status == api.ObjectStatusSame:
- // The same host was not modified and no need to wait it to be included - it already is
- return false
- case host.GetShard().HostsCount() == 1:
- // No need to wait one-host-shard
- return false
- case host.GetCHI().GetReconciling().IsReconcilingPolicyWait():
- // Check CHI settings - explicitly requested to wait
- return true
- case host.GetCHI().GetReconciling().IsReconcilingPolicyNoWait():
- // Check CHI settings - explicitly requested to not wait
- return false
- }
-
- // Fallback to operator's settings
- return chop.Config().Reconcile.Host.Wait.Include.Value()
-}
-
-// waitHostInCluster
-func (w *worker) waitHostInCluster(ctx context.Context, host *api.ChiHost) error {
- return w.c.pollHost(ctx, host, nil, w.ensureClusterSchemer(host).IsHostInCluster)
-}
-
-// waitHostNotInCluster
-func (w *worker) waitHostNotInCluster(ctx context.Context, host *api.ChiHost) error {
- return w.c.pollHost(ctx, host, nil, func(ctx context.Context, host *api.ChiHost) bool {
- return !w.ensureClusterSchemer(host).IsHostInCluster(ctx, host)
- })
+ w.a.Info("RemoteServersOptions: %s", opts)
+ return config.NewFilesGeneratorOptions().SetRemoteServersOptions(opts)
}
-// waitHostNoActiveQueries
-func (w *worker) waitHostNoActiveQueries(ctx context.Context, host *api.ChiHost) error {
- return w.c.pollHost(ctx, host, nil, func(ctx context.Context, host *api.ChiHost) bool {
- n, _ := w.ensureClusterSchemer(host).HostActiveQueriesNum(ctx, host)
- return n <= 1
- })
-}
+// createCRFromObjectMeta
+func (w *worker) createCRFromObjectMeta(meta meta.Object, isCHI bool, options *commonNormalizer.Options) (*api.ClickHouseInstallation, error) {
+ w.a.V(3).M(meta).S().P()
+ defer w.a.V(3).M(meta).E().P()
-// createCHIFromObjectMeta
-func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta, isCHI bool, options *normalizer.Options) (*api.ClickHouseInstallation, error) {
- w.a.V(3).M(objectMeta).S().P()
- defer w.a.V(3).M(objectMeta).E().P()
-
- chi, err := w.c.GetCHIByObjectMeta(objectMeta, isCHI)
+ chi, err := w.c.GetCHIByObjectMeta(meta, isCHI)
if err != nil {
return nil, err
}
- chi, err = w.normalizer.CreateTemplatedCHI(chi, options)
+ chi, err = w.normalizer.CreateTemplated(chi, options)
if err != nil {
return nil, err
}
return chi, nil
}
-
-// updateConfigMap
-func (w *worker) updateConfigMap(ctx context.Context, chi *api.ClickHouseInstallation, configMap *core.ConfigMap) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- updatedConfigMap, err := w.c.kubeClient.CoreV1().ConfigMaps(configMap.Namespace).Update(ctx, configMap, controller.NewUpdateOptions())
- if err == nil {
- w.a.V(1).
- WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted).
- WithStatusAction(chi).
- M(chi).F().
- Info("Update ConfigMap %s/%s", configMap.Namespace, configMap.Name)
- if updatedConfigMap.ResourceVersion != configMap.ResourceVersion {
- w.task.cmUpdate = time.Now()
- }
- } else {
- w.a.WithEvent(chi, eventActionUpdate, eventReasonUpdateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("Update ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err)
- }
-
- return err
-}
-
-// createConfigMap
-func (w *worker) createConfigMap(ctx context.Context, chi *api.ClickHouseInstallation, configMap *core.ConfigMap) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _, err := w.c.kubeClient.CoreV1().ConfigMaps(configMap.Namespace).Create(ctx, configMap, controller.NewCreateOptions())
- if err == nil {
- w.a.V(1).
- WithEvent(chi, eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(chi).
- M(chi).F().
- Info("Create ConfigMap %s/%s", configMap.Namespace, configMap.Name)
- } else {
- w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("Create ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err)
- }
-
- return err
-}
-
-// updateService
-func (w *worker) updateService(
- ctx context.Context,
- chi *api.ClickHouseInstallation,
- curService *core.Service,
- targetService *core.Service,
-) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- if curService.Spec.Type != targetService.Spec.Type {
- return fmt.Errorf(
- "just recreate the service in case of service type change '%s'=>'%s'",
- curService.Spec.Type, targetService.Spec.Type)
- }
-
- // Updating a Service is a complicated business
-
- newService := targetService.DeepCopy()
-
- // spec.resourceVersion is required in order to update an object
- newService.ResourceVersion = curService.ResourceVersion
-
- //
- // Migrate ClusterIP to the new service
- //
- // spec.clusterIP field is immutable, need to use already assigned value
- // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
- // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
- // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
- newService.Spec.ClusterIP = curService.Spec.ClusterIP
-
- //
- // Migrate existing ports to the new service for NodePort and LoadBalancer services
- //
- // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
- // Usually assigned by the system. If specified, it will be allocated to the service if unused
- // or else creation of the service will fail.
- // Default is to auto-allocate a port if the ServiceType of this Service requires one.
- // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-
- // !!! IMPORTANT !!!
- // No changes in service type is allowed.
- // Already exposed port details can not be changed.
-
- serviceTypeIsNodePort := (curService.Spec.Type == core.ServiceTypeNodePort) && (newService.Spec.Type == core.ServiceTypeNodePort)
- serviceTypeIsLoadBalancer := (curService.Spec.Type == core.ServiceTypeLoadBalancer) && (newService.Spec.Type == core.ServiceTypeLoadBalancer)
- if serviceTypeIsNodePort || serviceTypeIsLoadBalancer {
- for i := range newService.Spec.Ports {
- newPort := &newService.Spec.Ports[i]
- for j := range curService.Spec.Ports {
- curPort := &curService.Spec.Ports[j]
- if newPort.Port == curPort.Port {
- // Already have this port specified - reuse all internals,
- // due to limitations with auto-assigned values
- *newPort = *curPort
- w.a.M(chi).F().Info("reuse Port %d values", newPort.Port)
- break
- }
- }
- }
- }
-
- //
- // Migrate HealthCheckNodePort to the new service
- //
- // spec.healthCheckNodePort field is used with ExternalTrafficPolicy=Local only and is immutable within ExternalTrafficPolicy=Local
- // In case ExternalTrafficPolicy is changed it seems to be irrelevant
- // https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
- curExternalTrafficPolicyTypeLocal := curService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
- newExternalTrafficPolicyTypeLocal := newService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
- if curExternalTrafficPolicyTypeLocal && newExternalTrafficPolicyTypeLocal {
- newService.Spec.HealthCheckNodePort = curService.Spec.HealthCheckNodePort
- }
-
- //
- // Migrate LoadBalancerClass to the new service
- //
- // This field can only be set when creating or updating a Service to type 'LoadBalancer'.
- // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
- if curService.Spec.LoadBalancerClass != nil {
- newService.Spec.LoadBalancerClass = curService.Spec.LoadBalancerClass
- }
-
- //
- // Migrate labels, annotations and finalizers to the new service
- //
- newService.ObjectMeta.Labels = util.MergeStringMapsPreserve(newService.ObjectMeta.Labels, curService.ObjectMeta.Labels)
- newService.ObjectMeta.Annotations = util.MergeStringMapsPreserve(newService.ObjectMeta.Annotations, curService.ObjectMeta.Annotations)
- newService.ObjectMeta.Finalizers = util.MergeStringArrays(newService.ObjectMeta.Finalizers, curService.ObjectMeta.Finalizers)
-
- //
- // And only now we are ready to actually update the service with new version of the service
- //
-
- _, err := w.c.kubeClient.CoreV1().Services(newService.Namespace).Update(ctx, newService, controller.NewUpdateOptions())
- if err == nil {
- w.a.V(1).
- WithEvent(chi, eventActionUpdate, eventReasonUpdateCompleted).
- WithStatusAction(chi).
- M(chi).F().
- Info("Update Service success: %s/%s", newService.Namespace, newService.Name)
- } else {
- w.a.M(chi).F().Error("Update Service fail: %s/%s failed with error %v", newService.Namespace, newService.Name)
- }
-
- return err
-}
-
-// createService
-func (w *worker) createService(ctx context.Context, chi *api.ClickHouseInstallation, service *core.Service) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _, err := w.c.kubeClient.CoreV1().Services(service.Namespace).Create(ctx, service, controller.NewCreateOptions())
- if err == nil {
- w.a.V(1).
- WithEvent(chi, eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(chi).
- M(chi).F().
- Info("OK Create Service: %s/%s", service.Namespace, service.Name)
- } else {
- w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("FAILED Create Service: %s/%s err: %v", service.Namespace, service.Name, err)
- }
-
- return err
-}
-
-// createSecret
-func (w *worker) createSecret(ctx context.Context, chi *api.ClickHouseInstallation, secret *core.Secret) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _, err := w.c.kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, controller.NewCreateOptions())
- if err == nil {
- w.a.V(1).
- WithEvent(chi, eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(chi).
- M(chi).F().
- Info("Create Secret %s/%s", secret.Namespace, secret.Name)
- } else {
- w.a.WithEvent(chi, eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(chi).
- WithStatusError(chi).
- M(chi).F().
- Error("Create Secret %s/%s failed with error %v", secret.Namespace, secret.Name, err)
- }
-
- return err
-}
-
-// getStatefulSetStatus gets StatefulSet status
-func (w *worker) getStatefulSetStatus(host *api.ChiHost) api.ObjectStatus {
- meta := host.Runtime.DesiredStatefulSet.ObjectMeta
- w.a.V(2).M(meta).S().Info(util.NamespaceNameString(meta))
- defer w.a.V(2).M(meta).E().Info(util.NamespaceNameString(meta))
-
- curStatefulSet, err := w.c.getStatefulSet(&meta, false)
- switch {
- case curStatefulSet != nil:
- w.a.V(2).M(meta).Info("Have StatefulSet available, try to perform label-based comparison for %s/%s", meta.Namespace, meta.Name)
- return w.getObjectStatusFromMetas(curStatefulSet.ObjectMeta, meta)
-
- case apiErrors.IsNotFound(err):
- // StatefulSet is not found at the moment.
- // However, it may be just deleted
- w.a.V(2).M(meta).Info("No cur StatefulSet available and it is not found. Either new one or deleted for %s/%s", meta.Namespace, meta.Name)
- if host.IsNewOne() {
- w.a.V(2).M(meta).Info("No cur StatefulSet available and it is not found and is a new one. New one for %s/%s", meta.Namespace, meta.Name)
- return api.ObjectStatusNew
- }
- w.a.V(1).M(meta).Warning("No cur StatefulSet available but host has an ancestor. Found deleted StatefulSet. for %s/%s", meta.Namespace, meta.Name)
- return api.ObjectStatusModified
-
- default:
- w.a.V(2).M(meta).Warning("Have no StatefulSet available, nor it is not found for %s/%s err: %v", meta.Namespace, meta.Name, err)
- return api.ObjectStatusUnknown
- }
-}
-
-// getObjectStatusFromMetas gets StatefulSet status from cur and new meta infos
-func (w *worker) getObjectStatusFromMetas(curMeta, newMeta meta.ObjectMeta) api.ObjectStatus {
- // Try to perform label-based version comparison
- curVersion, curHasLabel := model.GetObjectVersion(curMeta)
- newVersion, newHasLabel := model.GetObjectVersion(newMeta)
-
- if !curHasLabel || !newHasLabel {
- w.a.M(newMeta).F().Warning(
- "Not enough labels to compare objects, can not say for sure what exactly is going on. Object: %s",
- util.NamespaceNameString(newMeta),
- )
- return api.ObjectStatusUnknown
- }
-
- //
- // We have both set of labels, can compare them
- //
-
- if curVersion == newVersion {
- w.a.M(newMeta).F().Info(
- "cur and new objects are equal based on object version label. Update of the object is not required. Object: %s",
- util.NamespaceNameString(newMeta),
- )
- return api.ObjectStatusSame
- }
-
- w.a.M(newMeta).F().Info(
- "cur and new objects ARE DIFFERENT based on object version label: Update of the object is required. Object: %s",
- util.NamespaceNameString(newMeta),
- )
-
- return api.ObjectStatusModified
-}
-
-// createStatefulSet
-func (w *worker) createStatefulSet(ctx context.Context, host *api.ChiHost, register bool) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- statefulSet := host.Runtime.DesiredStatefulSet
-
- w.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.ObjectMeta))
- defer w.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.ObjectMeta))
-
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Create StatefulSet %s/%s - started", statefulSet.Namespace, statefulSet.Name)
-
- action := w.c.createStatefulSet(ctx, host)
-
- if register {
- host.GetCHI().EnsureStatus().HostAdded()
- _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
- MainFields: true,
- },
- })
- }
-
- switch action {
- case nil:
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateCompleted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Create StatefulSet %s/%s - completed", statefulSet.Namespace, statefulSet.Name)
- return nil
- case errCRUDAbort:
- w.a.WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(host.GetCHI()).
- WithStatusError(host.GetCHI()).
- M(host).F().
- Error("Create StatefulSet %s/%s - failed with error %v", statefulSet.Namespace, statefulSet.Name, action)
- return action
- case errCRUDIgnore:
- w.a.WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateFailed).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Warning("Create StatefulSet %s/%s - error ignored", statefulSet.Namespace, statefulSet.Name)
- return nil
- case errCRUDRecreate:
- w.a.V(1).M(host).Warning("Got recreate action. Ignore and continue for now")
- return nil
- case errCRUDUnexpectedFlow:
- w.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
- return nil
- }
-
- w.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
- return nil
-}
-
-// waitConfigMapPropagation
-func (w *worker) waitConfigMapPropagation(ctx context.Context, host *api.ChiHost) bool {
- // No need to wait for ConfigMap propagation on stopped host
- if host.IsStopped() {
- w.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - on stopped host")
- return false
- }
-
- // No need to wait on unchanged ConfigMap
- if w.task.cmUpdate.IsZero() {
- w.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - no changes in ConfigMap")
- return false
- }
-
- // What timeout is expected to be enough for ConfigMap propagation?
- // In case timeout is not specified, no need to wait
- timeout := host.GetCHI().GetReconciling().GetConfigMapPropagationTimeoutDuration()
- if timeout == 0 {
- w.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - not applicable")
- return false
- }
-
- // How much time has elapsed since last ConfigMap update?
- // May be there is not need to wait already
- elapsed := time.Now().Sub(w.task.cmUpdate)
- if elapsed >= timeout {
- w.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - already elapsed. %s/%s", elapsed, timeout)
- return false
- }
-
- // Looks like we need to wait for Configmap propagation, after all
- wait := timeout - elapsed
- w.a.V(1).M(host).F().Info("Wait for ConfigMap propagation for %s %s/%s", wait, elapsed, timeout)
- if util.WaitContextDoneOrTimeout(ctx, wait) {
- log.V(2).Info("task is done")
- return true
- }
-
- return false
-}
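
The wait above is plain duration arithmetic: the time elapsed since the last ConfigMap write is compared against the configured propagation timeout, and only the remainder is slept off. (`time.Now().Sub(x)` is more idiomatically written `time.Since(x)`.) A minimal standalone sketch of the same computation, with names mirroring the fields above:

```go
package main

import (
	"fmt"
	"time"
)

// remainingWait mirrors waitConfigMapPropagation's arithmetic: a zero result
// means "no need to wait", otherwise it is the rest of the propagation window.
func remainingWait(cmUpdate time.Time, timeout time.Duration) time.Duration {
	if cmUpdate.IsZero() || timeout == 0 {
		return 0 // no ConfigMap change, or waiting disabled
	}
	elapsed := time.Since(cmUpdate) // idiomatic form of time.Now().Sub(cmUpdate)
	if elapsed >= timeout {
		return 0 // the propagation window has already passed
	}
	return timeout - elapsed
}

func main() {
	cmUpdate := time.Now().Add(-10 * time.Second)
	fmt.Println(remainingWait(cmUpdate, 30*time.Second)) // roughly 20s
}
```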
-
-// updateStatefulSet
-func (w *worker) updateStatefulSet(ctx context.Context, host *api.ChiHost, register bool) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- // Helpers
- newStatefulSet := host.Runtime.DesiredStatefulSet
- curStatefulSet := host.Runtime.CurStatefulSet
-
- w.a.V(2).M(host).S().Info(newStatefulSet.Name)
- defer w.a.V(2).M(host).E().Info(newStatefulSet.Name)
-
- namespace := newStatefulSet.Namespace
- name := newStatefulSet.Name
-
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionCreate, eventReasonCreateStarted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Update StatefulSet(%s/%s) - started", namespace, name)
-
- if w.waitConfigMapPropagation(ctx, host) {
- log.V(2).Info("task is done")
- return nil
- }
-
- action := errCRUDRecreate
- if k8s.IsStatefulSetReady(curStatefulSet) {
- action = w.c.updateStatefulSet(ctx, curStatefulSet, newStatefulSet, host)
- }
-
- switch action {
- case nil:
- if register {
- host.GetCHI().EnsureStatus().HostUpdated()
- _ = w.c.updateCHIObjectStatus(ctx, host.GetCHI(), UpdateCHIStatusOptions{
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
- MainFields: true,
- },
- })
- }
- w.a.V(1).
- WithEvent(host.GetCHI(), eventActionUpdate, eventReasonUpdateCompleted).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Update StatefulSet(%s/%s) - completed", namespace, name)
- return nil
- case errCRUDAbort:
- w.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got abort. Abort", namespace, name)
- return errCRUDAbort
- case errCRUDIgnore:
- w.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name)
- return nil
- case errCRUDRecreate:
- w.a.WithEvent(host.GetCHI(), eventActionUpdate, eventReasonUpdateInProgress).
- WithStatusAction(host.GetCHI()).
- M(host).F().
- Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name)
- w.dumpStatefulSetDiff(host, curStatefulSet, newStatefulSet)
- return w.recreateStatefulSet(ctx, host, register)
- case errCRUDUnexpectedFlow:
- w.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
- return nil
- }
-
- w.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
- return nil
-}
-
-// recreateStatefulSet
-func (w *worker) recreateStatefulSet(ctx context.Context, host *api.ChiHost, register bool) error {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- _ = w.c.deleteStatefulSet(ctx, host)
- _ = w.reconcilePVCs(ctx, host, api.DesiredStatefulSet)
- return w.createStatefulSet(ctx, host, register)
-}
-
-// applyPVCResourcesRequests
-func (w *worker) applyPVCResourcesRequests(
- pvc *core.PersistentVolumeClaim,
- template *api.VolumeClaimTemplate,
-) bool {
- return w.applyResourcesList(pvc.Spec.Resources.Requests, template.Spec.Resources.Requests)
-}
-
-// applyResourcesList
-func (w *worker) applyResourcesList(
- curResourceList core.ResourceList,
- desiredResourceList core.ResourceList,
-) bool {
- // Prepare lists of resource names
- var curResourceNames []core.ResourceName
- for resourceName := range curResourceList {
- curResourceNames = append(curResourceNames, resourceName)
- }
- var desiredResourceNames []core.ResourceName
- for resourceName := range desiredResourceList {
- desiredResourceNames = append(desiredResourceNames, resourceName)
- }
-
- resourceNames := intersect.Simple(curResourceNames, desiredResourceNames)
- updated := false
- for _, resourceName := range resourceNames.([]interface{}) {
- updated = updated || w.applyResource(curResourceList, desiredResourceList, resourceName.(core.ResourceName))
- }
- return updated
-}
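
Two things are worth flagging in `applyResourcesList`: the `intersect.Simple` result forces `[]interface{}` assertions, and `updated = updated || w.applyResource(...)` short-circuits, so once one resource changes the remaining names are never applied. A sketch of the same loop with both issues avoided; it assumes only the `core.ResourceList` and `resource.Quantity` APIs already used above:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// applyResources copies desired quantities over matching entries of cur and
// reports whether anything changed. Every shared resource name is visited,
// even after the first change.
func applyResources(cur, desired core.ResourceList) bool {
	updated := false
	for name, curQty := range cur { // ranging over cur yields the intersection directly
		desiredQty, ok := desired[name]
		if !ok {
			continue // not present in the desired list
		}
		if curQty.Equal(desiredQty) {
			continue // already in sync
		}
		cur[name] = desiredQty
		updated = true // no short-circuit: keep iterating
	}
	return updated
}

func main() {
	cur := core.ResourceList{core.ResourceStorage: resource.MustParse("1Gi")}
	desired := core.ResourceList{core.ResourceStorage: resource.MustParse("2Gi")}
	changed := applyResources(cur, desired)
	got := cur[core.ResourceStorage]
	fmt.Println(changed, got.String()) // true 2Gi
}
```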
-
-// applyResource
-func (w *worker) applyResource(
- curResourceList core.ResourceList,
- desiredResourceList core.ResourceList,
- resourceName core.ResourceName,
-) bool {
- if (curResourceList == nil) || (desiredResourceList == nil) {
- // Nowhere or nothing to apply
- return false
- }
-
- var ok bool
- var curResourceQuantity resource.Quantity
- var desiredResourceQuantity resource.Quantity
-
- if curResourceQuantity, ok = curResourceList[resourceName]; !ok {
- // No such resource in target list
- return false
- }
-
- if desiredResourceQuantity, ok = desiredResourceList[resourceName]; !ok {
- // No such resource in desired list
- return false
- }
-
- if curResourceQuantity.Equal(desiredResourceQuantity) {
- // No need to apply
- return false
- }
-
- // Update resource
- curResourceList[resourceName] = desiredResourceList[resourceName]
- return true
-}
-
-func (w *worker) ensureClusterSchemer(host *api.ChiHost) *schemer.ClusterSchemer {
- if w == nil {
- return nil
- }
- // Make base cluster connection params
- clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config())
- // Adjust base cluster connection params with per-host props
- switch clusterConnectionParams.Scheme {
- case api.ChSchemeAuto:
- switch {
- case api.IsPortAssigned(host.HTTPPort):
- clusterConnectionParams.Scheme = "http"
- clusterConnectionParams.Port = int(host.HTTPPort)
- case api.IsPortAssigned(host.HTTPSPort):
- clusterConnectionParams.Scheme = "https"
- clusterConnectionParams.Port = int(host.HTTPSPort)
- }
- case api.ChSchemeHTTP:
- clusterConnectionParams.Port = int(host.HTTPPort)
- case api.ChSchemeHTTPS:
- clusterConnectionParams.Port = int(host.HTTPSPort)
- }
- w.schemer = schemer.NewClusterSchemer(clusterConnectionParams, host.Runtime.Version)
-
- return w.schemer
-}
diff --git a/pkg/controller/chk/controller-config-map.go b/pkg/controller/chk/controller-config-map.go
new file mode 100644
index 000000000..67a443cee
--- /dev/null
+++ b/pkg/controller/chk/controller-config-map.go
@@ -0,0 +1,50 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// getConfigMap gets ConfigMap
+func (c *Controller) getConfigMap(ctx context.Context, meta meta.Object) (*core.ConfigMap, error) {
+ return c.kube.ConfigMap().Get(ctx, meta.GetNamespace(), meta.GetName())
+}
+
+func (c *Controller) createConfigMap(ctx context.Context, cm *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.ConfigMap().Create(ctx, cm)
+
+ return err
+}
+
+func (c *Controller) updateConfigMap(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil, nil
+ }
+
+ return c.kube.ConfigMap().Update(ctx, cm)
+}
diff --git a/pkg/model/chi/normalizer/entities/host.go b/pkg/controller/chk/controller-deleter.go
similarity index 55%
rename from pkg/model/chi/normalizer/entities/host.go
rename to pkg/controller/chk/controller-deleter.go
index ac40667e2..709511c0a 100644
--- a/pkg/model/chi/normalizer/entities/host.go
+++ b/pkg/controller/chk/controller-deleter.go
@@ -12,24 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package entities
+package chk
import (
- core "k8s.io/api/core/v1"
+ "context"
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
)
-// NormalizeHostPorts ensures api.ChiReplica.Port is reasonable
-func NormalizeHostPorts(host *api.ChiHost) {
- // Walk over all assigned ports of the host and append each port to the list of service's ports
- model.HostWalkInvalidPorts(
- host,
- func(name string, port *int32, protocol core.Protocol) bool {
- *port = api.PortUnassigned()
- // Do not abort, continue iterating
- return false
- },
- )
+// deleteServiceCR
+func (c *Controller) deleteServiceCR(ctx context.Context, cr api.ICustomResource) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ serviceName := c.namer.Name(interfaces.NameCRService, cr)
+ namespace := cr.GetNamespace()
+ log.V(1).M(cr).F().Info("%s/%s", namespace, serviceName)
+ return c.deleteServiceIfExists(ctx, namespace, serviceName)
}
diff --git a/pkg/controller/chk/controller-discoverer.go b/pkg/controller/chk/controller-discoverer.go
new file mode 100644
index 000000000..24db55559
--- /dev/null
+++ b/pkg/controller/chk/controller-discoverer.go
@@ -0,0 +1,158 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ chkLabeler "github.com/altinity/clickhouse-operator/pkg/model/chk/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func getLabeler(cr api.ICustomResource) interfaces.ILabeler {
+ return chkLabeler.New(cr)
+}
+
+func (c *Controller) discovery(ctx context.Context, cr api.ICustomResource) *model.Registry {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ opts := controller.NewListOptions(getLabeler(cr).Selector(interfaces.SelectorCRScope))
+ r := model.NewRegistry()
+ c.discoveryStatefulSets(ctx, r, cr, opts)
+ c.discoveryConfigMaps(ctx, r, cr, opts)
+ c.discoveryServices(ctx, r, cr, opts)
+ c.discoverySecrets(ctx, r, cr, opts)
+ c.discoveryPVCs(ctx, r, cr, opts)
+ // Comment out PV
+ //c.discoveryPVs(ctx, r, chi, opts)
+ c.discoveryPDBs(ctx, r, cr, opts)
+ return r
+}
+
+func (c *Controller) discoveryStatefulSets(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.STS().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list StatefulSet - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list StatefulSet - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterStatefulSet(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryConfigMaps(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.ConfigMap().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list ConfigMap - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list ConfigMap - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterConfigMap(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryServices(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Service().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list Service - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list Service - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterService(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoverySecrets(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Secret().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list Secret - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list Secret - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterSecret(obj.GetObjectMeta())
+ }
+}
+
+func (c *Controller) discoveryPVCs(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.Storage().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list PVC - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list PVC - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterPVC(obj.GetObjectMeta())
+ }
+}
+
+// Comment out PV
+//func (c *Controller) discoveryPVs(ctx context.Context, r *chopModel.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+// list, err := c.kubeClient.CoreV1().PersistentVolumes().List(ctx, opts)
+// if err != nil {
+// log.M(cr).F().Error("FAIL list PV err: %v", err)
+// return
+// }
+// if list == nil {
+// log.M(cr).F().Error("FAIL list PV list is nil")
+// return
+// }
+// for _, obj := range list.Items {
+// r.RegisterPV(obj.ObjectMeta)
+// }
+//}
+
+func (c *Controller) discoveryPDBs(ctx context.Context, r *model.Registry, cr api.ICustomResource, opts meta.ListOptions) {
+ list, err := c.kube.PDB().List(ctx, cr.GetNamespace(), opts)
+ if err != nil {
+ log.M(cr).F().Error("FAIL to list PDB - err: %v", err)
+ return
+ }
+ if list == nil {
+ log.M(cr).F().Error("FAIL to list PDB - list is nil")
+ return
+ }
+ for _, obj := range list {
+ r.RegisterPDB(obj.GetObjectMeta())
+ }
+}
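
The discovery pass builds a `model.Registry` of everything in the cluster that carries this CR's labels; a reconciler typically subtracts the registry of objects it just (re)created to find orphans to clean up. A standalone sketch of that subtraction with a stand-in registry type (this is not the operator's `model.Registry` API):

```go
package main

import "fmt"

type key struct{ kind, namespace, name string }

// Registry is a minimal stand-in for a discovered-objects registry.
type Registry map[key]struct{}

func (r Registry) Register(kind, namespace, name string) { r[key{kind, namespace, name}] = struct{}{} }

// Subtract returns entries present in r but absent from other - deletion candidates.
func (r Registry) Subtract(other Registry) Registry {
	out := Registry{}
	for k := range r {
		if _, ok := other[k]; !ok {
			out[k] = struct{}{}
		}
	}
	return out
}

func main() {
	discovered, reconciled := Registry{}, Registry{}
	discovered.Register("ConfigMap", "ns", "old-cm") // exists in cluster only
	discovered.Register("ConfigMap", "ns", "cm")
	reconciled.Register("ConfigMap", "ns", "cm")
	fmt.Println(discovered.Subtract(reconciled)) // map[{ConfigMap ns old-cm}:{}]
}
```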
diff --git a/pkg/controller/chk/controller-getter.go b/pkg/controller/chk/controller-getter.go
new file mode 100644
index 000000000..8c140e80c
--- /dev/null
+++ b/pkg/controller/chk/controller-getter.go
@@ -0,0 +1,62 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "fmt"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ chkLabeler "github.com/altinity/clickhouse-operator/pkg/model/chk/tags/labeler"
+)
+
+// getPodsIPs gets all pod IPs
+func (c *Controller) getPodsIPs(obj interface{}) (ips []string) {
+ log.V(3).M(obj).F().S().Info("looking for pods IPs")
+ defer log.V(3).M(obj).F().E().Info("looking for pods IPs")
+
+ for _, pod := range c.kube.Pod().GetAll(obj) {
+ if ip := pod.Status.PodIP; ip == "" {
+ log.V(3).M(pod).F().Warning("Pod NO IP address found. Pod: %s/%s", pod.Namespace, pod.Name)
+ } else {
+ ips = append(ips, ip)
+ log.V(3).M(pod).F().Info("Pod IP address found. Pod: %s/%s IP: %s", pod.Namespace, pod.Name, ip)
+ }
+ }
+ return ips
+}
+
+// GetCHIByObjectMeta gets ClickHouseKeeperInstallation by namespaced name
+func (c *Controller) GetCHIByObjectMeta(meta meta.Object, isCR bool) (*apiChk.ClickHouseKeeperInstallation, error) {
+ var crName string
+ if isCR {
+ crName = meta.GetName()
+ } else {
+ var err error
+ crName, err = chkLabeler.New(nil).GetCRNameFromObjectMeta(meta)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find CR by name: '%s'. More info: %v", meta.GetName(), err)
+ }
+ }
+
+ cr, err := c.kube.CR().Get(controller.NewContext(), meta.GetNamespace(), crName)
+ if cr == nil {
+ return nil, err
+ }
+ return cr.(*apiChk.ClickHouseKeeperInstallation), err
+}
diff --git a/pkg/controller/chk/controller-pdb.go b/pkg/controller/chk/controller-pdb.go
new file mode 100644
index 000000000..6692f5924
--- /dev/null
+++ b/pkg/controller/chk/controller-pdb.go
@@ -0,0 +1,50 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ policy "k8s.io/api/policy/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (c *Controller) getPDB(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ return c.kube.PDB().Get(ctx, pdb.GetNamespace(), pdb.GetName())
+}
+
+func (c *Controller) createPDB(ctx context.Context, pdb *policy.PodDisruptionBudget) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.PDB().Create(ctx, pdb)
+
+ return err
+}
+
+func (c *Controller) updatePDB(ctx context.Context, pdb *policy.PodDisruptionBudget) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ _, err := c.kube.PDB().Update(ctx, pdb)
+
+ return err
+}
diff --git a/pkg/controller/chk/controller-service.go b/pkg/controller/chk/controller-service.go
new file mode 100644
index 000000000..d60370a38
--- /dev/null
+++ b/pkg/controller/chk/controller-service.go
@@ -0,0 +1,71 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (c *Controller) getService(ctx context.Context, service *core.Service) (*core.Service, error) {
+ return c.kube.Service().Get(ctx, service)
+}
+
+func (c *Controller) createService(ctx context.Context, service *core.Service) error {
+ _, err := c.kube.Service().Create(ctx, service)
+ return err
+}
+
+func (c *Controller) updateService(ctx context.Context, service *core.Service) error {
+ _, err := c.kube.Service().Update(ctx, service)
+ return err
+}
+
+// deleteServiceIfExists deletes the Service if it exists
+func (c *Controller) deleteServiceIfExists(ctx context.Context, namespace, name string) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ // Check specified service exists
+ _, err := c.kube.Service().Get(ctx, &core.Service{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ })
+
+ if err != nil {
+		// No such service, nothing to delete
+ log.V(1).M(namespace, name).F().Info("Not Found Service: %s/%s err: %v", namespace, name, err)
+ return nil
+ }
+
+ // Delete service
+ err = c.kube.Service().Delete(ctx, namespace, name)
+ if err == nil {
+ log.V(1).M(namespace, name).F().Info("OK delete Service: %s/%s", namespace, name)
+ } else {
+ log.V(1).M(namespace, name).F().Error("FAIL delete Service: %s/%s err:%v", namespace, name, err)
+ }
+
+ return err
+}
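
`deleteServiceIfExists` issues a Get first so that a missing Service is not reported as a delete failure. An alternative, sketched below under the same controller-runtime client assumption, is to Delete unconditionally and treat NotFound as success, which avoids the get/delete race and one API round-trip (at the cost of the "Not Found" log line above):

```go
package sketch

import (
	"context"

	core "k8s.io/api/core/v1"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteServiceIdempotent deletes the named Service, treating NotFound as success.
func deleteServiceIdempotent(ctx context.Context, kubeClient client.Client, namespace, name string) error {
	svc := &core.Service{
		ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name},
	}
	if err := kubeClient.Delete(ctx, svc); err != nil && !apiErrors.IsNotFound(err) {
		return err // a real failure, not "already gone"
	}
	return nil // deleted, or it never existed
}
```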
diff --git a/pkg/controller/chk/controller-status.go b/pkg/controller/chk/controller-status.go
new file mode 100644
index 000000000..4cc91c3ca
--- /dev/null
+++ b/pkg/controller/chk/controller-status.go
@@ -0,0 +1,82 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// updateCRObjectStatus updates Custom Resource object's Status
+func (c *Controller) updateCRObjectStatus(ctx context.Context, cr api.ICustomResource, opts types.UpdateStatusOptions) (err error) {
+ return c.kube.CR().StatusUpdate(ctx, cr, opts)
+}
+
+func (c *Controller) reconcileClusterStatus(chk *apiChk.ClickHouseKeeperInstallation) (err error) {
+	// NOTE: status reconciliation is short-circuited for now - everything below
+	// this return is unreachable and kept only for reference
+	return nil
+ //readyMembers, err := c.getReadyPods(chk)
+ if err != nil {
+ return err
+ }
+
+ for {
+ // Fetch the latest ClickHouseKeeper instance again
+ cur := &apiChk.ClickHouseKeeperInstallation{}
+ if err := c.Client.Get(context.TODO(), util.NamespacedName(chk), cur); err != nil {
+ log.V(1).Error("Error: not found %s err: %s", chk.Name, err)
+ return err
+ }
+
+ if cur.GetStatus() == nil {
+ cur.Status = cur.EnsureStatus()
+ }
+ //cur.Status.Replicas = int32(model.GetReplicasCount(chk))
+ //
+ //cur.Status.ReadyReplicas = []apiChi.ZookeeperNode{}
+ //for _, readyOne := range readyMembers {
+ // cur.Status.ReadyReplicas = append(cur.Status.ReadyReplicas,
+ // apiChi.ZookeeperNode{
+ // Host: fmt.Sprintf("%s.%s.svc.cluster.local", readyOne, chk.Namespace),
+ // Port: types.NewInt32(int32(chk.Spec.GetClientPort())),
+ // Secure: types.NewStringBool(false),
+ // })
+ //}
+ //
+ //log.V(2).Info("ReadyReplicas: " + fmt.Sprintf("%v", cur.Status.ReadyReplicas))
+
+ //if len(readyMembers) == model.GetReplicasCount(chk) {
+ // cur.Status.Status = "Completed"
+ //} else {
+ // cur.Status.Status = "In progress"
+ //}
+
+ cur.Status.NormalizedCR = nil
+ cur.Status.NormalizedCRCompleted = chk.DeepCopy()
+ cur.Status.NormalizedCRCompleted.ObjectMeta.ResourceVersion = ""
+ cur.Status.NormalizedCRCompleted.ObjectMeta.ManagedFields = nil
+ cur.Status.NormalizedCRCompleted.Status = nil
+
+ if err := c.Status().Update(context.TODO(), cur); err != nil {
+ log.V(1).Error("err: %s", err.Error())
+ } else {
+ return nil
+ }
+ }
+}
diff --git a/pkg/controller/chk/controller.go b/pkg/controller/chk/controller.go
new file mode 100644
index 000000000..4cf5629de
--- /dev/null
+++ b/pkg/controller/chk/controller.go
@@ -0,0 +1,156 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chk/kube"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/managers"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apiMachinery "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ ctrlUtil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// Controller reconciles a ClickHouseKeeper object
+type Controller struct {
+ client.Client
+ Scheme *apiMachinery.Scheme
+
+ namer interfaces.INameManager
+ kube interfaces.IKube
+ //labeler *Labeler
+ //pvcDeleter *volume.PVCDeleter
+}
+
+func (c *Controller) new() {
+ c.namer = managers.NewNameManager(managers.NameManagerTypeKeeper)
+ c.kube = kube.NewAdapter(c.Client, c.namer)
+ //labeler: NewLabeler(kube),
+ //pvcDeleter := volume.NewPVCDeleter(managers.NewNameManager(managers.NameManagerTypeKeeper))
+
+}
+
+func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return ctrl.Result{}, nil
+ }
+
+ // Fetch the ClickHouseKeeper instance
+ new := &apiChk.ClickHouseKeeperInstallation{}
+ if err := c.Client.Get(ctx, req.NamespacedName, new); err != nil {
+ if apiErrors.IsNotFound(err) {
+ // Request object not found, could have been deleted after reconcile request.
+ // Owned objects are automatically garbage collected.
+ // For additional cleanup logic use finalizers.
+ // Return and don't requeue
+ return ctrl.Result{}, nil
+ }
+ // Return and requeue
+ return ctrl.Result{}, err
+ }
+
+ c.new()
+ w := c.newWorker()
+
+ w.reconcileCR(context.TODO(), nil, new)
+
+ return ctrl.Result{}, nil
+}
+
+func (c *Controller) reconcile(
+ owner meta.Object,
+ cur client.Object,
+ new client.Object,
+ name string,
+ updater func(cur, new client.Object) error,
+) (err error) {
+ // TODO unify approach with CHI - set OWNER REFERENCE
+ if err = ctrlUtil.SetControllerReference(owner, new, c.Scheme); err != nil {
+ return err
+ }
+
+ err = c.Client.Get(context.TODO(), util.NamespacedName(new), cur)
+ if err != nil && apiErrors.IsNotFound(err) {
+ log.V(1).Info("Creating new " + name)
+
+ if err = c.Client.Create(context.TODO(), new); err != nil {
+ return err
+ }
+ } else if err != nil {
+ return err
+ } else {
+ if updater == nil {
+ log.V(1).Info("Updater not provided")
+ } else {
+ log.V(1).Info("Updating existing " + name)
+ if err = updater(cur, new); err != nil {
+ return err
+ }
+ if err = c.Client.Update(context.TODO(), cur); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (c *Controller) poll(ctx context.Context, cr api.ICustomResource, f func(c *apiChk.ClickHouseKeeperInstallation, e error) bool) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ namespace, name := util.NamespaceName(cr)
+
+ for {
+		cur, err := c.kube.CR().Get(ctx, namespace, name)
+		// Comma-ok assertion: cur is a nil interface when Get failed
+		chk, _ := cur.(*apiChk.ClickHouseKeeperInstallation)
+		if f(chk, err) {
+ // Continue polling
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+ time.Sleep(15 * time.Second)
+ } else {
+ // Stop polling
+ return
+ }
+ }
+}
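
`poll` re-fetches the CR every 15 seconds until the callback asks to stop, checking the context only between sleeps. A minimal standalone sketch of the same loop shape, using `select` so cancellation interrupts the sleep instead of waiting out the full interval:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// poll invokes keepGoing, sleeps between attempts, and stops when the
// predicate returns false or the context is cancelled.
func poll(ctx context.Context, interval time.Duration, keepGoing func() bool) {
	for keepGoing() {
		select {
		case <-ctx.Done():
			return // task is done
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	n := 0
	poll(ctx, 200*time.Millisecond, func() bool {
		n++
		fmt.Println("attempt", n)
		return n < 3 // stop after three attempts
	})
}
```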
diff --git a/pkg/controller/chk/kube/adapter-kube.go b/pkg/controller/chk/kube/adapter-kube.go
new file mode 100644
index 000000000..991f156ee
--- /dev/null
+++ b/pkg/controller/chk/kube/adapter-kube.go
@@ -0,0 +1,114 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Adapter struct {
+
+ // Set of CR k8s components
+
+ cr *CR
+
+ // Set of k8s components
+
+ configMap *ConfigMap
+ deployment *Deployment
+ event *Event
+ pdb *PDB
+ pod *Pod
+ pvc *storage.PVC
+ replicaSet *ReplicaSet
+ secret *Secret
+ service *Service
+ sts *STS
+}
+
+func NewAdapter(kubeClient client.Client, namer interfaces.INameManager) *Adapter {
+ return &Adapter{
+ cr: NewCR(kubeClient),
+
+ configMap: NewConfigMap(kubeClient),
+ deployment: NewDeployment(kubeClient),
+ event: NewEvent(kubeClient),
+ pdb: NewPDB(kubeClient),
+ pod: NewPod(kubeClient, namer),
+ pvc: storage.NewStoragePVC(NewPVC(kubeClient)),
+ replicaSet: NewReplicaSet(kubeClient),
+ secret: NewSecret(kubeClient, namer),
+ service: NewService(kubeClient, namer),
+ sts: NewSTS(kubeClient, namer),
+ }
+}
+
+// CR is a getter
+func (k *Adapter) CR() interfaces.IKubeCR {
+ return k.cr
+}
+
+// ConfigMap is a getter
+func (k *Adapter) ConfigMap() interfaces.IKubeConfigMap {
+ return k.configMap
+}
+
+// Deployment is a getter
+func (k *Adapter) Deployment() interfaces.IKubeDeployment {
+ return k.deployment
+}
+
+// Event is a getter
+func (k *Adapter) Event() interfaces.IKubeEvent {
+ return k.event
+}
+
+// PDB is a getter
+func (k *Adapter) PDB() interfaces.IKubePDB {
+ return k.pdb
+}
+
+// Pod is a getter
+func (k *Adapter) Pod() interfaces.IKubePod {
+ return k.pod
+}
+
+// Storage is a getter
+func (k *Adapter) Storage() interfaces.IKubeStoragePVC {
+ return k.pvc
+}
+
+// ReplicaSet is a getter
+func (k *Adapter) ReplicaSet() interfaces.IKubeReplicaSet {
+ return k.replicaSet
+}
+
+// Secret is a getter
+func (k *Adapter) Secret() interfaces.IKubeSecret {
+ return k.secret
+}
+
+// Service is a getter
+func (k *Adapter) Service() interfaces.IKubeService {
+ return k.service
+}
+
+// STS is a getter
+func (k *Adapter) STS() interfaces.IKubeSTS {
+ return k.sts
+}
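
Everything in the Adapter is exposed through narrow `interfaces.IKube*` getters, so reconciling code can stay agnostic of whether it talks to the CHI or the CHK backing client. A hypothetical consumer, assuming `IKubeConfigMap.List` mirrors the concrete signature shown in this patch:

```go
package sketch

import (
	"context"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/altinity/clickhouse-operator/pkg/interfaces"
)

// countConfigMaps only sees the IKube facade, so the CHI and CHK adapters
// are interchangeable behind it.
func countConfigMaps(ctx context.Context, k interfaces.IKube, namespace string) (int, error) {
	list, err := k.ConfigMap().List(ctx, namespace, meta.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list), nil
}
```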
diff --git a/pkg/controller/chk/kube/config-map.go b/pkg/controller/chk/kube/config-map.go
new file mode 100644
index 000000000..558cdc581
--- /dev/null
+++ b/pkg/controller/chk/kube/config-map.go
@@ -0,0 +1,87 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "k8s.io/apimachinery/pkg/labels"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type ConfigMap struct {
+ kubeClient client.Client
+}
+
+func NewConfigMap(kubeClient client.Client) *ConfigMap {
+ return &ConfigMap{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *ConfigMap) Create(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ err := c.kubeClient.Create(ctx, cm)
+ return cm, err
+}
+
+func (c *ConfigMap) Get(ctx context.Context, namespace, name string) (*core.ConfigMap, error) {
+ cm := &core.ConfigMap{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, cm)
+ if err == nil {
+ return cm, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (c *ConfigMap) Update(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error) {
+ err := c.kubeClient.Update(ctx, cm)
+ return cm, err
+}
+
+func (c *ConfigMap) Delete(ctx context.Context, namespace, name string) error {
+ cm := &core.ConfigMap{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, cm)
+}
+
+func (c *ConfigMap) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.ConfigMap, error) {
+ list := &core.ConfigMapList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
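
This `List` shape (and its near-identical copies for PDB, PVC, Secret, and Service below) bridges the string selector of `meta.ListOptions` into controller-runtime's typed options via `labels.Parse`. A runnable sketch of just that bridge; the label key is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parse a metav1-style label-selector string into a typed selector.
	selector, err := labels.Parse("clickhouse-keeper.altinity.com/chk=demo,app!=legacy")
	if err != nil {
		panic(err)
	}
	// labels.Set implements labels.Labels, so it can be matched directly.
	podLabels := labels.Set{"clickhouse-keeper.altinity.com/chk": "demo", "app": "keeper"}
	fmt.Println(selector.Matches(podLabels)) // true
}
```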
diff --git a/pkg/controller/chk/kube/cr.go b/pkg/controller/chk/kube/cr.go
new file mode 100644
index 000000000..68363e40f
--- /dev/null
+++ b/pkg/controller/chk/kube/cr.go
@@ -0,0 +1,133 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "k8s.io/apimachinery/pkg/types"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ commonTypes "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type CR struct {
+ kubeClient client.Client
+}
+
+func NewCR(kubeClient client.Client) *CR {
+ return &CR{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *CR) Get(ctx context.Context, namespace, name string) (api.ICustomResource, error) {
+ cm := &apiChk.ClickHouseKeeperInstallation{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, cm)
+ if err == nil {
+ return cm, nil
+ } else {
+ return nil, err
+ }
+}
+
+// StatusUpdate updates ClickHouseKeeperInstallation object's Status
+func (c *CR) StatusUpdate(ctx context.Context, cr api.ICustomResource, opts commonTypes.UpdateStatusOptions) (err error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ for retry, attempt := true, 1; retry; attempt++ {
+ if attempt > 60 {
+ retry = false
+ }
+
+ err = c.doUpdateCRStatus(ctx, cr, opts)
+ if err == nil {
+ return nil
+ }
+
+ if retry {
+ log.V(2).M(cr).F().Warning("got error, will retry. err: %q", err)
+ time.Sleep(1 * time.Second)
+ } else {
+ log.V(1).M(cr).F().Error("got error, all retries are exhausted. err: %q", err)
+ }
+ }
+ return
+}
+
+// doUpdateCRStatus updates ClickHouseKeeperInstallation object's Status
+func (c *CR) doUpdateCRStatus(ctx context.Context, cr api.ICustomResource, opts commonTypes.UpdateStatusOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ chk := cr.(*apiChk.ClickHouseKeeperInstallation)
+ namespace, name := util.NamespaceName(chk)
+ log.V(3).M(chk).F().Info("Update CHK status")
+
+	_cur, err := c.Get(ctx, namespace, name)
+	if err != nil {
+		if opts.TolerateAbsence {
+			return nil
+		}
+		log.V(1).M(chk).F().Error("%q", err)
+		return err
+	}
+	// Comma-ok assertion: _cur is a nil interface when Get failed
+	cur, _ := _cur.(*apiChk.ClickHouseKeeperInstallation)
+	if cur == nil {
+ if opts.TolerateAbsence {
+ return nil
+ }
+ log.V(1).M(chk).F().Error("NULL returned")
+ return fmt.Errorf("ERROR GetCR (%s/%s): NULL returned", namespace, name)
+ }
+
+ // Update status of a real object.
+ cur.EnsureStatus().CopyFrom(chk.Status, opts.CopyStatusOptions)
+
+ err = c.kubeClient.Status().Update(ctx, cur)
+ if err != nil {
+ // Error update
+ log.V(2).M(chk).F().Info("Got error upon update, may retry. err: %q", err)
+ return err
+ }
+
+	_cur, err = c.Get(ctx, namespace, name)
+	if err != nil || _cur == nil {
+		// Status was updated, but the re-fetch failed - can not propagate ResourceVersion
+		return err
+	}
+	cur = _cur.(*apiChk.ClickHouseKeeperInstallation)
+
+	// Propagate updated ResourceVersion into chk
+ if chk.GetResourceVersion() != cur.GetResourceVersion() {
+ log.V(3).M(chk).F().Info("ResourceVersion change: %s to %s", chk.GetResourceVersion(), cur.GetResourceVersion())
+ chk.SetResourceVersion(cur.GetResourceVersion())
+ return nil
+ }
+
+ // ResourceVersion not changed - no update performed?
+
+ return nil
+}
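
`StatusUpdate` retries every error up to roughly 60 times at one-second intervals. When only write conflicts need retrying, client-go ships a ready-made helper; a sketch under a controller-runtime client, offered as an alternative rather than the operator's method:

```go
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
)

// updateStatusWithRetry re-fetches the freshest copy on each attempt and
// retries only on 409 Conflict, with client-go's default backoff.
func updateStatusWithRetry(
	ctx context.Context,
	c client.Client,
	namespace, name string,
	mutate func(*apiChk.ClickHouseKeeperInstallation),
) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cur := &apiChk.ClickHouseKeeperInstallation{}
		if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, cur); err != nil {
			return err
		}
		mutate(cur) // apply status changes onto the fresh copy
		return c.Status().Update(ctx, cur)
	})
}
```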
diff --git a/pkg/controller/chk/kube/deployment.go b/pkg/controller/chk/kube/deployment.go
new file mode 100644
index 000000000..1cb103cac
--- /dev/null
+++ b/pkg/controller/chk/kube/deployment.go
@@ -0,0 +1,51 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ apps "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+)
+
+type Deployment struct {
+ kubeClient client.Client
+}
+
+func NewDeployment(kubeClient client.Client) *Deployment {
+ return &Deployment{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *Deployment) Get(namespace, name string) (*apps.Deployment, error) {
+ deployment := &apps.Deployment{}
+ err := c.kubeClient.Get(controller.NewContext(), types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, deployment)
+ if err == nil {
+ return deployment, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (c *Deployment) Update(deployment *apps.Deployment) (*apps.Deployment, error) {
+ err := c.kubeClient.Update(controller.NewContext(), deployment)
+ return deployment, err
+}
diff --git a/pkg/controller/chk/kube/event.go b/pkg/controller/chk/kube/event.go
new file mode 100644
index 000000000..625db1a86
--- /dev/null
+++ b/pkg/controller/chk/kube/event.go
@@ -0,0 +1,37 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Event struct {
+ kubeClient client.Client
+}
+
+func NewEvent(kubeClient client.Client) *Event {
+ return &Event{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *Event) Create(ctx context.Context, event *core.Event) (*core.Event, error) {
+ err := c.kubeClient.Create(ctx, event)
+ return event, err
+}
diff --git a/pkg/controller/chk/kube/pdb.go b/pkg/controller/chk/kube/pdb.go
new file mode 100644
index 000000000..39bcc33c4
--- /dev/null
+++ b/pkg/controller/chk/kube/pdb.go
@@ -0,0 +1,87 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "k8s.io/apimachinery/pkg/labels"
+
+ policy "k8s.io/api/policy/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type PDB struct {
+ kubeClient client.Client
+}
+
+func NewPDB(kubeClient client.Client) *PDB {
+ return &PDB{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *PDB) Create(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ err := c.kubeClient.Create(ctx, pdb)
+ return pdb, err
+}
+
+func (c *PDB) Get(ctx context.Context, namespace, name string) (*policy.PodDisruptionBudget, error) {
+ pdb := &policy.PodDisruptionBudget{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, pdb)
+ if err == nil {
+ return pdb, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (c *PDB) Update(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
+ err := c.kubeClient.Update(ctx, pdb)
+ return pdb, err
+}
+
+func (c *PDB) Delete(ctx context.Context, namespace, name string) error {
+ pdb := &policy.PodDisruptionBudget{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, pdb)
+}
+
+func (c *PDB) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]policy.PodDisruptionBudget, error) {
+ list := &policy.PodDisruptionBudgetList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chk/kube/pod.go b/pkg/controller/chk/kube/pod.go
new file mode 100644
index 000000000..ecfaa9d2c
--- /dev/null
+++ b/pkg/controller/chk/kube/pod.go
@@ -0,0 +1,144 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Pod struct {
+ kubeClient client.Client
+ namer interfaces.INameManager
+}
+
+func NewPod(kubeClient client.Client, namer interfaces.INameManager) *Pod {
+ return &Pod{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets pod. Accepted types:
+// 1. *apps.StatefulSet
+// 2. *api.Host
+func (c *Pod) Get(params ...any) (*core.Pod, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *apps.StatefulSet:
+ name = c.namer.Name(interfaces.NamePod, obj)
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NamePod, obj)
+ namespace = typedObj.Runtime.Address.Namespace
+ default:
+ panic(any("unknown param"))
+ }
+ default:
+		panic(any("incorrect number of params"))
+ }
+ pod := &core.Pod{}
+ err := c.kubeClient.Get(controller.NewContext(), types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, pod)
+ return pod, err
+}
+
+// GetAll gets all pods for provided entity
+func (c *Pod) GetAll(obj any) []*core.Pod {
+ switch typed := obj.(type) {
+ case api.ICustomResource:
+ return c.getPodsOfCR(typed)
+ case api.ICluster:
+ return c.getPodsOfCluster(typed)
+ case api.IShard:
+ return c.getPodsOfShard(typed)
+ case *api.Host:
+ if pod, err := c.Get(typed); err == nil {
+ return []*core.Pod{
+ pod,
+ }
+ }
+ default:
+ panic(any("unknown type"))
+ }
+ return nil
+}
+
+func (c *Pod) Update(ctx context.Context, pod *core.Pod) (*core.Pod, error) {
+ err := c.kubeClient.Update(ctx, pod)
+ return pod, err
+}
+
+// getPodsOfCluster gets all pods in a cluster
+func (c *Pod) getPodsOfCluster(cluster api.ICluster) (pods []*core.Pod) {
+ cluster.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+// getPodsOfShard gets all pods in a shard
+func (c *Pod) getPodsOfShard(shard api.IShard) (pods []*core.Pod) {
+ shard.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+// getPodsOfCR gets all pods of the custom resource
+func (c *Pod) getPodsOfCR(cr api.ICustomResource) (pods []*core.Pod) {
+ cr.WalkHosts(func(host *api.Host) error {
+ if pod, err := c.Get(host); err == nil {
+ pods = append(pods, pod)
+ }
+ return nil
+ })
+ return pods
+}
+
+func (c *Pod) Delete(ctx context.Context, namespace, name string) error {
+ pod := &core.Pod{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, pod)
+}
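
`Pod.Get`'s variadic `params ...any` stands in for the overloading Go does not have: callers pass either a namespace/name string pair or a typed object, and both shapes are normalized to a lookup key. A standalone sketch of the dispatch pattern (the `host` type is a stand-in):

```go
package main

import "fmt"

type host struct{ namespace, name string }

// resolve accepts either (namespace, name) strings or a *host and
// normalizes both shapes to a namespace/name pair.
func resolve(params ...any) (namespace, name string) {
	switch len(params) {
	case 2:
		return params[0].(string), params[1].(string)
	case 1:
		h := params[0].(*host)
		return h.namespace, h.name
	default:
		panic("expected (namespace, name) or (*host)")
	}
}

func main() {
	fmt.Println(resolve("ns", "pod-0"))
	fmt.Println(resolve(&host{namespace: "ns", name: "pod-1"}))
}
```

The convenience costs compile-time safety: a malformed call surfaces as a runtime panic rather than a compile error, which is exactly why `Pod.Get` panics on unknown parameter shapes.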
diff --git a/pkg/controller/chk/kube/pvc.go b/pkg/controller/chk/kube/pvc.go
new file mode 100644
index 000000000..001408974
--- /dev/null
+++ b/pkg/controller/chk/kube/pvc.go
@@ -0,0 +1,105 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ chiLabeler "github.com/altinity/clickhouse-operator/pkg/model/chi/tags/labeler"
+)
+
+type PVC struct {
+ kubeClient client.Client
+}
+
+func NewPVC(kubeClient client.Client) *PVC {
+ return &PVC{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *PVC) Create(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
+ err := c.kubeClient.Create(ctx, pvc)
+ return pvc, err
+}
+
+func (c *PVC) Get(ctx context.Context, namespace, name string) (*core.PersistentVolumeClaim, error) {
+ pvc := &core.PersistentVolumeClaim{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, pvc)
+ if err == nil {
+ return pvc, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (c *PVC) Update(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
+ err := c.kubeClient.Update(ctx, pvc)
+ return pvc, err
+}
+
+func (c *PVC) Delete(ctx context.Context, namespace, name string) error {
+ pvc := &core.PersistentVolumeClaim{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, pvc)
+}
+
+func (c *PVC) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.PersistentVolumeClaim, error) {
+ list := &core.PersistentVolumeClaimList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
+
+func (c *PVC) ListForHost(ctx context.Context, host *api.Host) (*core.PersistentVolumeClaimList, error) {
+ list := &core.PersistentVolumeClaimList{}
+ opts := &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(labeler(host.GetCR()).Selector(interfaces.SelectorHostScope, host)),
+ Namespace: host.Runtime.Address.Namespace,
+ }
+ err := c.kubeClient.List(ctx, list, opts)
+ return list, err
+}
+
+func labeler(cr api.ICustomResource) interfaces.ILabeler {
+ return chiLabeler.New(cr)
+}
diff --git a/pkg/controller/chk/kube/replicaset.go b/pkg/controller/chk/kube/replicaset.go
new file mode 100644
index 000000000..936d08ff3
--- /dev/null
+++ b/pkg/controller/chk/kube/replicaset.go
@@ -0,0 +1,51 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type ReplicaSet struct {
+ kubeClient client.Client
+}
+
+func NewReplicaSet(kubeClient client.Client) *ReplicaSet {
+ return &ReplicaSet{
+ kubeClient: kubeClient,
+ }
+}
+
+func (c *ReplicaSet) Get(ctx context.Context, namespace, name string) (*apps.ReplicaSet, error) {
+ rs := &apps.ReplicaSet{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, rs)
+ if err == nil {
+ return rs, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (c *ReplicaSet) Update(ctx context.Context, replicaSet *apps.ReplicaSet) (*apps.ReplicaSet, error) {
+ err := c.kubeClient.Update(ctx, replicaSet)
+ return replicaSet, err
+}
diff --git a/pkg/controller/chk/kube/secret.go b/pkg/controller/chk/kube/secret.go
new file mode 100644
index 000000000..ee84fd6ba
--- /dev/null
+++ b/pkg/controller/chk/kube/secret.go
@@ -0,0 +1,113 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Secret struct {
+ kubeClient client.Client
+ namer interfaces.INameManager
+}
+
+func NewSecret(kubeClient client.Client, namer interfaces.INameManager) *Secret {
+ return &Secret{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets Secret. Accepted types:
+// 1. *core.Secret
+// 2. *api.Host
+func (c *Secret) Get(ctx context.Context, params ...any) (*core.Secret, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *core.Secret:
+ name = typedObj.Name
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NameStatefulSetService, typedObj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+	secret := &core.Secret{}
+	err := c.kubeClient.Get(ctx, types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	}, secret)
+	if err == nil {
+		return secret, nil
+	} else {
+		return nil, err
+	}
+}
+
+func (c *Secret) Create(ctx context.Context, secret *core.Secret) (*core.Secret, error) {
+	err := c.kubeClient.Create(ctx, secret)
+	return secret, err
+}
+
+func (c *Secret) Update(ctx context.Context, secret *core.Secret) (*core.Secret, error) {
+	err := c.kubeClient.Update(ctx, secret)
+	return secret, err
+}
+
+func (c *Secret) Delete(ctx context.Context, namespace, name string) error {
+	secret := &core.Secret{
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+	return c.kubeClient.Delete(ctx, secret)
+}
+
+func (c *Secret) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Secret, error) {
+ list := &core.SecretList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if list == nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chk/kube/service.go b/pkg/controller/chk/kube/service.go
new file mode 100644
index 000000000..2df9e0760
--- /dev/null
+++ b/pkg/controller/chk/kube/service.go
@@ -0,0 +1,113 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+type Service struct {
+ kubeClient client.Client
+ namer interfaces.INameManager
+}
+
+func NewService(kubeClient client.Client, namer interfaces.INameManager) *Service {
+ return &Service{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets Service. Accepted types:
+//  1. *core.Service
+//  2. *api.Host
+func (c *Service) Get(ctx context.Context, params ...any) (*core.Service, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case *core.Service:
+ name = typedObj.Name
+ namespace = typedObj.Namespace
+ case *api.Host:
+ name = c.namer.Name(interfaces.NameStatefulSetService, typedObj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+ service := &core.Service{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, service)
+	if err != nil {
+		return nil, err
+	}
+	return service, nil
+}
+
+func (c *Service) Create(ctx context.Context, svc *core.Service) (*core.Service, error) {
+ err := c.kubeClient.Create(ctx, svc)
+ return svc, err
+}
+
+func (c *Service) Update(ctx context.Context, svc *core.Service) (*core.Service, error) {
+ err := c.kubeClient.Update(ctx, svc)
+ return svc, err
+}
+
+func (c *Service) Delete(ctx context.Context, namespace, name string) error {
+ svc := &core.Service{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, svc)
+}
+
+func (c *Service) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Service, error) {
+ list := &core.ServiceList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chk/kube/statesfulset.go b/pkg/controller/chk/kube/statesfulset.go
new file mode 100644
index 000000000..3db13312c
--- /dev/null
+++ b/pkg/controller/chk/kube/statesfulset.go
@@ -0,0 +1,123 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+ "context"
+ "gopkg.in/yaml.v3"
+
+ apps "k8s.io/api/apps/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type STS struct {
+ kubeClient client.Client
+ namer interfaces.INameManager
+}
+
+func NewSTS(kubeClient client.Client, namer interfaces.INameManager) *STS {
+ return &STS{
+ kubeClient: kubeClient,
+ namer: namer,
+ }
+}
+
+// Get gets StatefulSet. Accepted types:
+//  1. meta.Object
+//  2. *api.Host
+func (c *STS) Get(ctx context.Context, params ...any) (*apps.StatefulSet, error) {
+ var name, namespace string
+ switch len(params) {
+ case 2:
+ // Expecting namespace name
+ namespace = params[0].(string)
+ name = params[1].(string)
+ case 1:
+ // Expecting obj
+ obj := params[0]
+ switch typedObj := obj.(type) {
+ case meta.Object:
+ name = typedObj.GetName()
+ namespace = typedObj.GetNamespace()
+ case *api.Host:
+ // Namespaced name
+ name = c.namer.Name(interfaces.NameStatefulSet, obj)
+ namespace = typedObj.Runtime.Address.Namespace
+ }
+ }
+ return c.get(ctx, namespace, name)
+}
+
+func (c *STS) get(ctx context.Context, namespace, name string) (*apps.StatefulSet, error) {
+ sts := &apps.StatefulSet{}
+ err := c.kubeClient.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, sts)
+	if err != nil {
+		return nil, err
+	}
+	return sts, nil
+}
+
+func (c *STS) Create(ctx context.Context, sts *apps.StatefulSet) (*apps.StatefulSet, error) {
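+	// Render the StatefulSet as YAML for verbose logging; the marshal error is deliberately ignored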
+ yamlBytes, _ := yaml.Marshal(sts)
+ log.V(3).M(sts).Info("Going to create STS: %s\n%s", util.NamespaceNameString(sts), string(yamlBytes))
+ err := c.kubeClient.Create(ctx, sts)
+ return sts, err
+}
+
+func (c *STS) Update(ctx context.Context, sts *apps.StatefulSet) (*apps.StatefulSet, error) {
+ log.V(3).M(sts).Info("Going to update STS: %s", util.NamespaceNameString(sts))
+ err := c.kubeClient.Update(ctx, sts)
+ return sts, err
+}
+
+func (c *STS) Delete(ctx context.Context, namespace, name string) error {
+ log.V(3).M(namespace, name).Info("Going to delete STS: %s/%s", namespace, name)
+ sts := &apps.StatefulSet{
+ ObjectMeta: meta.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ return c.kubeClient.Delete(ctx, sts)
+}
+
+func (c *STS) List(ctx context.Context, namespace string, opts meta.ListOptions) ([]apps.StatefulSet, error) {
+ list := &apps.StatefulSetList{}
+ selector, err := labels.Parse(opts.LabelSelector)
+ if err != nil {
+ return nil, err
+ }
+ err = c.kubeClient.List(ctx, list, &client.ListOptions{
+ Namespace: namespace,
+ LabelSelector: selector,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return list.Items, nil
+}
diff --git a/pkg/controller/chk/reconciler.go b/pkg/controller/chk/reconciler.go
deleted file mode 100644
index eae56201b..000000000
--- a/pkg/controller/chk/reconciler.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chk
-
-import (
- "context"
- "fmt"
- "time"
-
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- policy "k8s.io/api/policy/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
- apiMachinery "k8s.io/apimachinery/pkg/runtime"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- ctrlUtil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
- apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
- // apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- model "github.com/altinity/clickhouse-operator/pkg/model/chk"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// ReconcileTime is the delay between reconciliations
-const ReconcileTime = 30 * time.Second
-
-// ChkReconciler reconciles a ClickHouseKeeper object
-type ChkReconciler struct {
- client.Client
- Scheme *apiMachinery.Scheme
-}
-
-type reconcileFunc func(cluster *apiChk.ClickHouseKeeperInstallation) error
-
-func (r *ChkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return ctrl.Result{}, nil
- }
-
- var old, new *apiChk.ClickHouseKeeperInstallation
-
- // Fetch the ClickHouseKeeper instance
- new = &apiChk.ClickHouseKeeperInstallation{}
- if err := r.Get(ctx, req.NamespacedName, new); err != nil {
- if apiErrors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile request.
- // Owned objects are automatically garbage collected.
- // For additional cleanup logic use finalizers.
- // Return and don't requeue
- return ctrl.Result{}, nil
- }
- // Return and requeue
- return ctrl.Result{}, err
- }
-
- if new.HasAncestor() {
- log.V(2).M(new).F().Info("has ancestor, use it as a base for reconcile. CHK: %s/%s", new.Namespace, new.Name)
- old = new.GetAncestor()
- } else {
- log.V(2).M(new).F().Info("has NO ancestor, use empty CHK as a base for reconcile. CHK: %s/%s", new.Namespace, new.Name)
- old = nil
- }
-
- log.V(2).M(new).F().Info("Normalized OLD CHK: %s/%s", new.Namespace, new.Name)
- old = r.normalize(old)
-
- log.V(2).M(new).F().Info("Normalized NEW CHK %s/%s", new.Namespace, new.Name)
- new = r.normalize(new)
- new.SetAncestor(old)
-
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return ctrl.Result{}, nil
- }
-
- if old.GetGeneration() != new.GetGeneration() {
- for _, f := range []reconcileFunc{
- r.reconcileConfigMap,
- r.reconcileStatefulSet,
- r.reconcileClientService,
- r.reconcileHeadlessService,
- r.reconcilePodDisruptionBudget,
- } {
- if err := f(new); err != nil {
- log.V(1).Error("Error during reconcile. f: %s err: %s", getFunctionName(f), err)
- return reconcile.Result{}, err
- }
- }
- }
-
- // Fetch the ClickHouseKeeper instance
- dummy := &apiChk.ClickHouseKeeperInstallation{}
- if err := r.Get(ctx, req.NamespacedName, dummy); err != nil {
- if apiErrors.IsNotFound(err) {
- // Request object not found, could have been deleted after reconcile request.
- // Owned objects are automatically garbage collected.
- // For additional cleanup logic use finalizers.
- // Return and don't requeue
- return ctrl.Result{}, nil
- }
- // Return and requeue
- return ctrl.Result{}, err
- }
-
- if err := r.reconcileClusterStatus(new); err != nil {
- log.V(1).Error("Error during reconcile status. f: %s err: %s", getFunctionName(r.reconcileClusterStatus), err)
- return reconcile.Result{}, err
- }
-
- return ctrl.Result{}, nil
-}
-
-func (r *ChkReconciler) reconcileConfigMap(chk *apiChk.ClickHouseKeeperInstallation) error {
- return r.reconcile(
- chk,
- &core.ConfigMap{},
- model.CreateConfigMap(chk),
- "ConfigMap",
- func(curObject, newObject client.Object) error {
- cur, ok1 := curObject.(*core.ConfigMap)
- new, ok2 := newObject.(*core.ConfigMap)
- if !ok1 || !ok2 {
- return fmt.Errorf("unable to cast")
- }
- cur.Data = new.Data
- cur.BinaryData = new.BinaryData
- return nil
- },
- )
-}
-
-func (r *ChkReconciler) reconcileStatefulSet(chk *apiChk.ClickHouseKeeperInstallation) error {
- return r.reconcile(
- chk,
- &apps.StatefulSet{},
- model.CreateStatefulSet(chk),
- "StatefulSet",
- func(curObject, newObject client.Object) error {
- cur, ok1 := curObject.(*apps.StatefulSet)
- new, ok2 := newObject.(*apps.StatefulSet)
- if !ok1 || !ok2 {
- return fmt.Errorf("unable to cast")
- }
- markPodRestartedNow(new)
- cur.Spec.Replicas = new.Spec.Replicas
- cur.Spec.Template = new.Spec.Template
- cur.Spec.UpdateStrategy = new.Spec.UpdateStrategy
- return nil
- },
- )
-}
-
-func (r *ChkReconciler) reconcileClientService(chk *apiChk.ClickHouseKeeperInstallation) error {
- return r.reconcile(
- chk,
- &core.Service{},
- model.CreateClientService(chk),
- "Client Service",
- func(curObject, newObject client.Object) error {
- cur, ok1 := curObject.(*core.Service)
- new, ok2 := newObject.(*core.Service)
- if !ok1 || !ok2 {
- return fmt.Errorf("unable to cast")
- }
- cur.Spec.Ports = new.Spec.Ports
- cur.Spec.Type = new.Spec.Type
- cur.SetAnnotations(new.GetAnnotations())
- return nil
- },
- )
-}
-
-func (r *ChkReconciler) reconcileHeadlessService(chk *apiChk.ClickHouseKeeperInstallation) error {
- return r.reconcile(
- chk,
- &core.Service{},
- model.CreateHeadlessService(chk),
- "Headless Service",
- func(curObject, newObject client.Object) error {
- cur, ok1 := curObject.(*core.Service)
- new, ok2 := newObject.(*core.Service)
- if !ok1 || !ok2 {
- return fmt.Errorf("unable to cast")
- }
- cur.Spec.Ports = new.Spec.Ports
- cur.Spec.Type = new.Spec.Type
- cur.SetAnnotations(new.GetAnnotations())
- return nil
- },
- )
-}
-
-func (r *ChkReconciler) reconcilePodDisruptionBudget(chk *apiChk.ClickHouseKeeperInstallation) error {
- return r.reconcile(
- chk,
- &policy.PodDisruptionBudget{},
- model.CreatePodDisruptionBudget(chk),
- "PodDisruptionBudget",
- nil,
- )
-}
-
-func (r *ChkReconciler) reconcile(
- chk *apiChk.ClickHouseKeeperInstallation,
- cur client.Object,
- new client.Object,
- name string,
- updater func(cur, new client.Object) error,
-) (err error) {
- if err = ctrlUtil.SetControllerReference(chk, new, r.Scheme); err != nil {
- return err
- }
- err = r.Client.Get(context.TODO(), getNamespacedName(new), cur)
- if err != nil && apiErrors.IsNotFound(err) {
- log.V(1).Info("Creating new " + name)
-
- if err = r.Client.Create(context.TODO(), new); err != nil {
- return err
- }
- } else if err != nil {
- return err
- } else {
- if updater == nil {
- log.V(1).Info("Updater not provided")
- } else {
- log.V(1).Info("Updating existing " + name)
- if err = updater(cur, new); err != nil {
- return err
- }
- if err = r.Client.Update(context.TODO(), cur); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *ChkReconciler) reconcileClusterStatus(chk *apiChk.ClickHouseKeeperInstallation) (err error) {
- readyMembers, err := r.getReadyPods(chk)
- if err != nil {
- return err
- }
-
- for {
- // Fetch the latest ClickHouseKeeper instance again
- cur := &apiChk.ClickHouseKeeperInstallation{}
- if err := r.Get(context.TODO(), getNamespacedName(chk), cur); err != nil {
- log.V(1).Error("Error: not found %s err: %s", chk.Name, err)
- return err
- }
-
- if cur.GetStatus() == nil {
- cur.Status = cur.EnsureStatus()
- }
- cur.Status.Replicas = int32(model.GetReplicasCount(chk))
-
- cur.Status.ReadyReplicas = []apiChi.ChiZookeeperNode{}
- for _, readyOne := range readyMembers {
- cur.Status.ReadyReplicas = append(cur.Status.ReadyReplicas,
- apiChi.ChiZookeeperNode{
- Host: fmt.Sprintf("%s.%s.svc.cluster.local", readyOne, chk.Namespace),
- Port: int32(chk.Spec.GetClientPort()),
- Secure: apiChi.NewStringBool(false),
- })
- }
-
- log.V(2).Info("ReadyReplicas: " + fmt.Sprintf("%v", cur.Status.ReadyReplicas))
-
- if len(readyMembers) == model.GetReplicasCount(chk) {
- cur.Status.Status = "Completed"
- } else {
- cur.Status.Status = "In progress"
- }
-
- cur.Status.NormalizedCHK = nil
- cur.Status.NormalizedCHKCompleted = chk.DeepCopy()
- cur.Status.NormalizedCHKCompleted.ObjectMeta.ResourceVersion = ""
- cur.Status.NormalizedCHKCompleted.ObjectMeta.ManagedFields = nil
- cur.Status.NormalizedCHKCompleted.Status = nil
-
- if err := r.Status().Update(context.TODO(), cur); err != nil {
- log.V(1).Error("err: %s", err.Error())
- } else {
- return nil
- }
- }
-}
-
-// normalize
-func (r *ChkReconciler) normalize(c *apiChk.ClickHouseKeeperInstallation) *apiChk.ClickHouseKeeperInstallation {
- chk, err := model.NewNormalizer().CreateTemplatedCHK(c, normalizer.NewOptions())
- if err != nil {
- log.V(1).
- M(chk).F().
- Error("FAILED to normalize CHI 1: %v", err)
- }
- return chk
-}
diff --git a/pkg/controller/chk/reconciler_util.go b/pkg/controller/chk/reconciler_util.go
deleted file mode 100644
index 9b89e5abb..000000000
--- a/pkg/controller/chk/reconciler_util.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chk
-
-import (
- "context"
- "encoding/json"
- "reflect"
- "runtime"
- "time"
-
- apps "k8s.io/api/apps/v1"
- core "k8s.io/api/core/v1"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
- model "github.com/altinity/clickhouse-operator/pkg/model/chk"
-)
-
-func getNamespacedName(obj meta.Object) types.NamespacedName {
- return types.NamespacedName{
- Namespace: obj.GetNamespace(),
- Name: obj.GetName(),
- }
-}
-
-func getFunctionName(i interface{}) string {
- return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
-}
-
-func getKeeperFromAnnotationLastAppliedConfiguration(chk *api.ClickHouseKeeperInstallation) *api.ClickHouseKeeperInstallation {
- lastApplied := chk.Annotations["kubectl.kubernetes.io/last-applied-configuration"]
-
- tmp := api.ClickHouseKeeperInstallation{}
-
- json.Unmarshal([]byte(lastApplied), &tmp)
- return &tmp
-}
-
-func (r *ChkReconciler) getReadyPods(chk *api.ClickHouseKeeperInstallation) ([]string, error) {
- labelSelector := labels.SelectorFromSet(model.GetPodLabels(chk))
- listOps := &client.ListOptions{
- Namespace: chk.Namespace,
- LabelSelector: labelSelector,
- }
- podList := &core.PodList{}
- if err := r.List(context.TODO(), podList, listOps); err != nil {
- return nil, err
- }
-
- var readyPods []string
- for _, pod := range podList.Items {
- // Pod is ready only in case all containers are ready
- podIsReady := true
- for _, containerStatus := range pod.Status.ContainerStatuses {
- if !containerStatus.Ready {
- podIsReady = false
- }
- }
- if podIsReady {
- readyPods = append(readyPods, pod.Name)
- }
- }
-
- return readyPods, nil
-}
-
-func markPodRestartedNow(sts *apps.StatefulSet) {
- v, _ := time.Now().UTC().MarshalText()
- sts.Spec.Template.Annotations = map[string]string{"kubectl.kubernetes.io/restartedAt": string(v)}
-}
diff --git a/pkg/controller/chk/worker-chk-reconciler.go b/pkg/controller/chk/worker-chk-reconciler.go
new file mode 100644
index 000000000..eea2aecf7
--- /dev/null
+++ b/pkg/controller/chk/worker-chk-reconciler.go
@@ -0,0 +1,638 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chi/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/controller/chk/kube"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/config"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcileCR runs reconcile cycle for a Custom Resource
+func (w *worker) reconcileCR(ctx context.Context, old, new *apiChk.ClickHouseKeeperInstallation) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.M(new).S().P()
+ defer w.a.M(new).E().P()
+
+ if new.HasAncestor() {
+ log.V(2).M(new).F().Info("has ancestor, use it as a base for reconcile. CR: %s", util.NamespaceNameString(new))
+ old = new.GetAncestorT()
+ } else {
+ log.V(2).M(new).F().Info("has NO ancestor, use empty base for reconcile. CR: %s", util.NamespaceNameString(new))
+ old = nil
+ }
+
+ common.LogOldAndNew("non-normalized yet (native)", old, new)
+
+ switch {
+ case w.isGenerationTheSame(old, new):
+ log.V(2).M(new).F().Info("isGenerationTheSame() - nothing to do here, exit")
+ return nil
+ }
+
+ log.V(2).M(new).F().Info("Normalized OLD: %s", util.NamespaceNameString(new))
+ old = w.normalize(old)
+
+ log.V(2).M(new).F().Info("Normalized NEW: %s", util.NamespaceNameString(new))
+ new = w.normalize(new)
+
+ new.SetAncestor(old)
+ common.LogOldAndNew("normalized", old, new)
+
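+	// Build an action plan - the diff between the old and new normalized specs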
+ actionPlan := action_plan.NewActionPlan(old, new)
+ common.LogActionPlan(actionPlan)
+
+ switch {
+ case actionPlan.HasActionsToDo():
+ w.a.M(new).F().Info("ActionPlan has actions - continue reconcile")
+ default:
+ w.a.M(new).F().Info("ActionPlan has no actions and no need to install finalizer - nothing to do")
+ return nil
+ }
+
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.newTask(new)
+ w.markReconcileStart(ctx, new, actionPlan)
+ w.walkHosts(ctx, new, actionPlan)
+
+ if err := w.reconcile(ctx, new); err != nil {
+ // Something went wrong
+ w.a.WithEvent(new, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusError(new).
+ M(new).F().
+ Error("FAILED to reconcile CR %s, err: %v", util.NamespaceNameString(new), err)
+ w.markReconcileCompletedUnsuccessfully(ctx, new, err)
+		if errors.Is(err, common.ErrCRUDAbort) {
+			// CRUD abort - the failure has already been reported above; no additional handling is performed here
+		}
+ } else {
+ // Reconcile successful
+ // Post-process added items
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+ w.clean(ctx, new)
+ w.waitForIPAddresses(ctx, new)
+ w.finalizeReconcileAndMarkCompleted(ctx, new)
+ }
+
+ return nil
+}
+
+// reconcile reconciles Custom Resource
+func (w *worker) reconcile(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
+
+ counters := api.NewHostReconcileAttributesCounters()
+ cr.WalkHosts(func(host *api.Host) error {
+ counters.Add(host.GetReconcileAttributes())
+ return nil
+ })
+
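+	// If hosts are only being added (nothing modified or removed), all shards can be reconciled concurrently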
+ if counters.AddOnly() {
+ w.a.V(1).M(cr).Info("Enabling full fan-out mode. CHI: %s", util.NamespaceNameString(cr))
+ ctx = context.WithValue(ctx, common.ReconcileShardsAndHostsOptionsCtxKey, &common.ReconcileShardsAndHostsOptions{
+ FullFanOut: true,
+ })
+ }
+
+ return cr.WalkTillError(
+ ctx,
+ w.reconcileCRAuxObjectsPreliminary,
+ w.reconcileCluster,
+ w.reconcileShardsAndHosts,
+ w.reconcileCRAuxObjectsFinal,
+ )
+}
+
+// reconcileCRAuxObjectsPreliminary reconciles CR preliminary in order to ensure that ConfigMaps are in place
+func (w *worker) reconcileCRAuxObjectsPreliminary(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
+
+ // CR common ConfigMap without added hosts
+ cr.GetRuntime().LockCommonConfig()
+ if err := w.reconcileConfigMapCommon(ctx, cr, w.options()); err != nil {
+ w.a.F().Error("failed to reconcile config map common. err: %v", err)
+ }
+ cr.GetRuntime().UnlockCommonConfig()
+
+ // CR users ConfigMap - common for all hosts
+ if err := w.reconcileConfigMapCommonUsers(ctx, cr); err != nil {
+ w.a.F().Error("failed to reconcile config map users. err: %v", err)
+ }
+
+ return nil
+}
+
+// reconcileCRServicePreliminary runs first stage of CR reconcile process
+func (w *worker) reconcileCRServicePreliminary(ctx context.Context, cr api.ICustomResource) error {
+ if cr.IsStopped() {
+ // Stopped CR must have no entry point
+ _ = w.c.deleteServiceCR(ctx, cr)
+ }
+ return nil
+}
+
+// reconcileCRServiceFinal runs second stage of CR reconcile process
+func (w *worker) reconcileCRServiceFinal(ctx context.Context, cr api.ICustomResource) error {
+ if cr.IsStopped() {
+		// Stopped CR must have no entry point
+		return nil
+	}
+
+	// Create entry point for the whole CR
+ if service := w.task.Creator().CreateService(interfaces.ServiceCR); service != nil {
+ if err := w.reconcileService(ctx, cr, service); err != nil {
+ // Service not reconciled
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
+ return err
+ }
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
+ }
+
+ return nil
+}
+
+// reconcileCRAuxObjectsFinal reconciles CR global objects
+func (w *worker) reconcileCRAuxObjectsFinal(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) (err error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
+
+ // CR ConfigMaps with update
+ cr.GetRuntime().LockCommonConfig()
+ err = w.reconcileConfigMapCommon(ctx, cr, nil)
+ cr.GetRuntime().UnlockCommonConfig()
+ return err
+}
+
+// reconcileConfigMapCommon reconciles common ConfigMap
+func (w *worker) reconcileConfigMapCommon(
+ ctx context.Context,
+ cr api.ICustomResource,
+ options *config.FilesGeneratorOptions,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+	// ConfigMap common for all resources of the CR -
+	// contains several sections, mapped as separate config files
+ configMapCommon := w.task.Creator().CreateConfigMap(interfaces.ConfigMapCommon, options)
+ err := w.reconcileConfigMap(ctx, cr, configMapCommon)
+ if err == nil {
+ w.task.RegistryReconciled().RegisterConfigMap(configMapCommon.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterConfigMap(configMapCommon.GetObjectMeta())
+ }
+ return err
+}
+
+// reconcileConfigMapCommonUsers reconciles the CR's common users ConfigMap
+// ConfigMap common for all users resources in the CR
+func (w *worker) reconcileConfigMapCommonUsers(ctx context.Context, cr api.ICustomResource) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+	// ConfigMap common for all users resources in the CR
+ configMapUsers := w.task.Creator().CreateConfigMap(interfaces.ConfigMapCommonUsers)
+ err := w.reconcileConfigMap(ctx, cr, configMapUsers)
+ if err == nil {
+ w.task.RegistryReconciled().RegisterConfigMap(configMapUsers.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterConfigMap(configMapUsers.GetObjectMeta())
+ }
+ return err
+}
+
+// reconcileConfigMapHost reconciles host's personal ConfigMap
+func (w *worker) reconcileConfigMapHost(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ // ConfigMap for a host
+ configMap := w.task.Creator().CreateConfigMap(interfaces.ConfigMapHost, host)
+ err := w.reconcileConfigMap(ctx, host.GetCR(), configMap)
+ if err == nil {
+ w.task.RegistryReconciled().RegisterConfigMap(configMap.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterConfigMap(configMap.GetObjectMeta())
+ return err
+ }
+
+ return nil
+}
+
+// reconcileHostStatefulSet reconciles host's StatefulSet
+func (w *worker) reconcileHostStatefulSet(ctx context.Context, host *api.Host, opts *statefulset.ReconcileOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ log.V(1).M(host).F().S().Info("reconcile StatefulSet start")
+ defer log.V(1).M(host).F().E().Info("reconcile StatefulSet end")
+
+ version := w.getHostSoftwareVersion(ctx, host)
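+	// Fetch the current StatefulSet, if any; the error is ignored as it may not exist yet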
+ host.Runtime.CurStatefulSet, _ = w.c.kube.STS().Get(ctx, host)
+
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. App version: %s", host.GetName(), version)
+ // In case we have to force-restart host
+ // We'll do it via replicas: 0 in StatefulSet.
+ if w.shouldForceRestartHost(host) {
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. Shutting host down due to force restart", host.GetName())
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, true)
+ _ = w.stsReconciler.ReconcileStatefulSet(ctx, host, false, opts)
+ metrics.HostReconcilesRestart(ctx, host.GetCR())
+ // At this moment StatefulSet has 0 replicas.
+ // First stage of RollingUpdate completed.
+ }
+
+ // We are in place, where we can reconcile StatefulSet to desired configuration.
+ w.a.V(1).M(host).F().Info("Reconcile host: %s. Reconcile StatefulSet", host.GetName())
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+ err := w.stsReconciler.ReconcileStatefulSet(ctx, host, true, opts)
+ if err == nil {
+ w.task.RegistryReconciled().RegisterStatefulSet(host.Runtime.DesiredStatefulSet.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterStatefulSet(host.Runtime.DesiredStatefulSet.GetObjectMeta())
+ if err == common.ErrCRUDIgnore {
+ // Pretend nothing happened in case of ignore
+ err = nil
+ }
+
+ host.GetCR().IEnsureStatus().HostFailed()
+ w.a.WithEvent(host.GetCR(), common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(host.GetCR()).
+ WithStatusError(host.GetCR()).
+ M(host).F().
+ Error("FAILED to reconcile StatefulSet for host: %s", host.GetName())
+ }
+
+ return err
+}
+
+func (w *worker) getHostSoftwareVersion(ctx context.Context, host *api.Host) string {
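+	// Version probing is not implemented for Keeper hosts yet - return a placeholder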
+ return "undefined"
+}
+
+// reconcileHostService reconciles host's Service
+func (w *worker) reconcileHostService(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+ service := w.task.Creator().CreateService(interfaces.ServiceHost, host)
+ if service == nil {
+ // This is not a problem, service may be omitted
+ return nil
+ }
+ err := w.reconcileService(ctx, host.GetCR(), service)
+ if err == nil {
+ w.a.V(1).M(host).F().Info("DONE Reconcile service of the host: %s", host.GetName())
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
+ } else {
+ w.a.V(1).M(host).F().Warning("FAILED Reconcile service of the host: %s", host.GetName())
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
+ }
+ return err
+}
+
+// reconcileCluster reconciles ChkCluster, excluding nested shards
+func (w *worker) reconcileCluster(ctx context.Context, cluster *apiChk.Cluster) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cluster).S().P()
+ defer w.a.V(2).M(cluster).E().P()
+
+ // Add Cluster Service
+ if service := w.task.Creator().CreateService(interfaces.ServiceCluster, cluster); service != nil {
+ if err := w.reconcileService(ctx, cluster.GetRuntime().GetCR(), service); err == nil {
+ w.task.RegistryReconciled().RegisterService(service.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterService(service.GetObjectMeta())
+ }
+ }
+
+ w.reconcileClusterSecret(ctx, cluster)
+
+ pdb := w.task.Creator().CreatePodDisruptionBudget(cluster)
+ if err := w.reconcilePDB(ctx, cluster, pdb); err == nil {
+ w.task.RegistryReconciled().RegisterPDB(pdb.GetObjectMeta())
+ } else {
+ w.task.RegistryFailed().RegisterPDB(pdb.GetObjectMeta())
+ }
+
+ return nil
+}
+
+func (w *worker) reconcileClusterSecret(ctx context.Context, cluster *apiChk.Cluster) {
+}
+
+// getReconcileShardsWorkersNum returns how many workers are allowed to be used for concurrent shard reconcile.
+// Keeper shards are currently reconciled sequentially, so this is fixed at 1.
+func (w *worker) getReconcileShardsWorkersNum(shards []*apiChk.ChkShard, opts *common.ReconcileShardsAndHostsOptions) int {
+ return 1
+}
+
+// reconcileShardsAndHosts reconciles shards and hosts of each shard
+func (w *worker) reconcileShardsAndHosts(ctx context.Context, shards []*apiChk.ChkShard) error {
+ // Sanity check - has to have shard(s)
+ if len(shards) == 0 {
+ return nil
+ }
+
+ log.V(1).F().S().Info("reconcileShardsAndHosts start")
+ defer log.V(1).F().E().Info("reconcileShardsAndHosts end")
+
+ // Try to fetch options
+ opts, ok := ctx.Value(common.ReconcileShardsAndHostsOptionsCtxKey).(*common.ReconcileShardsAndHostsOptions)
+ if ok {
+ w.a.V(1).Info("found ReconcileShardsAndHostsOptionsCtxKey")
+ } else {
+ w.a.V(1).Info("not found ReconcileShardsAndHostsOptionsCtxKey, use empty opts")
+ opts = &common.ReconcileShardsAndHostsOptions{}
+ }
+
+ // Which shard to start concurrent processing with
+ var startShard int
+ if opts.FullFanOut {
+ // For full fan-out scenarios we'll start shards processing from the very beginning
+ startShard = 0
+ w.a.V(1).Info("full fan-out requested")
+ } else {
+ // For non-full fan-out scenarios, we'll process the first shard separately.
+ // This gives us some early indicator on whether the reconciliation would fail,
+ // and for large clusters it is a small price to pay before performing concurrent fan-out.
+ w.a.V(1).Info("starting first shard separately")
+ if err := w.reconcileShardWithHosts(ctx, shards[0]); err != nil {
+ w.a.V(1).Warning("first shard failed, skipping rest of shards due to an error: %v", err)
+ return err
+ }
+
+	// Since the shard with index 0 is already done, proceed from index 1
+ startShard = 1
+ }
+
+ // Process shards using specified concurrency level while maintaining specified max concurrency percentage.
+ // Loop over shards.
+ workersNum := w.getReconcileShardsWorkersNum(shards, opts)
+ w.a.V(1).Info("Starting rest of shards on workers: %d", workersNum)
+ for startShardIndex := startShard; startShardIndex < len(shards); startShardIndex += workersNum {
+ endShardIndex := startShardIndex + workersNum
+ if endShardIndex > len(shards) {
+ endShardIndex = len(shards)
+ }
+ concurrentlyProcessedShards := shards[startShardIndex:endShardIndex]
+
+ // Processing error protected with mutex
+ var err error
+ var errLock sync.Mutex
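+		// Note: if several shards fail concurrently, only one of the errors is retained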
+
+ wg := sync.WaitGroup{}
+ wg.Add(len(concurrentlyProcessedShards))
+ // Launch shard concurrent processing
+ for j := range concurrentlyProcessedShards {
+ shard := concurrentlyProcessedShards[j]
+ go func() {
+ defer wg.Done()
+ if e := w.reconcileShardWithHosts(ctx, shard); e != nil {
+ errLock.Lock()
+ err = e
+ errLock.Unlock()
+ return
+ }
+ }()
+ }
+ wg.Wait()
+ if err != nil {
+ w.a.V(1).Warning("Skipping rest of shards due to an error: %v", err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *worker) reconcileShardWithHosts(ctx context.Context, shard api.IShard) error {
+ if err := w.reconcileShard(ctx, shard); err != nil {
+ return err
+ }
+ return shard.WalkHostsAbortOnError(func(host *api.Host) error {
+ return w.reconcileHost(ctx, host)
+ })
+}
+
+// reconcileShard reconciles specified shard, excluding nested replicas
+func (w *worker) reconcileShard(ctx context.Context, shard api.IShard) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(shard).S().P()
+ defer w.a.V(2).M(shard).E().P()
+
+ err := w.reconcileShardService(ctx, shard)
+
+ return err
+}
+
+func (w *worker) reconcileShardService(ctx context.Context, shard api.IShard) error {
+ return nil
+}
+
+// reconcileHost reconciles specified Keeper host
+func (w *worker) reconcileHost(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(host).S().P()
+ defer w.a.V(2).M(host).E().P()
+
+ if host.IsFirst() {
+ _ = w.reconcileCRServicePreliminary(ctx, host.GetCR())
+ defer w.reconcileCRServiceFinal(ctx, host.GetCR())
+ }
+
+ // Create artifacts
+ w.stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false)
+
+ if err := w.reconcileHostPrepare(ctx, host); err != nil {
+ return err
+ }
+ if err := w.reconcileHostMain(ctx, host); err != nil {
+ return err
+ }
+ // Host is now added and functional
+ host.GetReconcileAttributes().UnsetAdd()
+ if err := w.reconcileHostBootstrap(ctx, host); err != nil {
+ return err
+ }
+
+ now := time.Now()
+ hostsCompleted := 0
+ hostsCount := 0
+ host.GetCR().IEnsureStatus().HostCompleted()
+ if host.GetCR() != nil && host.GetCR().GetStatus() != nil {
+ hostsCompleted = host.GetCR().GetStatus().GetHostsCompletedCount()
+ hostsCount = host.GetCR().GetStatus().GetHostsCount()
+ }
+ w.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionProgress, common.EventReasonProgressHostsCompleted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("[now: %s] %s: %d of %d", now, common.EventReasonProgressHostsCompleted, hostsCompleted, hostsCount)
+
+ _ = w.c.updateCRObjectStatus(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+ return nil
+}
+
+// reconcileHostPrepare runs the first stage of host reconcile: include the host into the raft configuration
+func (w *worker) reconcileHostPrepare(ctx context.Context, host *api.Host) error {
+ w.a.V(1).
+ M(host).F().
+ Info("Include host into cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+ w.includeHostIntoRaftCluster(ctx, host)
+ return nil
+}
+
+// reconcileHostMain runs the main stage of host reconcile: ConfigMap, PVCs, StatefulSet and Service
+func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
+ var (
+ reconcileStatefulSetOpts *statefulset.ReconcileOptions
+ )
+
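+	// The first and the last hosts are reconciled with the do-not-wait option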
+ if host.IsFirst() || host.IsLast() {
+ reconcileStatefulSetOpts = reconcileStatefulSetOpts.SetDoNotWait()
+ }
+
+ if err := w.reconcileConfigMapHost(ctx, host); err != nil {
+ w.a.V(1).
+ M(host).F().
+ Warning("Reconcile Host interrupted with an error 2. Host: %s Err: %v", host.GetName(), err)
+ return err
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("Reconcile PVCs and check possible data loss for host: %s", host.GetName())
+ if storage.ErrIsDataLoss(
+ storage.NewStorageReconciler(
+ w.task,
+ w.c.namer,
+ storage.NewStoragePVC(kube.NewPVC(w.c.Client)),
+ ).ReconcilePVCs(ctx, host, api.DesiredStatefulSet),
+ ) {
+ // In case of data loss detection on existing volumes, we need to:
+ // 1. recreate StatefulSet
+ // 2. run tables migration again
+ reconcileStatefulSetOpts = reconcileStatefulSetOpts.SetForceRecreate()
+ w.a.V(1).
+ M(host).F().
+ Info("Data loss detected for host: %s. Will do force migrate", host.GetName())
+ }
+
+ if err := w.reconcileHostStatefulSet(ctx, host, reconcileStatefulSetOpts); err != nil {
+ w.a.V(1).
+ M(host).F().
+ Warning("Reconcile Host interrupted with an error 3. Host: %s Err: %v", host.GetName(), err)
+ return err
+ }
+ // Polish all new volumes that operator has to create
+ _ = storage.NewStorageReconciler(
+ w.task,
+ w.c.namer,
+ storage.NewStoragePVC(kube.NewPVC(w.c.Client)),
+ ).ReconcilePVCs(ctx, host, api.DesiredStatefulSet)
+
+ _ = w.reconcileHostService(ctx, host)
+
+ return nil
+}
+
+// reconcileHostBootstrap runs the final stage of host reconcile: include the host back into the cluster
+func (w *worker) reconcileHostBootstrap(ctx context.Context, host *api.Host) error {
+ if err := w.includeHost(ctx, host); err != nil {
+ metrics.HostReconcilesErrors(ctx, host.GetCR())
+ w.a.V(1).
+ M(host).F().
+ Warning("Reconcile Host interrupted with an error 4. Host: %s Err: %v", host.GetName(), err)
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/controller/chk/worker-config-map.go b/pkg/controller/chk/worker-config-map.go
new file mode 100644
index 000000000..5a3970296
--- /dev/null
+++ b/pkg/controller/chk/worker-config-map.go
@@ -0,0 +1,120 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "time"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// reconcileConfigMap reconciles core.ConfigMap which belongs to the specified CR
+func (w *worker) reconcileConfigMap(
+ ctx context.Context,
+ cr apiChi.ICustomResource,
+ configMap *core.ConfigMap,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().P()
+ defer w.a.V(2).M(cr).E().P()
+
+ // Check whether this object already exists in k8s
+ curConfigMap, err := w.c.getConfigMap(ctx, configMap.GetObjectMeta())
+
+ if curConfigMap != nil {
+ // We have ConfigMap - try to update it
+ err = w.updateConfigMap(ctx, cr, configMap)
+ }
+
+ if apiErrors.IsNotFound(err) {
+ // ConfigMap not found - even during Update process - try to create it
+ err = w.createConfigMap(ctx, cr, configMap)
+ }
+
+ if err != nil {
+ w.a.WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED to reconcile ConfigMap: %s CHI: %s ", configMap.GetName(), cr.GetName())
+ }
+
+ return err
+}
+
+// updateConfigMap
+func (w *worker) updateConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ updatedConfigMap, err := w.c.updateConfigMap(ctx, configMap)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Update ConfigMap %s/%s", configMap.Namespace, configMap.Name)
+ if updatedConfigMap.ResourceVersion != configMap.ResourceVersion {
+ w.task.SetCmUpdate(time.Now())
+ }
+ } else {
+ w.a.WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Update ConfigMap %s/%s failed with error %v", configMap.Namespace, configMap.Name, err)
+ }
+
+ return err
+}
+
+// createConfigMap
+func (w *worker) createConfigMap(ctx context.Context, cr apiChi.ICustomResource, configMap *core.ConfigMap) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ err := w.c.createConfigMap(ctx, configMap)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Create ConfigMap %s", util.NamespaceNameString(configMap))
+ } else {
+ w.a.WithEvent(cr, common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Create ConfigMap %s failed with error %v", util.NamespaceNameString(configMap), err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chk/worker-deleter.go b/pkg/controller/chk/worker-deleter.go
new file mode 100644
index 000000000..db6810b9e
--- /dev/null
+++ b/pkg/controller/chk/worker-deleter.go
@@ -0,0 +1,217 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "time"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ chkLabeler "github.com/altinity/clickhouse-operator/pkg/model/chk/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (w *worker) clean(ctx context.Context, cr api.ICustomResource) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ w.a.V(1).
+ WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileInProgress).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("remove items scheduled for deletion")
+
+ // Remove deleted items
+ w.a.V(1).M(cr).F().Info("List of objects which have failed to reconcile:\n%s", w.task.RegistryFailed)
+ w.a.V(1).M(cr).F().Info("List of successfully reconciled objects:\n%s", w.task.RegistryReconciled)
+ objs := w.c.discovery(ctx, cr)
+ need := w.task.RegistryReconciled()
+ w.a.V(1).M(cr).F().Info("Existing objects:\n%s", objs)
+ objs.Subtract(need)
+ w.a.V(1).M(cr).F().Info("Non-reconciled objects:\n%s", objs)
+ if w.purge(ctx, cr, objs, w.task.RegistryFailed()) > 0 {
+ //w.c.enqueueObject(cmd_queue.NewDropDns(chk))
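+		// Give the purged objects some time to be actually deleted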
+ util.WaitContextDoneOrTimeout(ctx, 1*time.Minute)
+ }
+
+ //cr.EnsureStatus().SyncHostTablesCreated()
+}
+
+// purge
+func (w *worker) purge(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reg *model.Registry,
+ reconcileFailedObjs *model.Registry,
+) (cnt int) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return cnt
+ }
+
+ reg.Walk(func(entityType model.EntityType, m meta.Object) {
+ switch entityType {
+ case model.StatefulSet:
+ cnt += w.purgeStatefulSet(ctx, cr, reconcileFailedObjs, m)
+ case model.PVC:
+ w.purgePVC(ctx, cr, reconcileFailedObjs, m)
+ case model.ConfigMap:
+ w.purgeConfigMap(ctx, cr, reconcileFailedObjs, m)
+ case model.Service:
+ w.purgeService(ctx, cr, reconcileFailedObjs, m)
+ case model.Secret:
+ w.purgeSecret(ctx, cr, reconcileFailedObjs, m)
+ case model.PDB:
+ w.purgePDB(ctx, cr, reconcileFailedObjs, m)
+ }
+ })
+ return cnt
+}
+
+func (w *worker) purgeStatefulSet(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) int {
+ if shouldPurgeStatefulSet(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete StatefulSet: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.STS().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete StatefulSet: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ return 1
+ }
+ return 0
+}
+
+func (w *worker) purgePVC(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) {
+ if shouldPurgePVC(cr, reconcileFailedObjs, m) {
+ if chkLabeler.New(nil).GetReclaimPolicy(m) == api.PVCReclaimPolicyDelete {
+ w.a.V(1).M(m).F().Info("Delete PVC: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Storage().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete PVC: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ }
+ }
+}
+
+func (w *worker) purgeConfigMap(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) {
+ if shouldPurgeConfigMap(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete ConfigMap: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.ConfigMap().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete ConfigMap: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ }
+}
+
+func (w *worker) purgeService(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) {
+ if shouldPurgeService(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete Service: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Service().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete Service: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ }
+}
+
+func (w *worker) purgeSecret(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) {
+ if shouldPurgeSecret(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete Secret: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.Secret().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete Secret: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ }
+}
+
+func (w *worker) purgePDB(
+ ctx context.Context,
+ cr api.ICustomResource,
+ reconcileFailedObjs *model.Registry,
+ m meta.Object,
+) {
+ if shouldPurgePDB(cr, reconcileFailedObjs, m) {
+ w.a.V(1).M(m).F().Info("Delete PDB: %s", util.NamespaceNameString(m))
+ if err := w.c.kube.PDB().Delete(ctx, m.GetNamespace(), m.GetName()); err != nil {
+ w.a.V(1).M(m).F().Error("FAILED to delete PDB: %s, err: %v", util.NamespaceNameString(m), err)
+ }
+ }
+}
+
+func shouldPurgeStatefulSet(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasStatefulSet(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetStatefulSet() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgePVC(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasPVC(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetPVC() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetPVC() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgeConfigMap(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasConfigMap(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetConfigMap() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetConfigMap() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgeService(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasService(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetService() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetService() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgeSecret(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
+ if reconcileFailedObjs.HasSecret(m) {
+ return cr.GetReconciling().GetCleanup().GetReconcileFailedObjects().GetSecret() == api.ObjectsCleanupDelete
+ }
+ return cr.GetReconciling().GetCleanup().GetUnknownObjects().GetSecret() == api.ObjectsCleanupDelete
+}
+
+func shouldPurgePDB(cr api.ICustomResource, reconcileFailedObjs *model.Registry, m meta.Object) bool {
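+	// PDBs are always purged regardless of cleanup settings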
+ return true
+}
diff --git a/pkg/controller/chk/worker-exclude-include-wait.go b/pkg/controller/chk/worker-exclude-include-wait.go
new file mode 100644
index 000000000..6b8136aac
--- /dev/null
+++ b/pkg/controller/chk/worker-exclude-include-wait.go
@@ -0,0 +1,108 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (w *worker) waitForIPAddresses(ctx context.Context, chk *apiChk.ClickHouseKeeperInstallation) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+ if chk.IsStopped() {
+		// No need to wait for stopped CR
+ return
+ }
+ w.a.V(1).M(chk).F().S().Info("wait for IP addresses to be assigned to all pods")
+ start := time.Now()
+ w.c.poll(ctx, chk, func(c *apiChk.ClickHouseKeeperInstallation, e error) bool {
+ // TODO fix later
+ // status IPs list can be empty
+ // Instead of doing in status:
+ // podIPs := c.getPodsIPs(chi)
+ // cur.EnsureStatus().SetPodIPs(podIPs)
+ // and here
+ // c.Status.GetPodIPs()
+ podIPs := w.c.getPodsIPs(chk)
+ if len(podIPs) >= len(c.Status.GetPods()) {
+ // Stop polling
+ w.a.V(1).M(c).Info("all IP addresses are in place")
+ return false
+ }
+		if time.Since(start) > 1*time.Minute {
+ // Stop polling
+ w.a.V(1).M(c).Warning("not all IP addresses are in place but time has elapsed")
+ return false
+ }
+ // Continue polling
+ w.a.V(1).M(c).Warning("still waiting - not all IP addresses are in place yet")
+ return true
+ })
+}
+
+// shouldIncludeHost determines whether the host is to be included into the cluster after reconciling
+func (w *worker) shouldIncludeHost(host *api.Host) bool {
+ switch {
+ case host.IsStopped():
+ // No need to include stopped host
+ return false
+ }
+ return true
+}
+
+// includeHost includes the host back into the Keeper cluster
+func (w *worker) includeHost(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ if !w.shouldIncludeHost(host) {
+ w.a.V(1).
+ M(host).F().
+ Info("No need to include host into cluster. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+ return nil
+ }
+
+ return nil
+}
+
+// includeHostIntoRaftCluster includes host into raft configuration
+func (w *worker) includeHostIntoRaftCluster(ctx context.Context, host *api.Host) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ w.a.V(1).
+ M(host).F().
+ Info("going to include host. Host/shard/cluster: %d/%d/%s",
+ host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ShardIndex, host.Runtime.Address.ClusterName)
+
+	// Specify in options to add this host into the Keeper config file
+ host.GetCR().GetRuntime().LockCommonConfig()
+ host.GetReconcileAttributes().UnsetExclude()
+ _ = w.reconcileConfigMapCommon(ctx, host.GetCR(), w.options())
+ host.GetCR().GetRuntime().UnlockCommonConfig()
+}
diff --git a/pkg/controller/chk/worker-pdb.go b/pkg/controller/chk/worker-pdb.go
new file mode 100644
index 000000000..c75056f4a
--- /dev/null
+++ b/pkg/controller/chk/worker-pdb.go
@@ -0,0 +1,66 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+
+ policy "k8s.io/api/policy/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+//func (w *worker) reconcilePDB (chk *apiChk.ClickHouseKeeperInstallation) error {
+// return w.c.reconcile(
+// chk,
+// &policy.PodDisruptionBudget{},
+// creator.CreatePodDisruptionBudget(chk),
+// "PodDisruptionBudget",
+// nil,
+// )
+//}
+
+// reconcilePDB reconciles PodDisruptionBudget
+func (w *worker) reconcilePDB(ctx context.Context, cluster apiChi.ICluster, pdb *policy.PodDisruptionBudget) error {
+ cur, err := w.c.getPDB(ctx, pdb)
+ switch {
+ case err == nil:
+ pdb.ResourceVersion = cur.ResourceVersion
+ err := w.c.updatePDB(ctx, pdb)
+ if err == nil {
+ log.V(1).Info("PDB updated: %s", util.NamespaceNameString(pdb))
+ } else {
+ log.Error("FAILED to update PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return err
+ }
+ case apiErrors.IsNotFound(err):
+ err := w.c.createPDB(ctx, pdb)
+ if err == nil {
+ log.V(1).Info("PDB created: %s", util.NamespaceNameString(pdb))
+ } else {
+ log.Error("FAILED create PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return err
+ }
+ default:
+ log.Error("FAILED get PDB: %s err: %v", util.NamespaceNameString(pdb), err)
+ return err
+ }
+
+ return nil
+}
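+
+// Note: on the update path above, the live object's ResourceVersion is copied
+// into the desired PDB first - resourceVersion is required in order to update
+// an existing object.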
diff --git a/pkg/controller/chk/worker-service.go b/pkg/controller/chk/worker-service.go
new file mode 100644
index 000000000..7b6c7ab9b
--- /dev/null
+++ b/pkg/controller/chk/worker-service.go
@@ -0,0 +1,253 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ apiChi "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func (w *worker) reconcileClientService(chk *apiChk.ClickHouseKeeperInstallation) error {
+ return w.c.reconcile(
+ chk,
+ &core.Service{},
+ w.task.Creator().CreateService(interfaces.ServiceCR, chk),
+ "Client Service",
+ reconcileUpdaterService,
+ )
+}
+
+func (w *worker) reconcileHeadlessService(chk *apiChk.ClickHouseKeeperInstallation) error {
+ return w.c.reconcile(
+ chk,
+ &core.Service{},
+ w.task.Creator().CreateService(interfaces.ServiceHost, chk),
+ "Headless Service",
+ reconcileUpdaterService,
+ )
+}
+
+func reconcileUpdaterService(_cur, _new client.Object) error {
+ cur, ok1 := _cur.(*core.Service)
+ new, ok2 := _new.(*core.Service)
+ if !ok1 || !ok2 {
+ return fmt.Errorf("unable to cast object to *core.Service")
+ }
+ return updateService(cur, new)
+}
+
+func updateService(cur, new *core.Service) error {
+ cur.Spec.Ports = new.Spec.Ports
+ cur.Spec.Type = new.Spec.Type
+ cur.SetAnnotations(new.GetAnnotations())
+ return nil
+}
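+
+// Note: updateService copies only the mutable Service fields (ports, type,
+// annotations) onto the live object; immutable fields such as spec.clusterIP
+// are deliberately left untouched (the worker's updateService below migrates
+// them explicitly).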
+
+// reconcileService reconciles core.Service
+func (w *worker) reconcileService(ctx context.Context, cr apiChi.ICustomResource, service *core.Service) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ w.a.V(2).M(cr).S().Info(service.GetName())
+ defer w.a.V(2).M(cr).E().Info(service.GetName())
+
+ // Check whether this object already exists
+ curService, err := w.c.getService(ctx, service)
+
+ if curService != nil {
+ // We have the Service - try to update it
+ w.a.V(1).M(cr).F().Info("Service found: %s. Will try to update", util.NamespaceNameString(service))
+ err = w.updateService(ctx, cr, curService, service)
+ }
+
+ if err != nil {
+ if apiErrors.IsNotFound(err) {
+ // The Service was not found - create it anew below
+ w.a.V(1).M(cr).F().Info("Service: %s not found. err: %v", util.NamespaceNameString(service), err)
+ } else {
+ // The Service update failed - try to recreate it below
+ w.a.WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("Update Service: %s failed with error: %v", util.NamespaceNameString(service), err)
+ }
+
+ _ = w.c.deleteServiceIfExists(ctx, service.GetNamespace(), service.GetName())
+ err = w.createService(ctx, cr, service)
+ }
+
+ if err == nil {
+ w.a.V(1).M(cr).F().Info("Service reconcile successful: %s", util.NamespaceNameString(service))
+ } else {
+ w.a.WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED to reconcile Service: %s CHI: %s ", util.NamespaceNameString(service), cr.GetName())
+ }
+
+ return err
+}
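+
+// Note: when the in-place update fails, reconcileService falls back to
+// delete-and-recreate; for a ClusterIP service this means a new cluster IP
+// is allocated.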
+
+// updateService
+func (w *worker) updateService(
+ ctx context.Context,
+ cr apiChi.ICustomResource,
+ curService *core.Service,
+ targetService *core.Service,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ if curService.Spec.Type != targetService.Spec.Type {
+ return fmt.Errorf(
+ "just recreate the service in case of service type change '%s'=>'%s'",
+ curService.Spec.Type, targetService.Spec.Type)
+ }
+
+ // Updating a Service is a complicated business
+
+ newService := targetService.DeepCopy()
+
+ // spec.resourceVersion is required in order to update an object
+ newService.ResourceVersion = curService.ResourceVersion
+
+ //
+ // Migrate ClusterIP to the new service
+ //
+ // spec.clusterIP field is immutable, need to use already assigned value
+ // From https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ // Kubernetes assigns this Service an IP address (sometimes called the “cluster IP”), which is used by the Service proxies
+ // See also https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // You can specify your own cluster IP address as part of a Service creation request. To do this, set the .spec.clusterIP
+ newService.Spec.ClusterIP = curService.Spec.ClusterIP
+
+ //
+ // Migrate existing ports to the new service for NodePort and LoadBalancer services
+ //
+ // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+ // Usually assigned by the system. If specified, it will be allocated to the service if unused
+ // or else creation of the service will fail.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+
+ // !!! IMPORTANT !!!
+ // No changes in service type is allowed.
+ // Already exposed port details can not be changed.
+
+ serviceTypeIsNodePort := (curService.Spec.Type == core.ServiceTypeNodePort) && (newService.Spec.Type == core.ServiceTypeNodePort)
+ serviceTypeIsLoadBalancer := (curService.Spec.Type == core.ServiceTypeLoadBalancer) && (newService.Spec.Type == core.ServiceTypeLoadBalancer)
+ if serviceTypeIsNodePort || serviceTypeIsLoadBalancer {
+ for i := range newService.Spec.Ports {
+ newPort := &newService.Spec.Ports[i]
+ for j := range curService.Spec.Ports {
+ curPort := &curService.Spec.Ports[j]
+ if newPort.Port == curPort.Port {
+ // Already have this port specified - reuse all internals,
+ // due to limitations with auto-assigned values
+ *newPort = *curPort
+ w.a.M(cr).F().Info("reuse Port %d values", newPort.Port)
+ break
+ }
+ }
+ }
+ }
+
+ //
+ // Migrate HealthCheckNodePort to the new service
+ //
+ // spec.healthCheckNodePort field is used with ExternalTrafficPolicy=Local only and is immutable within ExternalTrafficPolicy=Local
+ // In case ExternalTrafficPolicy is changed it seems to be irrelevant
+ // https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ curExternalTrafficPolicyTypeLocal := curService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
+ newExternalTrafficPolicyTypeLocal := newService.Spec.ExternalTrafficPolicy == core.ServiceExternalTrafficPolicyTypeLocal
+ if curExternalTrafficPolicyTypeLocal && newExternalTrafficPolicyTypeLocal {
+ newService.Spec.HealthCheckNodePort = curService.Spec.HealthCheckNodePort
+ }
+
+ //
+ // Migrate LoadBalancerClass to the new service
+ //
+ // This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ if curService.Spec.LoadBalancerClass != nil {
+ newService.Spec.LoadBalancerClass = curService.Spec.LoadBalancerClass
+ }
+
+ //
+ // Migrate labels, annotations and finalizers to the new service
+ //
+ newService.GetObjectMeta().SetLabels(util.MergeStringMapsPreserve(newService.GetObjectMeta().GetLabels(), curService.GetObjectMeta().GetLabels()))
+ newService.GetObjectMeta().SetAnnotations(util.MergeStringMapsPreserve(newService.GetObjectMeta().GetAnnotations(), curService.GetObjectMeta().GetAnnotations()))
+ newService.GetObjectMeta().SetFinalizers(util.MergeStringArrays(newService.GetObjectMeta().GetFinalizers(), curService.GetObjectMeta().GetFinalizers()))
+
+ //
+ // And only now we are ready to actually update the service with new version of the service
+ //
+
+ err := w.c.updateService(ctx, newService)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionUpdate, common.EventReasonUpdateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("Update Service success: %s", util.NamespaceNameString(newService))
+ } else {
+ w.a.M(cr).F().Error("Update Service fail: %s failed with error: %v", util.NamespaceNameString(newService))
+ }
+
+ return err
+}
+
+// createService
+func (w *worker) createService(ctx context.Context, cr apiChi.ICustomResource, service *core.Service) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ err := w.c.createService(ctx, service)
+ if err == nil {
+ w.a.V(1).
+ WithEvent(cr, common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(cr).
+ M(cr).F().
+ Info("OK Create Service: %s", util.NamespaceNameString(service))
+ } else {
+ w.a.WithEvent(cr, common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(cr).
+ WithStatusError(cr).
+ M(cr).F().
+ Error("FAILED Create Service: %s err: %v", util.NamespaceNameString(service), err)
+ }
+
+ return err
+}
diff --git a/pkg/controller/chk/worker.go b/pkg/controller/chk/worker.go
new file mode 100644
index 000000000..869ce7ffa
--- /dev/null
+++ b/pkg/controller/chk/worker.go
@@ -0,0 +1,439 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chk
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ apiChk "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-keeper.altinity.com/v1"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller/domain"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/statefulset"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/config"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/macro"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/namer"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/normalizer"
+ "github.com/altinity/clickhouse-operator/pkg/model/chk/tags/labeler"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+ commonConfig "github.com/altinity/clickhouse-operator/pkg/model/common/config"
+ commonCreator "github.com/altinity/clickhouse-operator/pkg/model/common/creator"
+ commonMacro "github.com/altinity/clickhouse-operator/pkg/model/common/macro"
+ commonNormalizer "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
+ "github.com/altinity/clickhouse-operator/pkg/model/managers"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// worker represents worker thread which runs reconcile tasks
+type worker struct {
+ c *Controller
+ a common.Announcer
+
+ normalizer *normalizer.Normalizer
+ task *common.Task
+ stsReconciler *statefulset.Reconciler
+
+ start time.Time
+}
+
+// newWorker
+func (c *Controller) newWorker() *worker {
+ start := time.Now()
+ //kind := "ClickHouseKeeperInstallation"
+ //generateName := "chop-chk-"
+ //component := componentName
+
+ announcer := common.NewAnnouncer(
+ //common.NewEventEmitter(c.kube.Event(), kind, generateName, component),
+ nil,
+ c.kube.CR(),
+ )
+
+ return &worker{
+ c: c,
+ a: announcer,
+
+ normalizer: normalizer.New(),
+ start: start,
+ task: nil,
+ }
+}
+
+func configGeneratorOptions(cr *apiChk.ClickHouseKeeperInstallation) *config.GeneratorOptions {
+ return &config.GeneratorOptions{
+ Settings: cr.GetSpecT().Configuration.Settings,
+ Files: cr.GetSpecT().Configuration.Files,
+ }
+}
+
+func (w *worker) newTask(cr *apiChk.ClickHouseKeeperInstallation) {
+ w.task = common.NewTask(
+ commonCreator.NewCreator(
+ cr,
+ managers.NewConfigFilesGenerator(managers.FilesGeneratorTypeKeeper, cr, configGeneratorOptions(cr)),
+ managers.NewContainerManager(managers.ContainerManagerTypeKeeper),
+ managers.NewTagManager(managers.TagManagerTypeKeeper, cr),
+ managers.NewProbeManager(managers.ProbeManagerTypeKeeper),
+ managers.NewServiceManager(managers.ServiceManagerTypeKeeper),
+ managers.NewVolumeManager(managers.VolumeManagerTypeKeeper),
+ managers.NewConfigMapManager(managers.ConfigMapManagerTypeKeeper),
+ managers.NewNameManager(managers.NameManagerTypeKeeper),
+ managers.NewOwnerReferencesManager(managers.OwnerReferencesManagerTypeKeeper),
+ namer.New(),
+ commonMacro.New(macro.List),
+ labeler.New(cr),
+ ),
+ )
+
+ w.stsReconciler = statefulset.NewReconciler(
+ w.a,
+ w.task,
+ //poller.NewHostStatefulSetPoller(poller.NewStatefulSetPoller(w.c.kube), w.c.kube, w.c.labeler),
+ domain.NewHostStatefulSetPoller(domain.NewStatefulSetPoller(w.c.kube), w.c.kube, nil),
+ w.c.namer,
+ labeler.New(cr),
+ storage.NewStorageReconciler(w.task, w.c.namer, w.c.kube.Storage()),
+ w.c.kube,
+ statefulset.NewDefaultFallback(),
+ )
+}
+
+// shouldForceRestartHost checks whether the host must be force-restarted
+func (w *worker) shouldForceRestartHost(host *api.Host) bool {
+ // The RollingUpdate policy always requires the host to be shut down.
+ if host.GetCR().IsRollingUpdate() {
+ w.a.V(1).M(host).F().Info("RollingUpdate requires force restart. Host: %s", host.GetName())
+ return true
+ }
+
+ if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusNew {
+ w.a.V(1).M(host).F().Info("Host is new, no restart applicable. Host: %s", host.GetName())
+ return false
+ }
+
+ if (host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame) && !host.HasAncestor() {
+ w.a.V(1).M(host).F().Info("Host already exists, but has no ancestor, no restart applicable. Host: %s", host.GetName())
+ return false
+ }
+
+ // For some configuration changes we have to force restart host
+ if model.IsConfigurationChangeRequiresReboot(host) {
+ w.a.V(1).M(host).F().Info("Config change(s) require host restart. Host: %s", host.GetName())
+ return true
+ }
+
+ podIsCrashed := false
+ // pod.Status.ContainerStatuses[0].State.Waiting.Reason
+ if pod, err := w.c.kube.Pod().Get(host); err == nil {
+ if len(pod.Status.ContainerStatuses) > 0 {
+ if pod.Status.ContainerStatuses[0].State.Waiting != nil {
+ if pod.Status.ContainerStatuses[0].State.Waiting.Reason == "CrashLoopBackOff" {
+ podIsCrashed = true
+ }
+ }
+ }
+ }
+
+ if host.Runtime.Version.IsUnknown() && podIsCrashed {
+ w.a.V(1).M(host).F().Info("Host with unknown version and in CrashLoopBackOff should be restarted. It most likely is unable to start due to bad config. Host: %s", host.GetName())
+ return true
+ }
+
+ w.a.V(1).M(host).F().Info("Host restart is not required. Host: %s", host.GetName())
+ return false
+}
+
+// normalize
+func (w *worker) normalize(c *apiChk.ClickHouseKeeperInstallation) *apiChk.ClickHouseKeeperInstallation {
+ chk, err := normalizer.New().CreateTemplated(c, commonNormalizer.NewOptions())
+ if err != nil {
+ w.a.WithEvent(chk, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusError(chk).
+ M(chk).F().
+ Error("FAILED to normalize CR 1: %v", err)
+ }
+ return chk
+}
+
+// areUsableOldAndNew checks whether both old and new objects are usable
+func (w *worker) areUsableOldAndNew(old, new *apiChk.ClickHouseKeeperInstallation) bool {
+ if old == nil {
+ return false
+ }
+ if new == nil {
+ return false
+ }
+ return true
+}
+
+// isGenerationTheSame checks whether the old and new CHKs have the same generation
+func (w *worker) isGenerationTheSame(old, new *apiChk.ClickHouseKeeperInstallation) bool {
+ if !w.areUsableOldAndNew(old, new) {
+ return false
+ }
+
+ return old.GetGeneration() == new.GetGeneration()
+}
+
+func (w *worker) markReconcileStart(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation, ap *action_plan.ActionPlan) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ // Write desired normalized CHK with initialized .Status, so it would be possible to monitor progress
+ cr.EnsureStatus().ReconcileStart(ap.GetRemovedHostsNum())
+ _ = w.c.updateCRObjectStatus(ctx, cr, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+
+ w.a.V(1).
+ WithEvent(cr, common.EventActionReconcile, common.EventReasonReconcileStarted).
+ WithStatusAction(cr).
+ WithStatusActions(cr).
+ M(cr).F().
+ Info("reconcile started, task id: %s", cr.GetSpecT().GetTaskID())
+ w.a.V(2).M(cr).F().Info("action plan\n%s\n", ap.String())
+}
+
+func (w *worker) finalizeReconcileAndMarkCompleted(ctx context.Context, _chk *apiChk.ClickHouseKeeperInstallation) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ w.a.V(1).M(_chk).F().S().Info("finalize reconcile")
+
+ // Update CHK object
+ if chi, err := w.createCRFromObjectMeta(_chk, true, commonNormalizer.NewOptions()); err == nil {
+ w.a.V(1).M(chi).Info("updating endpoints for CR-2 %s", chi.Name)
+ ips := w.c.getPodsIPs(chi)
+ w.a.V(1).M(chi).Info("IPs of the CR-2 finalize reconcile %s/%s: len: %d %v", chi.Namespace, chi.Name, len(ips), ips)
+ opts := commonNormalizer.NewOptions()
+ opts.DefaultUserAdditionalIPs = ips
+ if chi, err := w.createCRFromObjectMeta(_chk, true, opts); err == nil {
+ w.a.V(1).M(chi).Info("Update users IPS-2")
+ chi.SetAncestor(chi.GetTarget())
+ chi.SetTarget(nil)
+ chi.EnsureStatus().ReconcileComplete()
+ // TODO unify with update endpoints
+ w.newTask(chi)
+ //w.reconcileConfigMapCommonUsers(ctx, chi)
+ w.c.updateCRObjectStatus(ctx, chi, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ WholeStatus: true,
+ },
+ })
+ } else {
+ w.a.M(_chk).F().Error("internal unable to find CR by %v err: %v", _chk.GetLabels(), err)
+ }
+ } else {
+ w.a.M(_chk).F().Error("external unable to find CR by %v err %v", _chk.GetLabels(), err)
+ }
+
+ w.a.V(1).
+ WithEvent(_chk, common.EventActionReconcile, common.EventReasonReconcileCompleted).
+ WithStatusAction(_chk).
+ WithStatusActions(_chk).
+ M(_chk).F().
+ Info("reconcile completed successfully, task id: %s", _chk.GetSpecT().GetTaskID())
+}
+
+func (w *worker) markReconcileCompletedUnsuccessfully(ctx context.Context, chk *apiChk.ClickHouseKeeperInstallation, err error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ switch {
+ case err == nil:
+ chk.EnsureStatus().ReconcileComplete()
+ case errors.Is(err, common.ErrCRUDAbort):
+ chk.EnsureStatus().ReconcileAbort()
+ }
+ w.c.updateCRObjectStatus(ctx, chk, types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+
+ w.a.V(1).
+ WithEvent(chk, common.EventActionReconcile, common.EventReasonReconcileFailed).
+ WithStatusAction(chk).
+ WithStatusActions(chk).
+ M(chk).F().
+ Warning("reconcile completed UNSUCCESSFULLY, task id: %s", chk.GetSpecT().GetTaskID())
+}
+
+func (w *worker) walkHosts(ctx context.Context, chk *apiChk.ClickHouseKeeperInstallation, ap *action_plan.ActionPlan) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ existingObjects := w.c.discovery(ctx, chk)
+ ap.WalkAdded(
+ // Walk over added clusters
+ func(cluster api.ICluster) {
+ w.a.V(1).M(chk).Info("Walking over AP added clusters. Cluster: %s", cluster.GetName())
+
+ cluster.WalkHosts(func(host *api.Host) error {
+ w.a.V(1).M(chk).Info("Walking over hosts in added clusters. Cluster: %s Host: %s", cluster.GetName(), host.GetName())
+
+ // Name of the StatefulSet for this host
+ name := w.c.namer.Name(interfaces.NameStatefulSet, host)
+ // Have we found this StatefulSet
+ found := false
+
+ existingObjects.WalkStatefulSet(func(meta meta.Object) {
+ w.a.V(3).M(chk).Info("Walking over existing sts list. sts: %s", util.NamespacedName(meta))
+ if name == meta.GetName() {
+ // StatefulSet of this host already exist
+ found = true
+ }
+ })
+
+ if found {
+ // StatefulSet of this host already exist, we can't ADD it for sure
+ // It looks like FOUND is the most correct approach
+ w.a.V(1).M(chk).Info("Add host as FOUND via cluster. Host was found as sts. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetFound()
+ } else {
+ // StatefulSet of this host does not exist, looks like we need to ADD it
+ w.a.V(1).M(chk).Info("Add host as ADD via cluster. Host was not found as sts. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetAdd()
+ }
+
+ return nil
+ })
+ },
+ // Walk over added shards
+ func(shard api.IShard) {
+ w.a.V(1).M(chk).Info("Walking over AP added shards. Shard: %s", shard.GetName())
+ // Mark all hosts of the shard as newly added
+ shard.WalkHosts(func(host *api.Host) error {
+ w.a.V(1).M(chk).Info("Add host as ADD via shard. Shard: %s Host: %s", shard.GetName(), host.GetName())
+ host.GetReconcileAttributes().SetAdd()
+ return nil
+ })
+ },
+ // Walk over added hosts
+ func(host *api.Host) {
+ w.a.V(1).M(chk).Info("Walking over AP added hosts. Host: %s", host.GetName())
+ w.a.V(1).M(chk).Info("Add host as ADD via host. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetAdd()
+ },
+ )
+
+ ap.WalkModified(
+ func(cluster api.ICluster) {
+ w.a.V(1).M(chk).Info("Walking over AP modified clusters. Cluster: %s", cluster.GetName())
+ },
+ func(shard api.IShard) {
+ w.a.V(1).M(chk).Info("Walking over AP modified shards. Shard: %s", shard.GetName())
+ },
+ func(host *api.Host) {
+ w.a.V(1).M(chk).Info("Walking over AP modified hosts. Host: %s", host.GetName())
+ w.a.V(1).M(chk).Info("Add host as MODIFIED via host. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetModify()
+ },
+ )
+
+ chk.WalkHosts(func(host *api.Host) error {
+ w.a.V(3).M(chk).Info("Walking over CR hosts. Host: %s", host.GetName())
+ switch {
+ case host.GetReconcileAttributes().IsAdd():
+ w.a.V(3).M(chk).Info("Walking over CR hosts. Host: is already added Host: %s", host.GetName())
+ return nil
+ case host.GetReconcileAttributes().IsModify():
+ w.a.V(3).M(chk).Info("Walking over CR hosts. Host: is already modified Host: %s", host.GetName())
+ return nil
+ default:
+ w.a.V(3).M(chk).Info("Walking over CR hosts. Host: is not clear yet (not detected as added or modified) Host: %s", host.GetName())
+ if host.HasAncestor() {
+ w.a.V(1).M(chk).Info("Add host as FOUND via host. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetFound()
+ } else {
+ w.a.V(1).M(chk).Info("Add host as ADD via host. Host: %s", host.GetName())
+ host.GetReconcileAttributes().SetAdd()
+ }
+ }
+ return nil
+ })
+
+ // Log hosts statuses
+ chk.WalkHosts(func(host *api.Host) error {
+ switch {
+ case host.GetReconcileAttributes().IsAdd():
+ w.a.M(host).Info("ADD host: %s", host.Runtime.Address.CompactString())
+ case host.GetReconcileAttributes().IsModify():
+ w.a.M(host).Info("MODIFY host: %s", host.Runtime.Address.CompactString())
+ case host.GetReconcileAttributes().IsFound():
+ w.a.M(host).Info("FOUND host: %s", host.Runtime.Address.CompactString())
+ default:
+ w.a.M(host).Info("UNKNOWN host: %s", host.Runtime.Address.CompactString())
+ }
+ return nil
+ })
+}
+
+// getRaftGeneratorOptions builds the base host selector for raft config generation
+func (w *worker) getRaftGeneratorOptions() *commonConfig.HostSelector {
+ // Raft is intended to exclude:
+ // 1. all newly added hosts
+ // 2. all explicitly excluded hosts
+ // Note: the corresponding SetAdd/SetExclude attribute setters are commented out below
+ return commonConfig.NewHostSelector().ExcludeReconcileAttributes(
+ api.NewHostReconcileAttributes(),
+ //SetAdd().
+ //SetExclude(),
+ )
+}
+
+// options builds FilesGeneratorOptions
+func (w *worker) options() *config.FilesGeneratorOptions {
+ opts := w.getRaftGeneratorOptions()
+ w.a.Info("RaftOptions: %s", opts)
+ return config.NewFilesGeneratorOptions().SetRaftOptions(opts)
+}
+
+// createCRFromObjectMeta
+func (w *worker) createCRFromObjectMeta(meta meta.Object, isCHI bool, options *commonNormalizer.Options) (*apiChk.ClickHouseKeeperInstallation, error) {
+ w.a.V(3).M(meta).S().P()
+ defer w.a.V(3).M(meta).E().P()
+
+ chi, err := w.c.GetCHIByObjectMeta(meta, isCHI)
+ if err != nil {
+ return nil, err
+ }
+
+ chi, err = w.normalizer.CreateTemplated(chi, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return chi, nil
+}
diff --git a/pkg/controller/chi/announcer.go b/pkg/controller/common/announcer.go
similarity index 65%
rename from pkg/controller/chi/announcer.go
rename to pkg/controller/common/announcer.go
index ba10a06c8..714e47bfc 100644
--- a/pkg/controller/chi/announcer.go
+++ b/pkg/controller/common/announcer.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package chi
+package common
import (
"context"
@@ -23,14 +23,17 @@ import (
a "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
)
// Announcer handler all log/event/status messages going outside of controller/worker
type Announcer struct {
a.Announcer
- ctrl *Controller
- chi *api.ClickHouseInstallation
+ eventEmitter interfaces.IEventEmitter
+ statusUpdater interfaces.IKubeCR
+ cr api.ICustomResource
// writeEvent specifies whether to produce k8s event into chi, therefore requires chi to be specified
// See k8s event for details.
@@ -53,9 +56,11 @@ type Announcer struct {
}
// NewAnnouncer creates new announcer
-func NewAnnouncer() Announcer {
+func NewAnnouncer(eventEmitter interfaces.IEventEmitter, statusUpdater interfaces.IKubeCR) Announcer {
return Announcer{
- Announcer: a.New(),
+ Announcer: a.New(),
+ eventEmitter: eventEmitter,
+ statusUpdater: statusUpdater,
}
}
@@ -133,16 +138,16 @@ func (a Announcer) Info(format string, args ...interface{}) {
a.Announcer.Info(format, args...)
// Produce k8s event
- if a.writeEvent && a.chiCapable() {
+ if a.writeEvent && a.capable() {
if len(args) > 0 {
- a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
+ a.eventEmitter.EventInfo(a.cr, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
} else {
- a.ctrl.EventInfo(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format))
+ a.eventEmitter.EventInfo(a.cr, a.eventAction, a.eventReason, fmt.Sprint(format))
}
}
// Produce chi status record
- a.writeCHIStatus(format, args...)
+ a.writeStatus(format, args...)
}
// Warning is inspired by log.Warningf()
@@ -151,16 +156,16 @@ func (a Announcer) Warning(format string, args ...interface{}) {
a.Announcer.Warning(format, args...)
// Produce k8s event
- if a.writeEvent && a.chiCapable() {
+ if a.writeEvent && a.capable() {
if len(args) > 0 {
- a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
+ a.eventEmitter.EventWarning(a.cr, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
} else {
- a.ctrl.EventWarning(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format))
+ a.eventEmitter.EventWarning(a.cr, a.eventAction, a.eventReason, fmt.Sprint(format))
}
}
// Produce chi status record
- a.writeCHIStatus(format, args...)
+ a.writeStatus(format, args...)
}
// Error is inspired by log.Errorf()
@@ -169,58 +174,47 @@ func (a Announcer) Error(format string, args ...interface{}) {
a.Announcer.Error(format, args...)
// Produce k8s event
- if a.writeEvent && a.chiCapable() {
+ if a.writeEvent && a.capable() {
if len(args) > 0 {
- a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
+ a.eventEmitter.EventError(a.cr, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
} else {
- a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format))
+ a.eventEmitter.EventError(a.cr, a.eventAction, a.eventReason, fmt.Sprint(format))
}
}
// Produce chi status record
- a.writeCHIStatus(format, args...)
+ a.writeStatus(format, args...)
}
// Fatal is inspired by log.Fatalf()
func (a Announcer) Fatal(format string, args ...interface{}) {
// Produce k8s event
- if a.writeEvent && a.chiCapable() {
+ if a.writeEvent && a.capable() {
if len(args) > 0 {
- a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
+ a.eventEmitter.EventError(a.cr, a.eventAction, a.eventReason, fmt.Sprintf(format, args...))
} else {
- a.ctrl.EventError(a.chi, a.eventAction, a.eventReason, fmt.Sprint(format))
+ a.eventEmitter.EventError(a.cr, a.eventAction, a.eventReason, fmt.Sprint(format))
}
}
// Produce chi status record
- a.writeCHIStatus(format, args...)
+ a.writeStatus(format, args...)
// Write and exit
a.Announcer.Fatal(format, args...)
}
-// WithController specifies controller to be used in case `chi`-related announces need to be done
-func (a Announcer) WithController(ctrl *Controller) Announcer {
- b := a
- b.ctrl = ctrl
- return b
-}
-
// WithEvent is used in chained calls in order to produce event into `chi`
-func (a Announcer) WithEvent(
- chi *api.ClickHouseInstallation,
- action string,
- reason string,
-) Announcer {
+func (a Announcer) WithEvent(cr api.ICustomResource, action string, reason string) Announcer {
b := a
- if chi == nil {
+ if cr == nil {
b.writeEvent = false
- b.chi = nil
+ b.cr = nil
b.eventAction = ""
b.eventReason = ""
} else {
b.writeEvent = true
- b.chi = chi
+ b.cr = cr
b.eventAction = action
b.eventReason = reason
}
@@ -228,88 +222,92 @@ func (a Announcer) WithEvent(
}
// WithStatusAction is used in chained calls in order to produce action into `ClickHouseInstallation.Status.Action`
-func (a Announcer) WithStatusAction(chi *api.ClickHouseInstallation) Announcer {
+func (a Announcer) WithStatusAction(cr api.ICustomResource) Announcer {
b := a
- if chi == nil {
- b.chi = nil
+ if cr == nil {
+ b.cr = nil
b.writeStatusAction = false
} else {
- b.chi = chi
+ b.cr = cr
b.writeStatusAction = true
}
return b
}
// WithStatusActions is used in chained calls in order to produce action in ClickHouseInstallation.Status.Actions
-func (a Announcer) WithStatusActions(chi *api.ClickHouseInstallation) Announcer {
+func (a Announcer) WithStatusActions(cr api.ICustomResource) Announcer {
b := a
- if chi == nil {
- b.chi = nil
+ if cr == nil {
+ b.cr = nil
b.writeStatusActions = false
} else {
- b.chi = chi
+ b.cr = cr
b.writeStatusActions = true
}
return b
}
// WithStatusError is used in chained calls in order to produce error in ClickHouseInstallation.Status.Error
-func (a Announcer) WithStatusError(chi *api.ClickHouseInstallation) Announcer {
+func (a Announcer) WithStatusError(cr api.ICustomResource) Announcer {
b := a
- if chi == nil {
- b.chi = nil
+ if cr == nil {
+ b.cr = nil
b.writeStatusError = false
} else {
- b.chi = chi
+ b.cr = cr
b.writeStatusError = true
}
return b
}
-// chiCapable checks whether announcer is capable to produce chi-based announcements
-func (a Announcer) chiCapable() bool {
- return (a.ctrl != nil) && (a.chi != nil)
+// capable checks whether the announcer is capable of producing CR-based announcements
+func (a Announcer) capable() bool {
+ return (a.eventEmitter != nil) && (a.cr != nil)
}
-// writeCHIStatus is internal function which writes ClickHouseInstallation.Status
-func (a Announcer) writeCHIStatus(format string, args ...interface{}) {
- if !a.chiCapable() {
+// writeStatus is an internal function that writes the custom resource .Status
+func (a Announcer) writeStatus(format string, args ...interface{}) {
+ if !a.capable() {
return
}
now := time.Now()
prefix := now.Format(time.RFC3339Nano) + " "
+ shouldUpdateStatus := false
if a.writeStatusAction {
+ shouldUpdateStatus = true
if len(args) > 0 {
- a.chi.EnsureStatus().SetAction(fmt.Sprintf(format, args...))
+ a.cr.IEnsureStatus().SetAction(fmt.Sprintf(format, args...))
} else {
- a.chi.EnsureStatus().SetAction(fmt.Sprint(format))
+ a.cr.IEnsureStatus().SetAction(fmt.Sprint(format))
}
}
if a.writeStatusActions {
+ shouldUpdateStatus = true
if len(args) > 0 {
- a.chi.EnsureStatus().PushAction(prefix + fmt.Sprintf(format, args...))
+ a.cr.IEnsureStatus().PushAction(prefix + fmt.Sprintf(format, args...))
} else {
- a.chi.EnsureStatus().PushAction(prefix + fmt.Sprint(format))
+ a.cr.IEnsureStatus().PushAction(prefix + fmt.Sprint(format))
}
}
if a.writeStatusError {
+ shouldUpdateStatus = true
if len(args) > 0 {
// PR review question: should we prefix the string in the SetError call? If so, we can SetAndPushError.
- a.chi.EnsureStatus().SetError(fmt.Sprintf(format, args...))
- a.chi.EnsureStatus().PushError(prefix + fmt.Sprintf(format, args...))
+ a.cr.IEnsureStatus().SetError(fmt.Sprintf(format, args...))
+ a.cr.IEnsureStatus().PushError(prefix + fmt.Sprintf(format, args...))
} else {
- a.chi.EnsureStatus().SetError(fmt.Sprint(format))
- a.chi.EnsureStatus().PushError(prefix + fmt.Sprint(format))
+ a.cr.IEnsureStatus().SetError(fmt.Sprint(format))
+ a.cr.IEnsureStatus().PushError(prefix + fmt.Sprint(format))
}
}
// Propagate status updates into object
- if a.writeStatusAction || a.writeStatusActions || a.writeStatusError {
- _ = a.ctrl.updateCHIObjectStatus(context.Background(), a.chi, UpdateCHIStatusOptions{
+ if shouldUpdateStatus {
+ _ = a.statusUpdater.StatusUpdate(context.Background(), a.cr, types.UpdateStatusOptions{
TolerateAbsence: true,
- CopyCHIStatusOptions: api.CopyCHIStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
Actions: true,
Errors: true,
},
diff --git a/pkg/controller/common/error.go b/pkg/controller/common/error.go
new file mode 100644
index 000000000..5c20a6c29
--- /dev/null
+++ b/pkg/controller/common/error.go
@@ -0,0 +1,29 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "errors"
+)
+
+// ErrorCRUD specifies errors of the CRUD operations
+type ErrorCRUD error
+
+var (
+ ErrCRUDAbort ErrorCRUD = errors.New("crud error - should abort")
+ ErrCRUDIgnore ErrorCRUD = errors.New("crud error - should ignore")
+ ErrCRUDRecreate ErrorCRUD = errors.New("crud error - should recreate")
+ ErrCRUDUnexpectedFlow ErrorCRUD = errors.New("crud error - unexpected flow")
+)
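+
+// Callers are expected to match these sentinels with errors.Is, for example
+// (as the chk worker does in markReconcileCompletedUnsuccessfully):
+//
+//	if errors.Is(err, ErrCRUDAbort) {
+//		// stop the reconcile cycle
+//	}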
diff --git a/pkg/controller/common/event-emitter.go b/pkg/controller/common/event-emitter.go
new file mode 100644
index 000000000..e44b7b841
--- /dev/null
+++ b/pkg/controller/common/event-emitter.go
@@ -0,0 +1,157 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "time"
+
+ core "k8s.io/api/core/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/controller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+)
+
+const (
+ // Event type (Info, Warning, Error) specifies what kind of event this is
+ eventTypeInfo = "Info"
+ eventTypeWarning = "Warning"
+ eventTypeError = "Error"
+)
+
+const (
+ // Event action describes what action was taken
+ EventActionReconcile = "Reconcile"
+ EventActionCreate = "Create"
+ EventActionUpdate = "Update"
+ EventActionDelete = "Delete"
+ EventActionProgress = "Progress"
+)
+
+const (
+ // Short, machine understandable string that gives the reason for the transition into the object's current status
+ EventReasonReconcileStarted = "ReconcileStarted"
+ EventReasonReconcileInProgress = "ReconcileInProgress"
+ EventReasonReconcileCompleted = "ReconcileCompleted"
+ EventReasonReconcileFailed = "ReconcileFailed"
+ EventReasonCreateStarted = "CreateStarted"
+ EventReasonCreateInProgress = "CreateInProgress"
+ EventReasonCreateCompleted = "CreateCompleted"
+ EventReasonCreateFailed = "CreateFailed"
+ EventReasonUpdateStarted = "UpdateStarted"
+ EventReasonUpdateInProgress = "UpdateInProgress"
+ EventReasonUpdateCompleted = "UpdateCompleted"
+ EventReasonUpdateFailed = "UpdateFailed"
+ EventReasonDeleteStarted = "DeleteStarted"
+ EventReasonDeleteInProgress = "DeleteInProgress"
+ EventReasonDeleteCompleted = "DeleteCompleted"
+ EventReasonDeleteFailed = "DeleteFailed"
+ EventReasonProgressHostsCompleted = "ProgressHostsCompleted"
+)
+
+type EventEmitter struct {
+ kubeEvent interfaces.IKubeEvent
+ kind string
+ generateName string
+ component string
+}
+
+func NewEventEmitter(
+ kubeEvent interfaces.IKubeEvent,
+ kind string,
+ generateName string,
+ component string,
+) *EventEmitter {
+ return &EventEmitter{
+ kubeEvent: kubeEvent,
+ kind: kind,
+ generateName: generateName,
+ component: component,
+ }
+}
+
+// EventInfo emits event Info
+func (c *EventEmitter) EventInfo(obj meta.Object, action string, reason string, message string) {
+ c.emitEvent(obj, eventTypeInfo, action, reason, message)
+}
+
+// EventWarning emits event Warning
+func (c *EventEmitter) EventWarning(obj meta.Object, action string, reason string, message string) {
+ c.emitEvent(obj, eventTypeWarning, action, reason, message)
+}
+
+// EventError emits event Error
+func (c *EventEmitter) EventError(obj meta.Object, action string, reason string, message string) {
+ c.emitEvent(obj, eventTypeError, action, reason, message)
+}
+
+// emitEvent creates a CR-related event
+// _type - type of the event, one of the eventType* constants (Info, Warning, Error)
+// action - what action was attempted, and then succeeded/failed, regarding the involved object; one of the EventAction* constants
+// reason - short, machine-understandable string; one of the EventReason* constants
+// message - human-readable description
+func (c *EventEmitter) emitEvent(
+ obj meta.Object,
+ _type string,
+ action string,
+ reason string,
+ message string,
+) {
+ now := time.Now()
+ namespace := obj.GetNamespace()
+ name := obj.GetName()
+ uid := obj.GetUID()
+ resourceVersion := obj.GetResourceVersion()
+
+ event := &core.Event{
+ ObjectMeta: meta.ObjectMeta{
+ GenerateName: c.generateName,
+ Namespace: namespace,
+ },
+ InvolvedObject: core.ObjectReference{
+ Kind: c.kind,
+ Namespace: namespace,
+ Name: name,
+ UID: uid,
+ APIVersion: "clickhouse.altinity.com/v1",
+ ResourceVersion: resourceVersion,
+ },
+ Reason: reason,
+ Message: message,
+ Source: core.EventSource{
+ Component: c.component,
+ },
+ FirstTimestamp: meta.Time{
+ Time: now,
+ },
+ LastTimestamp: meta.Time{
+ Time: now,
+ },
+ Count: 1,
+ Type: _type,
+ Action: action,
+ ReportingController: c.component,
+ // ID of the controller instance, e.g. `kubelet-xyzf`.
+ // ReportingInstance:
+ }
+ _, err := c.kubeEvent.Create(controller.NewContext(), event)
+
+ if err != nil {
+ log.M(obj).F().Error("Create Event failed: %v", err)
+ }
+
+ log.V(2).M(obj).Info("Wrote event at: %s type: %s action: %s reason: %s message: %s", now, _type, action, reason, message)
+}
diff --git a/pkg/controller/common/object-status.go b/pkg/controller/common/object-status.go
new file mode 100644
index 000000000..b4681e1a1
--- /dev/null
+++ b/pkg/controller/common/object-status.go
@@ -0,0 +1,58 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// GetObjectStatusFromMetas gets StatefulSet status from cur and new meta infos
+func GetObjectStatusFromMetas(labeler interfaces.ILabeler, curMeta, newMeta meta.Object) api.ObjectStatus {
+ // Try to perform label-based version comparison
+ curVersion, curHasLabel := labeler.GetObjectVersion(curMeta)
+ newVersion, newHasLabel := labeler.GetObjectVersion(newMeta)
+
+ if !curHasLabel || !newHasLabel {
+ log.M(newMeta).F().Warning(
+ "Not enough labels to compare objects, can not say for sure what exactly is going on. Object: %s",
+ util.NamespaceNameString(newMeta),
+ )
+ return api.ObjectStatusUnknown
+ }
+
+ //
+ // We have both set of labels, can compare them
+ //
+
+ if curVersion == newVersion {
+ log.M(newMeta).F().Info(
+ "cur and new objects are equal based on object version label. Update of the object is not required. Object: %s",
+ util.NamespaceNameString(newMeta),
+ )
+ return api.ObjectStatusSame
+ }
+
+ log.M(newMeta).F().Info(
+ "cur and new objects ARE DIFFERENT based on object version label: Update of the object is required. Object: %s",
+ util.NamespaceNameString(newMeta),
+ )
+
+ return api.ObjectStatusModified
+}
diff --git a/pkg/controller/common/poller/domain/poller-host-statefulset.go b/pkg/controller/common/poller/domain/poller-host-statefulset.go
new file mode 100644
index 000000000..cbdf03ff9
--- /dev/null
+++ b/pkg/controller/common/poller/domain/poller-host-statefulset.go
@@ -0,0 +1,134 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package domain
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+)
+
+type readyMarkDeleter interface {
+ DeleteReadyMarkOnPodAndService(ctx context.Context, host *api.Host) error
+}
+
+// HostStatefulSetPoller enriches StatefulSet poller with host capabilities
+type HostStatefulSetPoller struct {
+ *StatefulSetPoller
+ interfaces.IKubeSTS
+ readyMarkDeleter
+}
+
+// NewHostStatefulSetPoller creates new HostStatefulSetPoller from StatefulSet poller
+func NewHostStatefulSetPoller(poller *StatefulSetPoller, kube interfaces.IKube, labeler readyMarkDeleter) *HostStatefulSetPoller {
+ return &HostStatefulSetPoller{
+ StatefulSetPoller: poller,
+ IKubeSTS: kube.STS(),
+ readyMarkDeleter: labeler,
+ }
+}
+
+// WaitHostStatefulSetReady polls host's StatefulSet until it is ready
+func (p *HostStatefulSetPoller) WaitHostStatefulSetReady(ctx context.Context, host *api.Host) error {
+ // Wait for StatefulSet to reach generation
+ err := p.PollHostStatefulSet(
+ ctx,
+ host,
+ func(_ctx context.Context, sts *apps.StatefulSet) bool {
+ if sts == nil {
+ return false
+ }
+ p.deleteReadyMark(_ctx, host)
+ return k8s.IsStatefulSetGeneration(sts, sts.Generation)
+ },
+ func(_ctx context.Context) {
+ p.deleteReadyMark(_ctx, host)
+ },
+ )
+ if err != nil {
+ return err
+ }
+
+ // Wait for StatefulSet to reach ready status
+ err = p.PollHostStatefulSet(
+ ctx,
+ host,
+ func(_ctx context.Context, sts *apps.StatefulSet) bool {
+ p.deleteReadyMark(_ctx, host)
+ return k8s.IsStatefulSetReady(sts)
+ },
+ func(_ctx context.Context) {
+ p.deleteReadyMark(_ctx, host)
+ },
+ )
+
+ return err
+}
+
+//// WaitHostNotReady polls host's StatefulSet until it does not exist or is not ready
+//func (c *HostStatefulSetPoller) WaitHostNotReady(ctx context.Context, host *api.Host) error {
+// err := c.PollHostStatefulSet(
+// ctx,
+// host,
+// // Since we are waiting for the host to be not ready, let's assume that it should exist already
+// // and thus let's set GetErrorTimeout to zero, since we are not expecting getter function
+// // to return any errors
+// poller.NewPollerOptions().
+// FromConfig(chop.Config()).
+// SetGetErrorTimeout(0),
+// func(_ context.Context, sts *apps.StatefulSet) bool {
+// return k8s.IsStatefulSetNotReady(sts)
+// },
+// nil,
+// )
+// if apiErrors.IsNotFound(err) {
+// err = nil
+// }
+//
+// return err
+//}
+
+//// WaitHostStatefulSetDeleted polls host's StatefulSet until it is not available
+//func (p *HostStatefulSetPoller) WaitHostStatefulSetDeleted(host *api.Host) {
+// for {
+// // TODO
+// // Probably there would be better way to wait until k8s reported StatefulSet deleted
+// if _, err := p.IKubeSTS.Get(context.TODO(), host); err == nil {
+// log.V(2).Info("cache NOT yet synced")
+// time.Sleep(15 * time.Second)
+// } else {
+// log.V(1).Info("cache synced")
+// return
+// }
+// }
+//}
+
+func (p *HostStatefulSetPoller) deleteReadyMark(ctx context.Context, host *api.Host) {
+ if p == nil {
+ return
+ }
+ if p.readyMarkDeleter == nil {
+ log.V(3).F().Info("no mark deleter specified")
+ return
+ }
+
+ log.V(3).F().Info("Has mark deleter specified")
+ _ = p.readyMarkDeleter.DeleteReadyMarkOnPodAndService(ctx, host)
+}
diff --git a/pkg/model/chi/creator/creator.go b/pkg/controller/common/poller/domain/poller-host.go
similarity index 52%
rename from pkg/model/chi/creator/creator.go
rename to pkg/controller/common/poller/domain/poller-host.go
index 1a2cc93e4..a395c4613 100644
--- a/pkg/model/chi/creator/creator.go
+++ b/pkg/controller/common/poller/domain/poller-host.go
@@ -1,4 +1,5 @@
// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,31 +13,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package creator
+package domain
import (
+ "context"
+ "fmt"
+
log "github.com/altinity/clickhouse-operator/pkg/announcer"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
"github.com/altinity/clickhouse-operator/pkg/chop"
- model "github.com/altinity/clickhouse-operator/pkg/model/chi"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/util"
)
-// Creator specifies creator object
-type Creator struct {
- chi *api.ClickHouseInstallation
- chConfigFilesGenerator *model.ClickHouseConfigFilesGenerator
- labels *model.Labeler
- annotations *model.Annotator
- a log.Announcer
-}
-
-// NewCreator creates new Creator object
-func NewCreator(chi *api.ClickHouseInstallation) *Creator {
- return &Creator{
- chi: chi,
- chConfigFilesGenerator: model.NewClickHouseConfigFilesGenerator(model.NewClickHouseConfigGenerator(chi), chop.Config()),
- labels: model.NewLabeler(chi),
- annotations: model.NewAnnotator(chi),
- a: log.M(chi),
+// PollHost polls host
+func PollHost(
+ ctx context.Context,
+ host *api.Host,
+ isDoneFn func(ctx context.Context, host *api.Host) bool,
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
}
+
+ return poller.New(ctx, fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.HostName)).
+ WithOptions(poller.NewOptions().FromConfig(chop.Config())).
+ WithMain(&poller.Functions{
+ IsDone: func(_ctx context.Context, _ any) bool {
+ return isDoneFn(_ctx, host)
+ },
+ }).Poll()
}
diff --git a/pkg/controller/common/poller/domain/poller-statefulset.go b/pkg/controller/common/poller/domain/poller-statefulset.go
new file mode 100644
index 000000000..b07a33943
--- /dev/null
+++ b/pkg/controller/common/poller/domain/poller-statefulset.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package domain
+
+import (
+ "context"
+ "fmt"
+
+ apps "k8s.io/api/apps/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/chop"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type StatefulSetPoller struct {
+ kubeSTS interfaces.IKubeSTS
+}
+
+func NewStatefulSetPoller(kube interfaces.IKube) *StatefulSetPoller {
+ return &StatefulSetPoller{
+ kubeSTS: kube.STS(),
+ }
+}
+
+// PollHostStatefulSet polls host's StatefulSet
+func (p *StatefulSetPoller) PollHostStatefulSet(
+ ctx context.Context,
+ host *api.Host,
+ isDoneFn func(context.Context, *apps.StatefulSet) bool,
+ backFn func(context.Context),
+) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ return poller.New(
+ ctx,
+ fmt.Sprintf("%s/%s", host.Runtime.Address.Namespace, host.Runtime.Address.StatefulSet),
+ ).WithOptions(
+ poller.NewOptions().FromConfig(chop.Config()),
+ ).WithMain(
+ &poller.Functions{
+ Get: func(_ctx context.Context) (any, error) {
+ return p.kubeSTS.Get(_ctx, host)
+ },
+ IsDone: func(_ctx context.Context, a any) bool {
+ return isDoneFn(_ctx, a.(*apps.StatefulSet))
+ },
+ ShouldContinue: func(_ctx context.Context, _ any, e error) bool {
+ return apiErrors.IsNotFound(e)
+ },
+ },
+ ).WithBackground(
+ &poller.BackgroundFunctions{
+ F: backFn,
+ },
+ ).Poll()
+}
diff --git a/pkg/controller/common/poller/poller-functions.go b/pkg/controller/common/poller/poller-functions.go
new file mode 100644
index 000000000..578b582f3
--- /dev/null
+++ b/pkg/controller/common/poller/poller-functions.go
@@ -0,0 +1,60 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package poller
+
+import (
+ "context"
+)
+
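+// Functions groups the poller callbacks; each one is optional, and the Call*
+// wrappers below are nil-safe:
+//   - Get fetches the polled object,
+//   - IsDone reports whether polling may stop,
+//   - ShouldContinue decides whether a Get error is tolerable (e.g. NotFound
+//     while the object is still being created).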
+type Functions struct {
+ Get func(context.Context) (any, error)
+ IsDone func(context.Context, any) bool
+ ShouldContinue func(context.Context, any, error) bool
+}
+
+func (p *Functions) CallGet(c context.Context) (any, error) {
+ if p == nil {
+ return nil, nil
+ }
+ if p.Get == nil {
+ return nil, nil
+ }
+ return p.Get(c)
+}
+
+func (p *Functions) CallIsDone(c context.Context, a any) bool {
+ if p == nil {
+ return false
+ }
+ if p.IsDone == nil {
+ return false
+ }
+ return p.IsDone(c, a)
+}
+
+func (p *Functions) CallShouldContinue(c context.Context, a any, e error) bool {
+ if p == nil {
+ return false
+ }
+ if p.ShouldContinue == nil {
+ return false
+ }
+ return p.ShouldContinue(c, a, e)
+}
+
+type BackgroundFunctions struct {
+ F func(context.Context)
+}
diff --git a/pkg/controller/common/poller/poller-options.go b/pkg/controller/common/poller/poller-options.go
new file mode 100644
index 000000000..e1aa5ce90
--- /dev/null
+++ b/pkg/controller/common/poller/poller-options.go
@@ -0,0 +1,71 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package poller
+
+import (
+ "time"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+const (
+ waitStatefulSetGenerationTimeoutBeforeStartBothering = 60
+ waitStatefulSetGenerationTimeoutToCreateStatefulSet = 30
+)
+
+// Options specifies polling options
+type Options struct {
+ StartBotheringAfterTimeout time.Duration
+ GetErrorTimeout time.Duration
+ Timeout time.Duration
+ MainInterval time.Duration
+ BackgroundInterval time.Duration
+}
+
+// NewOptions creates new poll options
+func NewOptions() *Options {
+ return &Options{}
+}
+
+// Ensure ensures poll options exist
+func (o *Options) Ensure() *Options {
+ if o == nil {
+ return NewOptions()
+ }
+ return o
+}
+
+// FromConfig makes poll options from config
+func (o *Options) FromConfig(config *api.OperatorConfig) *Options {
+ if o == nil {
+ return nil
+ }
+ o.StartBotheringAfterTimeout = time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second
+ o.GetErrorTimeout = time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second
+ o.Timeout = time.Duration(config.Reconcile.StatefulSet.Update.Timeout) * time.Second
+ o.MainInterval = time.Duration(config.Reconcile.StatefulSet.Update.PollInterval) * time.Second
+ o.BackgroundInterval = 1 * time.Second
+ return o
+}
+
+// SetGetErrorTimeout sets the get-error timeout
+func (o *Options) SetGetErrorTimeout(timeout time.Duration) *Options {
+ if o == nil {
+ return nil
+ }
+ o.GetErrorTimeout = timeout
+ return o
+}
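Options follows the same nil-tolerant pattern: Ensure() turns a nil receiver into usable defaults, while FromConfig and SetGetErrorTimeout allow chained customization. A hedged sketch (chop.Config() is the accessor already used by StatefulSetPoller earlier in this diff; the timeout value is illustrative):

	// Typical construction, derived from operator config:
	opts := NewOptions().FromConfig(chop.Config())

	// Or hand-rolled, tightening the get-error window:
	opts = NewOptions().SetGetErrorTimeout(10 * time.Second)

	// A nil *Options is also tolerated - Ensure() substitutes zero-value defaults:
	var none *Options
	_ = none.Ensure()
	_ = opts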
diff --git a/pkg/controller/common/poller/poller.go b/pkg/controller/common/poller/poller.go
new file mode 100644
index 000000000..425deb4a7
--- /dev/null
+++ b/pkg/controller/common/poller/poller.go
@@ -0,0 +1,149 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package poller
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type Poller interface {
+ Poll() error
+ WithOptions(opts *Options) Poller
+ WithMain(functions *Functions) Poller
+ WithBackground(backgroundFunctions *BackgroundFunctions) Poller
+}
+
+type poller struct {
+ ctx context.Context
+ name string
+ opts *Options
+ main *Functions
+ background *BackgroundFunctions
+}
+
+func New(ctx context.Context, name string) Poller {
+ return &poller{
+ ctx: ctx,
+ name: name,
+ }
+}
+
+func (p *poller) WithOptions(opts *Options) Poller {
+ p.opts = opts
+ return p
+}
+
+func (p *poller) WithMain(functions *Functions) Poller {
+ p.main = functions
+ return p
+}
+
+func (p *poller) WithBackground(backgroundFunctions *BackgroundFunctions) Poller {
+ p.background = backgroundFunctions
+ return p
+}
+
+func (p *poller) Poll() error {
+ opts := p.opts.Ensure()
+ start := time.Now()
+ for {
+ if util.IsContextDone(p.ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ item, err := p.main.CallGet(p.ctx)
+ switch {
+ case err == nil:
+ // Object is found - process it
+ if p.main.CallIsDone(p.ctx, item) {
+ // All is good, job is done, exit
+ log.V(1).M(p.name).F().Info("OK %s", p.name)
+ return nil
+ }
+ // Object is found, but processor function says we need to continue polling
+ case p.main.CallShouldContinue(p.ctx, item, err):
+ // Object is not found - it either failed to be created or just still not created
+ if (opts.GetErrorTimeout > 0) && (time.Since(start) >= opts.GetErrorTimeout) {
+ // No more wait for the object to be created. Consider create process as failed.
+ log.V(1).M(p.name).F().Error("Poller.Get() FAILED because item is not available and get timeout reached for: %s. Abort", p.name)
+ return err
+ }
+ // Error has happened but we should continue
+ default:
+ // Error has happened and we should not continue, abort polling
+ log.M(p.name).F().Error("Poller.Get() FAILED for: %s", p.name)
+ return err
+ }
+
+ // Continue polling
+
+  // Maybe it is time to abort polling?
+ if time.Since(start) >= opts.Timeout {
+ // Timeout reached, no good result available, time to abort
+ log.V(1).M(p.name).F().Info("poll(%s) - TIMEOUT reached", p.name)
+ return fmt.Errorf("poll(%s) - wait timeout", p.name)
+ }
+
+ // Continue polling
+
+  // Maybe it is time to start bothering the logs?
+ if time.Since(start) >= opts.StartBotheringAfterTimeout {
+ // Start bothering with log messages after some time only
+ log.V(1).M(p.name).F().Info("WAIT: %s", p.name)
+ }
+
+ // Wait some more time and launch background process(es)
+ log.V(2).M(p.name).F().Info("poll iteration")
+ sleepAndRunBackgroundProcess(p.ctx, opts, p.background)
+ } // for
+}
+
+func sleepAndRunBackgroundProcess(ctx context.Context, opts *Options, background *BackgroundFunctions) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ switch {
+ case opts.BackgroundInterval > 0:
+ mainIntervalTimeout := time.After(opts.MainInterval)
+ backgroundIntervalTimeout := time.After(opts.BackgroundInterval)
+ for {
+ select {
+ case <-ctx.Done():
+ // Context is done, nothing to do here more
+ return
+ case <-mainIntervalTimeout:
+ // Timeout reached, nothing to do here more
+ return
+ case <-backgroundIntervalTimeout:
+ // Function interval reached, time to call the func
+ if background != nil {
+ if background.F != nil {
+ background.F(ctx)
+ }
+ }
+ backgroundIntervalTimeout = time.After(opts.BackgroundInterval)
+ }
+ }
+ default:
+ util.WaitContextDoneOrTimeout(ctx, opts.MainInterval)
+ }
+}
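Assembled end to end, the fluent API reads as below. This is an illustrative, self-contained sketch in which the counter-based Get stands in for a real Kubernetes object fetch:

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/altinity/clickhouse-operator/pkg/controller/common/poller"
	)

	func main() {
		ctx := context.Background()
		attempts := 0

		err := poller.New(ctx, "demo/object").WithOptions(
			&poller.Options{
				StartBotheringAfterTimeout: 10 * time.Second,
				Timeout:                    30 * time.Second,
				MainInterval:               1 * time.Second,
			},
		).WithMain(
			&poller.Functions{
				Get: func(_ context.Context) (any, error) {
					attempts++ // stand-in for a real API Get
					return attempts, nil
				},
				IsDone: func(_ context.Context, a any) bool {
					return a.(int) >= 3 // done after the third poll
				},
			},
		).Poll()
		fmt.Println("poll finished:", err)
	}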
diff --git a/pkg/controller/common/reconcile-shard-and-hosts-options.go b/pkg/controller/common/reconcile-shard-and-hosts-options.go
new file mode 100644
index 000000000..02e5bbf2c
--- /dev/null
+++ b/pkg/controller/common/reconcile-shard-and-hosts-options.go
@@ -0,0 +1,28 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// ReconcileShardsAndHostsOptionsCtxKeyType specifies the type for ReconcileShardsAndHostsOptionsCtxKey.
+// A dedicated type is needed because basic types should not be used as context keys - more details here:
+// https://stackoverflow.com/questions/40891345/fix-should-not-use-basic-type-string-as-key-in-context-withvalue-golint
+type ReconcileShardsAndHostsOptionsCtxKeyType string
+
+// ReconcileShardsAndHostsOptionsCtxKey specifies name of the key to be used for ReconcileShardsAndHostsOptions
+const ReconcileShardsAndHostsOptionsCtxKey ReconcileShardsAndHostsOptionsCtxKeyType = "ReconcileShardsAndHostsOptions"
+
+// ReconcileShardsAndHostsOptions specifies options for the reconciler
+type ReconcileShardsAndHostsOptions struct {
+ FullFanOut bool
+}
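The dedicated key type keeps this value from colliding with any other package's string context keys (the golint concern referenced above). Stashing and retrieving the options, from within this package (ctx assumed in scope), then looks like:

	// Attach options to the context under the typed key:
	ctx = context.WithValue(ctx, ReconcileShardsAndHostsOptionsCtxKey, &ReconcileShardsAndHostsOptions{
		FullFanOut: true,
	})

	// ...and read them back later, tolerating absence:
	if opts, ok := ctx.Value(ReconcileShardsAndHostsOptionsCtxKey).(*ReconcileShardsAndHostsOptions); ok {
		_ = opts.FullFanOut
	}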
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler-aux.go b/pkg/controller/common/statefulset/statefulset-reconciler-aux.go
new file mode 100644
index 000000000..ba101a9aa
--- /dev/null
+++ b/pkg/controller/common/statefulset/statefulset-reconciler-aux.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package statefulset
+
+import (
+ "context"
+
+ "gopkg.in/d4l3k/messagediff.v1"
+ apps "k8s.io/api/apps/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type IHostStatefulSetPoller interface {
+ WaitHostStatefulSetReady(ctx context.Context, host *api.Host) error
+}
+
+type fallback interface {
+ OnStatefulSetCreateFailed(ctx context.Context, host *api.Host) common.ErrorCRUD
+ OnStatefulSetUpdateFailed(ctx context.Context, oldStatefulSet *apps.StatefulSet, host *api.Host, sts interfaces.IKubeSTS) common.ErrorCRUD
+}
+
+type DefaultFallback struct{}
+
+func NewDefaultFallback() *DefaultFallback {
+ return &DefaultFallback{}
+}
+
+func (f *DefaultFallback) OnStatefulSetCreateFailed(ctx context.Context, host *api.Host) common.ErrorCRUD {
+ return common.ErrCRUDIgnore
+}
+func (f *DefaultFallback) OnStatefulSetUpdateFailed(ctx context.Context, oldStatefulSet *apps.StatefulSet, host *api.Host, sts interfaces.IKubeSTS) common.ErrorCRUD {
+ return common.ErrCRUDIgnore
+}
+
+func dumpDiff(old, new *apps.StatefulSet) string {
+ diff, equal := messagediff.DeepDiff(old.Spec, new.Spec)
+
+ str := ""
+ if equal {
+ str += "EQUAL: "
+ } else {
+ str += "NOT EQUAL: "
+ }
+
+ if len(diff.Added) > 0 {
+ // Something added
+ str += util.MessageDiffItemString("added spec items", "none", "", diff.Added)
+ }
+
+ if len(diff.Removed) > 0 {
+ // Something removed
+ str += util.MessageDiffItemString("removed spec items", "none", "", diff.Removed)
+ }
+
+ if len(diff.Modified) > 0 {
+ // Something modified
+ str += util.MessageDiffItemString("modified spec items", "none", "", diff.Modified)
+ }
+ return str
+}
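dumpDiff leans on messagediff's DeepDiff, which returns the added/removed/modified paths between two values plus an equality flag. A minimal standalone illustration of that library call (the spec struct is invented for the example):

	package main

	import (
		"fmt"

		"gopkg.in/d4l3k/messagediff.v1"
	)

	type spec struct {
		Replicas int
		Image    string
	}

	func main() {
		cur := spec{Replicas: 1, Image: "clickhouse:24"}
		desired := spec{Replicas: 3, Image: "clickhouse:24"}

		diff, equal := messagediff.DeepDiff(cur, desired)
		fmt.Println("equal:", equal)                       // false
		fmt.Println("modified paths:", len(diff.Modified)) // 1 (.Replicas)
	}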
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler-options.go b/pkg/controller/common/statefulset/statefulset-reconciler-options.go
new file mode 100644
index 000000000..8d923ede3
--- /dev/null
+++ b/pkg/controller/common/statefulset/statefulset-reconciler-options.go
@@ -0,0 +1,72 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package statefulset
+
+type ReconcileOptions struct {
+ forceRecreate bool
+ doNotWait bool
+}
+
+func NewReconcileStatefulSetOptions() *ReconcileOptions {
+ return &ReconcileOptions{}
+}
+
+func (o *ReconcileOptions) Ensure() *ReconcileOptions {
+ if o == nil {
+ o = NewReconcileStatefulSetOptions()
+ }
+ return o
+}
+
+func (o *ReconcileOptions) SetForceRecreate() *ReconcileOptions {
+ o = o.Ensure()
+ o.forceRecreate = true
+ return o
+}
+
+func (o *ReconcileOptions) IsForceRecreate() bool {
+ if o == nil {
+ return false
+ }
+ return o.forceRecreate
+}
+
+func (o *ReconcileOptions) SetDoNotWait() *ReconcileOptions {
+ o = o.Ensure()
+ o.doNotWait = true
+ return o
+}
+
+func (o *ReconcileOptions) IsDoNotWait() bool {
+ if o == nil {
+ return false
+ }
+ return o.doNotWait
+}
+
+type ReconcileOptionsSet []*ReconcileOptions
+
+// NewReconcileOptionsSet creates a new ReconcileOptionsSet from the provided options
+func NewReconcileOptionsSet(opts ...*ReconcileOptions) (res ReconcileOptionsSet) {
+ return append(res, opts...)
+}
+
+// First gets first option
+func (a ReconcileOptionsSet) First() *ReconcileOptions {
+ if len(a) > 0 {
+ return a[0]
+ }
+ return nil
+}
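Because every mutator routes through Ensure() and every getter tolerates nil, callers can chain options starting from nothing. A short sketch of both directions:

	// Mutators allocate on demand via Ensure(), so chaining from nil works:
	var opts *ReconcileOptions
	opts = opts.SetForceRecreate()
	_ = opts.IsForceRecreate() // true

	// Sets return their first element, and a nil element is safe for the Is* getters:
	set := NewReconcileOptionsSet(NewReconcileStatefulSetOptions().SetDoNotWait())
	_ = set.First().IsDoNotWait()     // true
	_ = ReconcileOptionsSet{}.First() // nil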
diff --git a/pkg/controller/common/statefulset/statefulset-reconciler.go b/pkg/controller/common/statefulset/statefulset-reconciler.go
new file mode 100644
index 000000000..9eed289bb
--- /dev/null
+++ b/pkg/controller/common/statefulset/statefulset-reconciler.go
@@ -0,0 +1,500 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package statefulset
+
+import (
+ "context"
+ "time"
+
+ apps "k8s.io/api/apps/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common/storage"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/k8s"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type Reconciler struct {
+ a common.Announcer
+ task *common.Task
+
+ hostSTSPoller IHostStatefulSetPoller
+ namer interfaces.INameManager
+ labeler interfaces.ILabeler
+ storage *storage.Reconciler
+
+ cr interfaces.IKubeCR
+ sts interfaces.IKubeSTS
+
+ fallback fallback
+}
+
+func NewReconciler(
+ a common.Announcer,
+ task *common.Task,
+ hostSTSPoller IHostStatefulSetPoller,
+ namer interfaces.INameManager,
+ labeler interfaces.ILabeler,
+ storage *storage.Reconciler,
+ kube interfaces.IKube,
+ fallback fallback,
+) *Reconciler {
+ return &Reconciler{
+ a: a,
+ task: task,
+
+ hostSTSPoller: hostSTSPoller,
+ namer: namer,
+ labeler: labeler,
+ storage: storage,
+
+ cr: kube.CR(),
+ sts: kube.STS(),
+
+ fallback: fallback,
+ }
+}
+
+// PrepareHostStatefulSetWithStatus prepares the host's desired StatefulSet and sets the resulting reconcile status on the host
+func (r *Reconciler) PrepareHostStatefulSetWithStatus(ctx context.Context, host *api.Host, shutdown bool) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ r.prepareDesiredStatefulSet(host, shutdown)
+ host.GetReconcileAttributes().SetStatus(r.getStatefulSetStatus(host))
+}
+
+// prepareDesiredStatefulSet prepares desired StatefulSet
+func (r *Reconciler) prepareDesiredStatefulSet(host *api.Host, shutdown bool) {
+ host.Runtime.DesiredStatefulSet = r.task.Creator().CreateStatefulSet(host, shutdown)
+}
+
+// getStatefulSetStatus gets StatefulSet status
+func (r *Reconciler) getStatefulSetStatus(host *api.Host) api.ObjectStatus {
+ new := host.Runtime.DesiredStatefulSet
+ r.a.V(2).M(new).S().Info(util.NamespaceNameString(new))
+ defer r.a.V(2).M(new).E().Info(util.NamespaceNameString(new))
+
+ r.a.V(2).M(new).Info("host sts preamble: ancestor: %t cnt: %d added: %d",
+ host.HasAncestor(),
+ host.GetCR().IEnsureStatus().GetHostsCount(),
+ host.GetCR().IEnsureStatus().GetHostsAddedCount(),
+ )
+
+ curStatefulSet, err := r.sts.Get(context.TODO(), new)
+ switch {
+ case curStatefulSet != nil:
+ r.a.V(1).M(new).Info("Have StatefulSet available, try to perform label-based comparison for sts: %s", util.NamespaceNameString(new))
+ return common.GetObjectStatusFromMetas(r.labeler, curStatefulSet, new)
+
+ case apiErrors.IsNotFound(err):
+ // StatefulSet is not found at the moment.
+ // However, it may be just deleted
+ r.a.V(1).M(new).Info("No cur StatefulSet available and the reason is - not found. Either new one or a deleted sts: %s", util.NamespaceNameString(new))
+ if host.HasAncestor() {
+ r.a.V(1).M(new).Warning("No cur StatefulSet available but host has an ancestor. Found deleted StatefulSet. for: %s", util.NamespaceNameString(new))
+ return api.ObjectStatusModified
+ }
+ r.a.V(1).M(new).Info("No cur StatefulSet available and it is not found and is a new one. New one for: %s", util.NamespaceNameString(new))
+ return api.ObjectStatusNew
+
+ default:
+  r.a.V(1).M(new).Warning("No cur StatefulSet available and the error is not 'not found' for: %s err: %v", util.NamespaceNameString(new), err)
+ return api.ObjectStatusUnknown
+ }
+}
+
+// ReconcileStatefulSet reconciles StatefulSet of a host
+func (r *Reconciler) ReconcileStatefulSet(
+ ctx context.Context,
+ host *api.Host,
+ register bool,
+ opts *ReconcileOptions,
+) (err error) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ newStatefulSet := host.Runtime.DesiredStatefulSet
+
+ r.a.V(2).M(host).S().Info(util.NamespaceNameString(newStatefulSet))
+ defer r.a.V(2).M(host).E().Info(util.NamespaceNameString(newStatefulSet))
+
+ if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusSame {
+ r.a.V(2).M(host).F().Info("No need to reconcile THE SAME StatefulSet: %s", util.NamespaceNameString(newStatefulSet))
+ if register {
+ host.GetCR().IEnsureStatus().HostUnchanged()
+ _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+ }
+ return nil
+ }
+
+ // Check whether this object already exists in k8s
+ host.Runtime.CurStatefulSet, err = r.sts.Get(ctx, newStatefulSet)
+
+ // Report diff to trace
+ if host.GetReconcileAttributes().GetStatus() == api.ObjectStatusModified {
+ r.a.V(1).M(host).F().Info("Need to reconcile MODIFIED StatefulSet: %s", util.NamespaceNameString(newStatefulSet))
+ common.DumpStatefulSetDiff(host, host.Runtime.CurStatefulSet, newStatefulSet)
+ }
+
+ switch {
+ case opts.IsForceRecreate():
+ // Force recreate prevails over all other requests
+  err = r.recreateStatefulSet(ctx, host, register, opts)
+ default:
+ // We have (or had in the past) StatefulSet - try to update|recreate it
+ err = r.updateStatefulSet(ctx, host, register, opts)
+ }
+
+ if apiErrors.IsNotFound(err) {
+ // StatefulSet not found - even during Update process - try to create it
+ err = r.createStatefulSet(ctx, host, register, opts)
+ }
+
+ // Host has to know current StatefulSet and Pod
+ host.Runtime.CurStatefulSet, _ = r.sts.Get(ctx, newStatefulSet)
+
+ return err
+}
+
+// recreateStatefulSet
+func (r *Reconciler) recreateStatefulSet(ctx context.Context, host *api.Host, register bool, opts *ReconcileOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ r.a.V(2).M(host).S().Info(util.NamespaceNameString(host.GetCR()))
+ defer r.a.V(2).M(host).E().Info(util.NamespaceNameString(host.GetCR()))
+
+ _ = r.doDeleteStatefulSet(ctx, host)
+ _ = r.storage.ReconcilePVCs(ctx, host, api.DesiredStatefulSet)
+ return r.createStatefulSet(ctx, host, register, opts)
+}
+
+// updateStatefulSet
+func (r *Reconciler) updateStatefulSet(ctx context.Context, host *api.Host, register bool, opts *ReconcileOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ // Helpers
+ newStatefulSet := host.Runtime.DesiredStatefulSet
+ curStatefulSet := host.Runtime.CurStatefulSet
+
+ r.a.V(2).M(host).S().Info(newStatefulSet.Name)
+ defer r.a.V(2).M(host).E().Info(newStatefulSet.Name)
+
+ namespace := newStatefulSet.Namespace
+ name := newStatefulSet.Name
+
+ r.a.V(1).
+  WithEvent(host.GetCR(), common.EventActionUpdate, common.EventReasonUpdateStarted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Update StatefulSet(%s) - started", util.NamespaceNameString(newStatefulSet))
+
+ if r.waitForConfigMapPropagation(ctx, host) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ action := common.ErrCRUDRecreate
+ if k8s.IsStatefulSetReady(curStatefulSet) {
+ action = r.doUpdateStatefulSet(ctx, curStatefulSet, newStatefulSet, host)
+ }
+
+ switch action {
+ case nil:
+ if register {
+ host.GetCR().IEnsureStatus().HostUpdated()
+ _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+ }
+ r.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionUpdate, common.EventReasonUpdateCompleted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Update StatefulSet(%s/%s) - completed", namespace, name)
+ return nil
+ case common.ErrCRUDAbort:
+ r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got abort. Abort", namespace, name)
+ return common.ErrCRUDAbort
+ case common.ErrCRUDIgnore:
+ r.a.V(1).M(host).Info("Update StatefulSet(%s/%s) - got ignore. Ignore", namespace, name)
+ return nil
+ case common.ErrCRUDRecreate:
+ r.a.WithEvent(host.GetCR(), common.EventActionUpdate, common.EventReasonUpdateInProgress).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Update StatefulSet(%s/%s) switch from Update to Recreate", namespace, name)
+ common.DumpStatefulSetDiff(host, curStatefulSet, newStatefulSet)
+ return r.recreateStatefulSet(ctx, host, register, opts)
+ case common.ErrCRUDUnexpectedFlow:
+ r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
+ return nil
+ }
+
+ r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
+ return nil
+}
+
+// createStatefulSet
+func (r *Reconciler) createStatefulSet(ctx context.Context, host *api.Host, register bool, opts *ReconcileOptions) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ statefulSet := host.Runtime.DesiredStatefulSet
+
+ r.a.V(2).M(host).S().Info(util.NamespaceNameString(statefulSet.GetObjectMeta()))
+ defer r.a.V(2).M(host).E().Info(util.NamespaceNameString(statefulSet.GetObjectMeta()))
+
+ r.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateStarted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Create StatefulSet %s - started", util.NamespaceNameString(statefulSet))
+
+ action := r.doCreateStatefulSet(ctx, host, opts)
+
+ if register {
+ host.GetCR().IEnsureStatus().HostAdded()
+ _ = r.cr.StatusUpdate(ctx, host.GetCR(), types.UpdateStatusOptions{
+ CopyStatusOptions: types.CopyStatusOptions{
+ MainFields: true,
+ },
+ })
+ }
+
+ switch action {
+ case nil:
+ r.a.V(1).
+ WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateCompleted).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Info("Create StatefulSet: %s - completed", util.NamespaceNameString(statefulSet))
+ return nil
+ case common.ErrCRUDAbort:
+ r.a.WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(host.GetCR()).
+ WithStatusError(host.GetCR()).
+ M(host).F().
+ Error("Create StatefulSet: %s - failed with error: %v", util.NamespaceNameString(statefulSet), action)
+ return action
+ case common.ErrCRUDIgnore:
+ r.a.WithEvent(host.GetCR(), common.EventActionCreate, common.EventReasonCreateFailed).
+ WithStatusAction(host.GetCR()).
+ M(host).F().
+ Warning("Create StatefulSet: %s - error ignored", util.NamespaceNameString(statefulSet))
+ return nil
+ case common.ErrCRUDRecreate:
+ r.a.V(1).M(host).Warning("Got recreate action. Ignore and continue for now")
+ return nil
+ case common.ErrCRUDUnexpectedFlow:
+ r.a.V(1).M(host).Warning("Got unexpected flow action. Ignore and continue for now")
+ return nil
+ }
+
+ r.a.V(1).M(host).Warning("Got unexpected flow. This is strange. Ignore and continue for now")
+ return nil
+}
+
+// waitForConfigMapPropagation
+func (r *Reconciler) waitForConfigMapPropagation(ctx context.Context, host *api.Host) bool {
+ // No need to wait for ConfigMap propagation on stopped host
+ if host.IsStopped() {
+ r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - on stopped host")
+ return false
+ }
+
+ // No need to wait on unchanged ConfigMap
+ if r.task.CmUpdate().IsZero() {
+ r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - no changes in ConfigMap")
+ return false
+ }
+
+ // What timeout is expected to be enough for ConfigMap propagation?
+ // In case timeout is not specified, no need to wait
+ if !host.GetCR().GetReconciling().HasConfigMapPropagationTimeout() {
+ r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - not applicable")
+ return false
+ }
+
+ timeout := host.GetCR().GetReconciling().GetConfigMapPropagationTimeoutDuration()
+
+ // How much time has elapsed since last ConfigMap update?
+ // Maybe there is no need to wait anymore
+ elapsed := time.Since(r.task.CmUpdate())
+ if elapsed >= timeout {
+ r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - already elapsed. %s/%s", elapsed, timeout)
+ return false
+ }
+
+ // Looks like we need to wait for ConfigMap propagation, after all
+ wait := timeout - elapsed
+ r.a.V(1).M(host).F().Info("Wait for ConfigMap propagation for %s %s/%s", wait, elapsed, timeout)
+ if util.WaitContextDoneOrTimeout(ctx, wait) {
+ log.V(2).Info("task is done")
+ return true
+ }
+
+ return false
+}
+
+// doCreateStatefulSet is an internal function, used in ReconcileStatefulSet only
+func (r *Reconciler) doCreateStatefulSet(ctx context.Context, host *api.Host, opts *ReconcileOptions) common.ErrorCRUD {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ log.V(1).M(host).F().P()
+ statefulSet := host.Runtime.DesiredStatefulSet
+
+ log.V(1).Info("Create StatefulSet %s", util.NamespaceNameString(statefulSet))
+ if _, err := r.sts.Create(ctx, statefulSet); err != nil {
+ log.V(1).M(host).F().Error("StatefulSet create failed. err: %v", err)
+ return common.ErrCRUDRecreate
+ }
+
+ if opts.IsDoNotWait() {
+  // StatefulSet created, do not wait until host is ready, move on
+ log.V(1).M(host).F().Info("Will NOT wait for StatefulSet to be ready, consider it is created successfully")
+ } else {
+ // StatefulSet created, wait until host is ready
+ if err := r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host); err != nil {
+ log.V(1).M(host).F().Error("StatefulSet create wait failed. err: %v", err)
+ return r.fallback.OnStatefulSetCreateFailed(ctx, host)
+ }
+ log.V(2).M(host).F().Info("Target generation reached, StatefulSet created successfully")
+ }
+
+ return nil
+}
+
+// doUpdateStatefulSet is an internal function, used in ReconcileStatefulSet only
+func (r *Reconciler) doUpdateStatefulSet(
+ ctx context.Context,
+ oldStatefulSet *apps.StatefulSet,
+ newStatefulSet *apps.StatefulSet,
+ host *api.Host,
+) common.ErrorCRUD {
+ log.V(2).M(host).F().P()
+
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ // Apply newStatefulSet and wait for Generation to change
+ updatedStatefulSet, err := r.sts.Update(ctx, newStatefulSet)
+ if err != nil {
+ log.V(1).M(host).F().Error("StatefulSet update failed. err: %v", err)
+ log.V(1).M(host).F().Error("%s", dumpDiff(oldStatefulSet, newStatefulSet))
+ return common.ErrCRUDRecreate
+ }
+
+ // After calling "Update()"
+ // 1. ObjectMeta.Generation is target generation
+ // 2. Status.ObservedGeneration may be <= ObjectMeta.Generation
+
+ if updatedStatefulSet.Generation == oldStatefulSet.Generation {
+ // Generation is not updated - no changes in .spec section were made
+ log.V(2).M(host).F().Info("no generation change")
+ return nil
+ }
+
+ log.V(1).M(host).F().Info("generation change %d=>%d", oldStatefulSet.Generation, updatedStatefulSet.Generation)
+
+ if err := r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host); err != nil {
+ log.V(1).M(host).F().Error("StatefulSet update wait failed. err: %v", err)
+ return r.fallback.OnStatefulSetUpdateFailed(ctx, oldStatefulSet, host, r.sts)
+ }
+
+ log.V(2).M(host).F().Info("Target generation reached, StatefulSet updated successfully")
+ return nil
+}
+
+// doDeleteStatefulSet gracefully deletes a StatefulSet by scaling it down to zero pods first
+func (r *Reconciler) doDeleteStatefulSet(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ // IMPORTANT
+ // StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted.
+ // To achieve ordered and graceful termination of the pods in the StatefulSet,
+ // it is possible to scale the StatefulSet down to 0 prior to deletion.
+
+ name := r.namer.Name(interfaces.NameStatefulSet, host)
+ namespace := host.Runtime.Address.Namespace
+ log.V(1).M(host).F().Info("%s/%s", namespace, name)
+
+ var err error
+ host.Runtime.CurStatefulSet, err = r.sts.Get(ctx, host)
+ if err != nil {
+ // Unable to fetch cur StatefulSet, but this is not necessarily an error yet
+ if apiErrors.IsNotFound(err) {
+ log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
+ } else {
+ log.V(1).M(host).F().Error("FAIL get StatefulSet %s/%s err:%v", namespace, name, err)
+ }
+ return err
+ }
+
+ // Scale StatefulSet down to 0 pods count.
+ // This is the proper and graceful way to delete StatefulSet
+ var zero int32 = 0
+ host.Runtime.CurStatefulSet.Spec.Replicas = &zero
+ if _, err := r.sts.Update(ctx, host.Runtime.CurStatefulSet); err != nil {
+ log.V(1).M(host).Error("UNABLE to update StatefulSet %s/%s", namespace, name)
+ return err
+ }
+
+ // Wait until StatefulSet scales down to 0 pods count.
+ _ = r.hostSTSPoller.WaitHostStatefulSetReady(ctx, host)
+
+ // And now delete empty StatefulSet
+ if err := r.sts.Delete(ctx, namespace, name); err == nil {
+ log.V(1).M(host).Info("OK delete StatefulSet %s/%s", namespace, name)
+ // r.hostSTSPoller.WaitHostStatefulSetDeleted(host)
+ } else if apiErrors.IsNotFound(err) {
+ log.V(1).M(host).Info("NEUTRAL not found StatefulSet %s/%s", namespace, name)
+ } else {
+ log.V(1).M(host).F().Error("FAIL delete StatefulSet %s/%s err: %v", namespace, name, err)
+ }
+
+ return nil
+}
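Taken together, a single host pass through this reconciler is expected to look roughly like the sketch below; stsReconciler is assumed to have been built with NewReconciler and its announcer/task/poller/namer/labeler/storage/kube dependencies:

	// 1. Build the desired StatefulSet and classify it (New/Modified/Same/Unknown):
	stsReconciler.PrepareHostStatefulSetWithStatus(ctx, host, false /* shutdown */)

	// 2. Converge towards the desired state, registering host status on the CR:
	err := stsReconciler.ReconcileStatefulSet(ctx, host, true /* register */, statefulset.NewReconcileStatefulSetOptions())

	// Force-recreate variant, for changes known to be impossible in place:
	err = stsReconciler.ReconcileStatefulSet(ctx, host, true, statefulset.NewReconcileStatefulSetOptions().SetForceRecreate())
	_ = err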
diff --git a/pkg/controller/common/storage/storage-pvc.go b/pkg/controller/common/storage/storage-pvc.go
new file mode 100644
index 000000000..e7a59d19f
--- /dev/null
+++ b/pkg/controller/common/storage/storage-pvc.go
@@ -0,0 +1,135 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "fmt"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/volume"
+ "github.com/altinity/clickhouse-operator/pkg/model/managers"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+type PVC struct {
+ interfaces.IKubePVC
+ pvcDeleter *volume.PVCDeleter
+}
+
+func NewStoragePVC(pvcKube interfaces.IKubePVC) *PVC {
+ return &PVC{
+ IKubePVC: pvcKube,
+ pvcDeleter: volume.NewPVCDeleter(managers.NewNameManager(managers.NameManagerTypeClickHouse)),
+ }
+}
+
+// UpdateOrCreate updates the PVC if it exists, otherwise creates it
+func (c *PVC) UpdateOrCreate(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
+ log.V(2).M(pvc).F().P()
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil, fmt.Errorf("task is done")
+ }
+
+ _, err := c.Get(ctx, pvc.Namespace, pvc.Name)
+ if err != nil {
+ if apiErrors.IsNotFound(err) {
+ log.V(1).M(pvc).F().Error("PVC not found, need to create %s", util.NamespacedName(pvc))
+ _, err = c.Create(ctx, pvc)
+ if err != nil {
+ log.V(1).M(pvc).F().Error("unable to Create PVC err: %v", err)
+ }
+ return pvc, err
+ }
+ // In case of any non-NotFound API error - unable to proceed
+ log.V(1).M(pvc).F().Error("ERROR unable to get PVC(%s) err: %v", util.NamespacedName(pvc), err)
+ return nil, err
+ }
+
+ pvcUpdated, err := c.Update(ctx, pvc)
+ if err == nil {
+ return pvcUpdated, err
+ }
+
+ // Update failed
+ // May want to suppress special case of an error
+ //if strings.Contains(err.Error(), "field can not be less than previous value") {
+ // return pvc, nil
+ //}
+ log.V(1).M(pvc).F().Error("unable to Update PVC err: %v", err)
+ return nil, err
+}
+
+// DeletePVC deletes the host's PersistentVolumeClaims that are allowed to be deleted
+func (c *PVC) DeletePVC(ctx context.Context, host *api.Host) error {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil
+ }
+
+ log.V(2).M(host).S().P()
+ defer log.V(2).M(host).E().P()
+
+ namespace := host.Runtime.Address.Namespace
+ c.WalkDiscoveredPVCs(ctx, host, func(pvc *core.PersistentVolumeClaim) {
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return
+ }
+
+ // Check whether PVC can be deleted
+ if c.pvcDeleter.HostCanDeletePVC(host, pvc.Name) {
+ log.V(1).M(host).Info("PVC %s/%s would be deleted", namespace, pvc.Name)
+ } else {
+ log.V(1).M(host).Info("PVC %s/%s should not be deleted, leave it intact", namespace, pvc.Name)
+ // Move to the next PVC
+ return
+ }
+
+ // Delete PVC
+ if err := c.Delete(ctx, pvc.Namespace, pvc.Name); err == nil {
+ log.V(1).M(host).Info("OK delete PVC %s/%s", namespace, pvc.Name)
+ } else if apiErrors.IsNotFound(err) {
+ log.V(1).M(host).Info("NEUTRAL not found PVC %s/%s", namespace, pvc.Name)
+ } else {
+ log.M(host).F().Error("FAIL to delete PVC %s/%s err:%v", namespace, pvc.Name, err)
+ }
+ })
+
+ return nil
+}
+
+func (c *PVC) WalkDiscoveredPVCs(ctx context.Context, host *api.Host, f func(pvc *core.PersistentVolumeClaim)) {
+ namespace := host.Runtime.Address.Namespace
+
+ pvcList, err := c.ListForHost(ctx, host)
+ if err != nil {
+ log.M(host).F().Error("FAIL get list of PVCs for the host %s/%s err:%v", namespace, host.GetName(), err)
+ return
+ }
+
+ for i := range pvcList.Items {
+ // Convenience wrapper
+ pvc := &pvcList.Items[i]
+
+ f(pvc)
+ }
+}
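WalkDiscoveredPVCs is the traversal primitive underneath DeletePVC; any per-PVC inspection can reuse it. An illustrative callback (pvcKube is a *PVC from NewStoragePVC, log is the announcer used in this file, ctx and host assumed in scope):

	pvcKube.WalkDiscoveredPVCs(ctx, host, func(pvc *core.PersistentVolumeClaim) {
		// Hypothetical inspection: report the phase of every PVC discovered for the host
		log.V(1).M(host).Info("PVC %s/%s phase: %s", pvc.Namespace, pvc.Name, pvc.Status.Phase)
	})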
diff --git a/pkg/controller/common/storage/storage-reconciler.go b/pkg/controller/common/storage/storage-reconciler.go
new file mode 100644
index 000000000..2c1efea8c
--- /dev/null
+++ b/pkg/controller/common/storage/storage-reconciler.go
@@ -0,0 +1,298 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ core "k8s.io/api/core/v1"
+ apiErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/controller/common"
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/volume"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+// ErrorDataPersistence specifies errors of the PVCs and PVs
+type ErrorDataPersistence error
+
+var (
+ ErrPVCWithLostPVDeleted ErrorDataPersistence = errors.New("pvc with lost pv deleted")
+ ErrPVCIsLost ErrorDataPersistence = errors.New("pvc is lost")
+)
+
+func ErrIsDataLoss(err error) bool {
+ switch err {
+ case ErrPVCWithLostPVDeleted:
+ return true
+ case ErrPVCIsLost:
+ return true
+ }
+ return false
+}
+
+type Reconciler struct {
+ task *common.Task
+ namer interfaces.INameManager
+ pvc interfaces.IKubeStoragePVC
+}
+
+func NewStorageReconciler(task *common.Task, namer interfaces.INameManager, pvc interfaces.IKubeStoragePVC) *Reconciler {
+ return &Reconciler{
+ task: task,
+ namer: namer,
+ pvc: pvc,
+ }
+}
+
+// ReconcilePVCs reconciles all PVCs of a host
+func (w *Reconciler) ReconcilePVCs(ctx context.Context, host *api.Host, which api.WhichStatefulSet) (res ErrorDataPersistence) {
+ if util.IsContextDone(ctx) {
+ return nil
+ }
+
+ namespace := host.Runtime.Address.Namespace
+ log.V(2).M(host).S().Info("host %s/%s", namespace, host.GetName())
+ defer log.V(2).M(host).E().Info("host %s/%s", namespace, host.GetName())
+
+ host.WalkVolumeMounts(which, func(volumeMount *core.VolumeMount) {
+ if util.IsContextDone(ctx) {
+ return
+ }
+ if e := w.reconcilePVCFromVolumeMount(ctx, host, volumeMount); e != nil {
+ if res == nil {
+ res = e
+ }
+ }
+ })
+
+ return
+}
+
+func (w *Reconciler) reconcilePVCFromVolumeMount(
+ ctx context.Context,
+ host *api.Host,
+ volumeMount *core.VolumeMount,
+) (
+ reconcileError ErrorDataPersistence,
+) {
+ // Which PVC are we going to reconcile
+ pvc, volumeClaimTemplate, isModelCreated, err := w.fetchPVC(ctx, host, volumeMount)
+ if err != nil {
+ // Unable to fetch or model PVC correctly.
+  // Maybe the volume is not built from a VolumeClaimTemplate - it may be a reference to a ConfigMap
+ return nil
+ }
+
+ // PVC available. Either fetched or not found and model created (from templates)
+
+ pvcName := "pvc-name-unknown-pvc-not-exist"
+ namespace := host.Runtime.Address.Namespace
+
+ if pvc != nil {
+ pvcName = pvc.Name
+ }
+
+ log.V(2).M(host).S().Info("reconcile volumeMount (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
+ defer log.V(2).M(host).E().Info("reconcile volumeMount (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
+
+ // Check scenario 1 - no PVC available
+ // Such a PVC should be re-created
+ if w.isLostPVC(pvc, isModelCreated, host) {
+ // Looks like data loss detected
+ log.V(1).M(host).Warning("PVC is either newly added to the host or was lost earlier (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
+ reconcileError = ErrPVCIsLost
+ }
+
+ // Check scenario 2 - PVC exists, but no PV available
+ // Such a PVC should be deleted and re-created
+ if w.isLostPV(pvc) {
+ // This PVC has no PV available
+ // Looks like data loss detected
+ w.deletePVC(ctx, pvc)
+ log.V(1).M(host).Info("deleted PVC with lost PV (%s/%s/%s/%s)", namespace, host.GetName(), volumeMount.Name, pvcName)
+
+  // Refresh the PVC model. Since the PVC was just deleted, the refreshed model may not be fetched from k8s,
+  // but it can still be provided by the operator
+ pvc, volumeClaimTemplate, _, _ = w.fetchPVC(ctx, host, volumeMount)
+ reconcileError = ErrPVCWithLostPVDeleted
+ }
+
+ // In any case - be PVC available or not - need to reconcile it
+
+ switch pvcReconciled, err := w.reconcilePVC(ctx, pvc, host, volumeClaimTemplate); err {
+ case errNilPVC:
+ log.M(host).F().Error("Unable to reconcile nil PVC: %s/%s", namespace, pvcName)
+ case nil:
+ w.task.RegistryReconciled().RegisterPVC(pvcReconciled.GetObjectMeta())
+ default:
+ w.task.RegistryFailed().RegisterPVC(pvc.GetObjectMeta())
+ log.M(host).F().Error("Unable to reconcile PVC: %s err: %v", util.NamespacedName(pvc), err)
+ }
+
+ // It still may return data loss errors
+ return reconcileError
+}
+
+func (w *Reconciler) isLostPVC(pvc *core.PersistentVolumeClaim, isJustCreated bool, host *api.Host) bool {
+ if !host.HasData() {
+  // No data to lose
+ return false
+ }
+
+ // Now we assume that this PVC has had some data in the past, since tables were created on it
+
+ if pvc == nil {
+ // No PVC available at all, was it deleted?
+ // Lost PVC
+ return true
+ }
+
+ if isJustCreated {
+ // PVC was just created by the operator, not fetched
+ // Lost PVC
+ return true
+ }
+
+ // PVC is in place
+ return false
+}
+
+func (w *Reconciler) isLostPV(pvc *core.PersistentVolumeClaim) bool {
+ if pvc == nil {
+ return false
+ }
+
+ return pvc.Status.Phase == core.ClaimLost
+}
+
+func (w *Reconciler) fetchPVC(
+ ctx context.Context,
+ host *api.Host,
+ volumeMount *core.VolumeMount,
+) (
+ pvc *core.PersistentVolumeClaim,
+ vct *api.VolumeClaimTemplate,
+ isModelCreated bool,
+ err error,
+) {
+ namespace := host.Runtime.Address.Namespace
+
+ volumeClaimTemplate, ok := volume.GetVolumeClaimTemplate(host, volumeMount)
+ if !ok {
+  // This is not a reference to a VolumeClaimTemplate; it may be a reference to a ConfigMap
+ return nil, nil, false, fmt.Errorf("unable to find VolumeClaimTemplate from volume mount")
+ }
+ pvcName := w.namer.Name(interfaces.NamePVCNameByVolumeClaimTemplate, host, volumeClaimTemplate)
+
+ // We have a VolumeClaimTemplate for this VolumeMount
+ // Treat it as persistent storage mount
+
+ _pvc, e := w.pvc.Get(ctx, namespace, pvcName)
+ if e == nil {
+ log.V(2).M(host).Info("PVC (%s/%s/%s/%s) found", namespace, host.GetName(), volumeMount.Name, pvcName)
+ return _pvc, volumeClaimTemplate, false, nil
+ }
+
+ // We have an error. PVC not fetched
+
+ if !apiErrors.IsNotFound(e) {
+ // In case of any non-NotFound API error - unable to proceed
+ log.M(host).F().Error("ERROR unable to get PVC(%s/%s) err: %v", namespace, pvcName, e)
+ return nil, nil, false, e
+ }
+
+ // We have a NotFound error - the PVC does not exist.
+ // This is not an error per se - the PVC may simply not be created yet
+ log.V(2).M(host).Info("PVC (%s/%s/%s/%s) not found", namespace, host.GetName(), volumeMount.Name, pvcName)
+
+ if volume.OperatorShouldCreatePVC(host, volumeClaimTemplate) {
+ // Operator is in charge of PVCs
+ // Create PVC model.
+ pvc = w.task.Creator().CreatePVC(pvcName, namespace, host, &volumeClaimTemplate.Spec)
+ log.V(1).M(host).Info("PVC (%s/%s/%s/%s) model provided by the operator", namespace, host.GetName(), volumeMount.Name, pvcName)
+ return pvc, volumeClaimTemplate, true, nil
+ }
+
+ // PVC is not available and the operator is not expected to create PVC
+ log.V(1).M(host).Info("PVC (%s/%s/%s/%s) not found and model will not be provided by the operator", namespace, host.GetName(), volumeMount.Name, pvcName)
+ return nil, volumeClaimTemplate, false, nil
+}
+
+var errNilPVC = fmt.Errorf("nil PVC, nothing to reconcile")
+
+// reconcilePVC reconciles specified PVC
+func (w *Reconciler) reconcilePVC(
+ ctx context.Context,
+ pvc *core.PersistentVolumeClaim,
+ host *api.Host,
+ template *api.VolumeClaimTemplate,
+) (*core.PersistentVolumeClaim, error) {
+ if pvc == nil {
+ log.V(2).M(host).F().Info("nil PVC, nothing to reconcile")
+ return nil, errNilPVC
+ }
+
+ log.V(1).M(host).S().Info("reconcile PVC (%s/%s)", util.NamespacedName(pvc), host.GetName())
+ defer log.V(1).M(host).E().Info("reconcile PVC (%s/%s)", util.NamespacedName(pvc), host.GetName())
+
+ if util.IsContextDone(ctx) {
+ log.V(2).Info("task is done")
+ return nil, fmt.Errorf("task is done")
+ }
+
+ model.VolumeClaimTemplateApplyResourcesRequestsOnPVC(template, pvc)
+ pvc = w.task.Creator().AdjustPVC(pvc, host, template)
+ return w.pvc.UpdateOrCreate(ctx, pvc)
+}
+
+func (w *Reconciler) deletePVC(ctx context.Context, pvc *core.PersistentVolumeClaim) bool {
+ log.V(1).M(pvc).F().S().Info("delete PVC with lost PV start: %s", util.NamespacedName(pvc))
+ defer log.V(1).M(pvc).F().E().Info("delete PVC with lost PV end: %s", util.NamespacedName(pvc))
+
+ log.V(2).M(pvc).F().Info("PVC with lost PV about to be deleted: %s", util.NamespacedName(pvc))
+ w.pvc.Delete(ctx, pvc.Namespace, pvc.Name)
+
+ for i := 0; i < 360; i++ {
+
+ // Check availability
+ log.V(2).M(pvc).F().Info("check PVC with lost PV availability: %s", util.NamespacedName(pvc))
+ curPVC, err := w.pvc.Get(ctx, pvc.Namespace, pvc.Name)
+ if err != nil {
+ if apiErrors.IsNotFound(err) {
+ // Not available - consider it to be deleted
+ log.V(1).M(pvc).F().Warning("PVC with lost PV was deleted: %s", util.NamespacedName(pvc))
+ return true
+ }
+ }
+
+  // PVC is not deleted (yet?). Maybe it has finalizers installed. Need to clean them.
+  // Guard against nil curPVC: a transient (non-NotFound) Get error above leaves it nil.
+  if curPVC != nil && len(curPVC.Finalizers) > 0 {
+ log.V(2).M(pvc).F().Info("clean finalizers for PVC with lost PV: %s", util.NamespacedName(pvc))
+ curPVC.Finalizers = nil
+ w.pvc.UpdateOrCreate(ctx, curPVC)
+ }
+ time.Sleep(10 * time.Second)
+ }
+
+ return false
+}
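Callers are meant to branch on the sentinel data-loss errors via ErrIsDataLoss rather than matching error strings. A hedged sketch of the expected handling (the reaction shown is an assumption; the actual handling lives in the workers):

	if err := storageReconciler.ReconcilePVCs(ctx, host, api.DesiredStatefulSet); storage.ErrIsDataLoss(err) {
		// The volume is newly added or its PV was lost: the host likely needs a
		// force-recreated StatefulSet and a schema/data re-bootstrap afterwards.
	}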
diff --git a/pkg/controller/common/task.go b/pkg/controller/common/task.go
new file mode 100644
index 000000000..f8ab9f1e2
--- /dev/null
+++ b/pkg/controller/common/task.go
@@ -0,0 +1,62 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "time"
+
+ "github.com/altinity/clickhouse-operator/pkg/interfaces"
+ "github.com/altinity/clickhouse-operator/pkg/model"
+)
+
+// Task represents the context of a worker; it can also be called "a reconcile task"
+type Task struct {
+ creator interfaces.ICreator
+ registryReconciled *model.Registry
+ registryFailed *model.Registry
+ cmUpdate time.Time
+ start time.Time
+}
+
+// NewTask creates a new task
+func NewTask(creator interfaces.ICreator) *Task {
+ return &Task{
+ creator: creator,
+ registryReconciled: model.NewRegistry(),
+ registryFailed: model.NewRegistry(),
+ cmUpdate: time.Time{},
+ start: time.Now(),
+ }
+}
+
+func (t *Task) Creator() interfaces.ICreator {
+ return t.creator
+}
+
+func (t *Task) RegistryReconciled() *model.Registry {
+ return t.registryReconciled
+}
+
+func (t *Task) RegistryFailed() *model.Registry {
+ return t.registryFailed
+}
+
+func (t *Task) CmUpdate() time.Time {
+ return t.cmUpdate
+}
+
+func (t *Task) SetCmUpdate(update time.Time) {
+ t.cmUpdate = update
+}
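A Task is created once per reconcile cycle and threads the creator plus the reconciled/failed registries through the workers; CmUpdate marks when the ConfigMap was last written so waitForConfigMapPropagation (above) can compute the remaining settle time. A sketch (creator and pvc are assumed in scope):

	// One Task per reconcile cycle; creator implements interfaces.ICreator:
	task := NewTask(creator)

	// Record the moment a ConfigMap was (re)written, so StatefulSet reconciling
	// can wait out the propagation window before restarting pods:
	task.SetCmUpdate(time.Now())

	// Workers file outcomes into the registries as they go:
	task.RegistryReconciled().RegisterPVC(pvc.GetObjectMeta())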
diff --git a/pkg/controller/common/util.go b/pkg/controller/common/util.go
new file mode 100644
index 000000000..e855c1f5b
--- /dev/null
+++ b/pkg/controller/common/util.go
@@ -0,0 +1,70 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "gopkg.in/d4l3k/messagediff.v1"
+ apps "k8s.io/api/apps/v1"
+
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/util"
+)
+
+func DumpStatefulSetDiff(host *api.Host, cur, new *apps.StatefulSet) {
+ if cur == nil {
+ log.V(1).M(host).Info("Cur StatefulSet is not available, nothing to compare to")
+ return
+ }
+ if new == nil {
+ log.V(1).M(host).Info("New StatefulSet is not available, nothing to compare to")
+ return
+ }
+
+ if diff, equal := messagediff.DeepDiff(cur.Spec, new.Spec); equal {
+ log.V(1).M(host).Info("StatefulSet.Spec ARE EQUAL")
+ } else {
+ log.V(1).Info(
+ "StatefulSet.Spec ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
+ util.MessageDiffItemString("added .spec items", "none", "", diff.Added),
+ util.MessageDiffItemString("modified .spec items", "none", "", diff.Modified),
+ util.MessageDiffItemString("removed .spec items", "none", "", diff.Removed),
+ )
+ }
+ if diff, equal := messagediff.DeepDiff(cur.Labels, new.Labels); equal {
+ log.V(1).M(host).Info("StatefulSet.Labels ARE EQUAL")
+ } else {
+ if len(cur.Labels)+len(new.Labels) > 0 {
+ log.V(1).Info(
+ "StatefulSet.Labels ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
+ util.MessageDiffItemString("added .labels items", "none", "", diff.Added),
+ util.MessageDiffItemString("modified .labels items", "none", "", diff.Modified),
+ util.MessageDiffItemString("removed .labels items", "none", "", diff.Removed),
+ )
+ }
+ }
+ if diff, equal := messagediff.DeepDiff(cur.Annotations, new.Annotations); equal {
+ log.V(1).M(host).Info("StatefulSet.Annotations ARE EQUAL")
+ } else {
+ if len(cur.Annotations)+len(new.Annotations) > 0 {
+ log.V(1).Info(
+ "StatefulSet.Annotations ARE DIFFERENT:\nadded:\n%s\nmodified:\n%s\nremoved:\n%s",
+ util.MessageDiffItemString("added .annotations items", "none", "", diff.Added),
+ util.MessageDiffItemString("modified .annotations items", "none", "", diff.Modified),
+ util.MessageDiffItemString("removed .annotations items", "none", "", diff.Removed),
+ )
+ }
+ }
+}
diff --git a/pkg/controller/common/worker-log.go b/pkg/controller/common/worker-log.go
new file mode 100644
index 000000000..839b771d9
--- /dev/null
+++ b/pkg/controller/common/worker-log.go
@@ -0,0 +1,47 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ log "github.com/altinity/clickhouse-operator/pkg/announcer"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/model/common/action_plan"
+)
+
+// LogCR writes a CR into the log
+func LogCR(name string, cr api.ICustomResource) {
+ log.V(1).M(cr).Info(
+ "logCR %s start--------------------------------------------:\n%s\nlogCR %s end--------------------------------------------",
+ name,
+  cr.YAML(types.CopyCROptions{SkipStatus: true, SkipManagedFields: true}),
+  name,
+ )
+}
+
+// LogActionPlan logs action plan
+func LogActionPlan(ap *action_plan.ActionPlan) {
+ log.Info(
+ "ActionPlan start---------------------------------------------:\n%s\nActionPlan end---------------------------------------------",
+ ap,
+ )
+}
+
+// LogOldAndNew writes old and new CRs into the log
+func LogOldAndNew(name string, old, new api.ICustomResource) {
+ LogCR(name+" old", old)
+ LogCR(name+" new", new)
+}
diff --git a/pkg/controller/poller.go b/pkg/controller/poller.go
deleted file mode 100644
index 5fb2b1d68..000000000
--- a/pkg/controller/poller.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controller
-
-import (
- "context"
- "fmt"
- "time"
-
- log "github.com/altinity/clickhouse-operator/pkg/announcer"
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-const (
- waitStatefulSetGenerationTimeoutBeforeStartBothering = 60
- waitStatefulSetGenerationTimeoutToCreateStatefulSet = 30
-)
-
-// PollerOptions specifies polling options
-type PollerOptions struct {
- StartBotheringAfterTimeout time.Duration
- GetErrorTimeout time.Duration
- Timeout time.Duration
- MainInterval time.Duration
- BackgroundInterval time.Duration
-}
-
-// NewPollerOptions creates new poll options
-func NewPollerOptions() *PollerOptions {
- return &PollerOptions{}
-}
-
-// Ensure ensures poll options do exist
-func (o *PollerOptions) Ensure() *PollerOptions {
- if o == nil {
- return NewPollerOptions()
- }
- return o
-}
-
-// FromConfig makes poll options from config
-func (o *PollerOptions) FromConfig(config *api.OperatorConfig) *PollerOptions {
- if o == nil {
- return nil
- }
- o.StartBotheringAfterTimeout = time.Duration(waitStatefulSetGenerationTimeoutBeforeStartBothering) * time.Second
- o.GetErrorTimeout = time.Duration(waitStatefulSetGenerationTimeoutToCreateStatefulSet) * time.Second
- o.Timeout = time.Duration(config.Reconcile.StatefulSet.Update.Timeout) * time.Second
- o.MainInterval = time.Duration(config.Reconcile.StatefulSet.Update.PollInterval) * time.Second
- o.BackgroundInterval = 1 * time.Second
- return o
-}
-
-// SetCreateTimeout sets create timeout
-func (o *PollerOptions) SetGetErrorTimeout(timeout time.Duration) *PollerOptions {
- if o == nil {
- return nil
- }
- o.GetErrorTimeout = timeout
- return o
-}
-
-type PollerFunctions struct {
- Get func(context.Context) (any, error)
- IsDone func(context.Context, any) bool
- ShouldContinue func(context.Context, any, error) bool
-}
-
-func (p *PollerFunctions) CallGet(c context.Context) (any, error) {
- if p == nil {
- return nil, nil
- }
- if p.Get == nil {
- return nil, nil
- }
- return p.Get(c)
-}
-
-func (p *PollerFunctions) CallIsDone(c context.Context, a any) bool {
- if p == nil {
- return false
- }
- if p.IsDone == nil {
- return false
- }
- return p.IsDone(c, a)
-}
-
-func (p *PollerFunctions) CallShouldContinue(c context.Context, a any, e error) bool {
- if p == nil {
- return false
- }
- if p.ShouldContinue == nil {
- return false
- }
- return p.ShouldContinue(c, a, e)
-}
-
-type PollerBackgroundFunctions struct {
- F func(context.Context)
-}
-
-func Poll(
- ctx context.Context,
- namespace, name string,
- opts *PollerOptions,
- main *PollerFunctions,
- background *PollerBackgroundFunctions,
-) error {
- opts = opts.Ensure()
- start := time.Now()
- for {
- if util.IsContextDone(ctx) {
- log.V(2).Info("task is done")
- return nil
- }
-
- item, err := main.CallGet(ctx)
- switch {
- case err == nil:
- // Object is found - process it
- if main.CallIsDone(ctx, item) {
- // All is good, job is done, exit
- log.V(1).M(namespace, name).F().Info("OK %s/%s", namespace, name)
- return nil
- }
- // Object is found, but processor function says we need to continue polling
- case main.CallShouldContinue(ctx, item, err):
- // Object is not found - it either failed to be created or just still not created
- if (opts.GetErrorTimeout > 0) && (time.Since(start) >= opts.GetErrorTimeout) {
- // No more wait for the object to be created. Consider create process as failed.
- log.V(1).M(namespace, name).F().Error("Get() FAILED - item is not available and get timeout reached. Abort")
- return err
- }
- // Object is not found - create timeout is not reached, we need to continue polling
- default:
- // Some kind of total error, abort polling
- log.M(namespace, name).F().Error("%s/%s Get() FAILED", namespace, name)
- return err
- }
-
- // Continue polling
-
-		// Maybe it's time to abort polling?
- if time.Since(start) >= opts.Timeout {
- // Timeout reached, no good result available, time to abort
- log.V(1).M(namespace, name).F().Info("poll(%s/%s) - TIMEOUT reached", namespace, name)
- return fmt.Errorf("poll(%s/%s) - wait timeout", namespace, name)
- }
-
- // Continue polling
-
-		// Maybe it's time to start bothering the logs?
- if time.Since(start) >= opts.StartBotheringAfterTimeout {
- // Start bothering with log messages after some time only
- log.V(1).M(namespace, name).F().Info("WAIT:%s/%s", namespace, name)
- }
-
-		// Wait some more time and launch background process(es)
- log.V(2).M(namespace, name).F().P()
- sleepAndRunBackgroundProcess(ctx, opts, background)
- } // for
-}
-
-func sleepAndRunBackgroundProcess(ctx context.Context, opts *PollerOptions, background *PollerBackgroundFunctions) {
- if ctx == nil {
- ctx = context.Background()
- }
- switch {
- case opts.BackgroundInterval > 0:
- mainIntervalTimeout := time.After(opts.MainInterval)
- backgroundIntervalTimeout := time.After(opts.BackgroundInterval)
- for {
- select {
- case <-ctx.Done():
- // Context is done, nothing to do here more
- return
- case <-mainIntervalTimeout:
- // Timeout reached, nothing to do here more
- return
- case <-backgroundIntervalTimeout:
- // Function interval reached, time to call the func
- if background != nil {
- if background.F != nil {
- background.F(ctx)
- }
- }
- backgroundIntervalTimeout = time.After(opts.BackgroundInterval)
- }
- }
- default:
- util.WaitContextDoneOrTimeout(ctx, opts.MainInterval)
- }
-}
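
For review context, here is a minimal sketch of how the removed Poll() helper was driven. Everything outside the poller API itself (the waitStatefulSetReady name, the ready-replica check) is an illustrative assumption, not code from this repository:

```go
// Hypothetical usage sketch of the removed Poll() helper; not repository code.
package controller

import (
	"context"

	apps "k8s.io/api/apps/v1"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube "k8s.io/client-go/kubernetes"

	"github.com/altinity/clickhouse-operator/pkg/chop"
)

func waitStatefulSetReady(ctx context.Context, client kube.Interface, namespace, name string) error {
	// Timeouts and intervals come from operator config, as FromConfig() shows above
	opts := NewPollerOptions().FromConfig(chop.Config())
	return Poll(ctx, namespace, name, opts,
		&PollerFunctions{
			// Get fetches the object being polled
			Get: func(c context.Context) (any, error) {
				return client.AppsV1().StatefulSets(namespace).Get(c, name, meta.GetOptions{})
			},
			// IsDone reports success once all replicas are ready
			IsDone: func(c context.Context, a any) bool {
				sts := a.(*apps.StatefulSet)
				return sts.Spec.Replicas != nil && sts.Status.ReadyReplicas == *sts.Spec.Replicas
			},
			// ShouldContinue keeps polling while the object is simply not created yet
			ShouldContinue: func(c context.Context, a any, e error) bool {
				return apiErrors.IsNotFound(e)
			},
		},
		nil, // no background work; Poll just waits out MainInterval between attempts
	)
}
```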
diff --git a/pkg/interfaces/annotate_type.go b/pkg/interfaces/annotate_type.go
new file mode 100644
index 000000000..f29166726
--- /dev/null
+++ b/pkg/interfaces/annotate_type.go
@@ -0,0 +1,40 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type AnnotateType string
+
+const (
+ AnnotateServiceCR AnnotateType = "annotate svc cr"
+ AnnotateServiceCluster AnnotateType = "annotate svc cluster"
+ AnnotateServiceShard AnnotateType = "annotate svc shard"
+ AnnotateServiceHost AnnotateType = "annotate svc host"
+
+ AnnotateExistingPV AnnotateType = "annotate existing pv"
+ AnnotateNewPVC AnnotateType = "annotate new pvc"
+ AnnotateExistingPVC AnnotateType = "annotate existing pvc"
+
+ AnnotatePDB AnnotateType = "annotate pdb"
+
+ AnnotateSTS AnnotateType = "annotate STS"
+
+ AnnotatePodTemplate AnnotateType = "annotate PodTemplate"
+)
+
+const (
+ AnnotateConfigMapCommon AnnotateType = "annotate cm common"
+ AnnotateConfigMapCommonUsers AnnotateType = "annotate cm common users"
+ AnnotateConfigMapHost AnnotateType = "annotate cm host"
+)
diff --git a/pkg/interfaces/cluster_type.go b/pkg/interfaces/cluster_type.go
new file mode 100644
index 000000000..2048e5c3b
--- /dev/null
+++ b/pkg/interfaces/cluster_type.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type ClusterType string
+
+const (
+ ClusterCHIDefault ClusterType = "chi cluster default"
+ ClusterCHKDefault ClusterType = "chk cluster default"
+)
diff --git a/pkg/interfaces/config_map_type.go b/pkg/interfaces/config_map_type.go
new file mode 100644
index 000000000..e6000550e
--- /dev/null
+++ b/pkg/interfaces/config_map_type.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type ConfigMapType string
+
+const (
+ ConfigMapCommon ConfigMapType = "common"
+ ConfigMapCommonUsers ConfigMapType = "common users"
+ ConfigMapHost ConfigMapType = "host"
+ ConfigMapConfig ConfigMapType = "config"
+)
diff --git a/pkg/interfaces/files_group_type.go b/pkg/interfaces/files_group_type.go
new file mode 100644
index 000000000..6a3263693
--- /dev/null
+++ b/pkg/interfaces/files_group_type.go
@@ -0,0 +1,23 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type FilesGroupType string
+
+const (
+ FilesGroupCommon FilesGroupType = "FilesGroupType common"
+ FilesGroupUsers FilesGroupType = "FilesGroupType users"
+ FilesGroupHost FilesGroupType = "FilesGroupType host"
+)
diff --git a/pkg/interfaces/host_template_type.go b/pkg/interfaces/host_template_type.go
new file mode 100644
index 000000000..e6e64fc92
--- /dev/null
+++ b/pkg/interfaces/host_template_type.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type HostTemplateType string
+
+const (
+ HostTemplateCommon HostTemplateType = "ht common"
+ HostTemplateHostNetwork HostTemplateType = "ht host net"
+)
diff --git a/pkg/interfaces/interfaces-kube.go b/pkg/interfaces/interfaces-kube.go
new file mode 100644
index 000000000..1c074ebc1
--- /dev/null
+++ b/pkg/interfaces/interfaces-kube.go
@@ -0,0 +1,120 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+import (
+ "context"
+
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ policy "k8s.io/api/policy/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+)
+
+type IKube interface {
+ CR() IKubeCR
+ ConfigMap() IKubeConfigMap
+ Deployment() IKubeDeployment
+ PDB() IKubePDB
+ Event() IKubeEvent
+ Pod() IKubePod
+ Storage() IKubeStoragePVC
+ ReplicaSet() IKubeReplicaSet
+ Secret() IKubeSecret
+ Service() IKubeService
+ STS() IKubeSTS
+}
+
+type IKubeConfigMap interface {
+ Create(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error)
+ Get(ctx context.Context, namespace, name string) (*core.ConfigMap, error)
+ Update(ctx context.Context, cm *core.ConfigMap) (*core.ConfigMap, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.ConfigMap, error)
+}
+
+type IKubeDeployment interface {
+ Get(namespace, name string) (*apps.Deployment, error)
+ Update(deployment *apps.Deployment) (*apps.Deployment, error)
+}
+
+type IKubeEvent interface {
+ Create(ctx context.Context, event *core.Event) (*core.Event, error)
+}
+
+type IKubePDB interface {
+ Create(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
+ Get(ctx context.Context, namespace, name string) (*policy.PodDisruptionBudget, error)
+ Update(ctx context.Context, pdb *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]policy.PodDisruptionBudget, error)
+}
+
+type IKubePod interface {
+ Get(params ...any) (*core.Pod, error)
+ GetAll(obj any) []*core.Pod
+ Update(ctx context.Context, pod *core.Pod) (*core.Pod, error)
+ Delete(ctx context.Context, namespace, name string) error
+}
+
+type IKubePVC interface {
+ Create(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
+ Get(ctx context.Context, namespace, name string) (*core.PersistentVolumeClaim, error)
+ Update(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.PersistentVolumeClaim, error)
+ ListForHost(ctx context.Context, host *api.Host) (*core.PersistentVolumeClaimList, error)
+}
+type IKubeStoragePVC interface {
+ IKubePVC
+ UpdateOrCreate(ctx context.Context, pvc *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error)
+}
+
+type IKubeCR interface {
+ Get(ctx context.Context, namespace, name string) (api.ICustomResource, error)
+ StatusUpdate(ctx context.Context, cr api.ICustomResource, opts types.UpdateStatusOptions) (err error)
+}
+
+type IKubeReplicaSet interface {
+ Get(ctx context.Context, namespace, name string) (*apps.ReplicaSet, error)
+ Update(ctx context.Context, replicaSet *apps.ReplicaSet) (*apps.ReplicaSet, error)
+}
+
+type IKubeSecret interface {
+ Get(ctx context.Context, params ...any) (*core.Secret, error)
+ Create(ctx context.Context, svc *core.Secret) (*core.Secret, error)
+ Update(ctx context.Context, svc *core.Secret) (*core.Secret, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Secret, error)
+}
+
+type IKubeService interface {
+ Get(ctx context.Context, params ...any) (*core.Service, error)
+ Create(ctx context.Context, svc *core.Service) (*core.Service, error)
+ Update(ctx context.Context, svc *core.Service) (*core.Service, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]core.Service, error)
+}
+
+type IKubeSTS interface {
+ Get(ctx context.Context, params ...any) (*apps.StatefulSet, error)
+ Create(ctx context.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error)
+ Update(ctx context.Context, sts *apps.StatefulSet) (*apps.StatefulSet, error)
+ Delete(ctx context.Context, namespace, name string) error
+ List(ctx context.Context, namespace string, opts meta.ListOptions) ([]apps.StatefulSet, error)
+}
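
The IKube facade exists so reconciliation code can depend on one narrow surface instead of concrete clientsets. A hedged consumer sketch of that idea — ensureConfigMap is an illustrative name, not an operator API:

```go
// Hypothetical consumer sketch: code written against IKube needs no concrete clientset.
package example

import (
	"context"

	core "k8s.io/api/core/v1"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/altinity/clickhouse-operator/pkg/interfaces"
)

// ensureConfigMap creates the ConfigMap if it is absent, updates it otherwise.
func ensureConfigMap(ctx context.Context, k interfaces.IKube, cm *core.ConfigMap) error {
	_, err := k.ConfigMap().Get(ctx, cm.Namespace, cm.Name)
	switch {
	case err == nil:
		_, err = k.ConfigMap().Update(ctx, cm)
	case apiErrors.IsNotFound(err):
		_, err = k.ConfigMap().Create(ctx, cm)
	}
	return err
}
```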
diff --git a/pkg/interfaces/interfaces-main.go b/pkg/interfaces/interfaces-main.go
new file mode 100644
index 000000000..13edc5ead
--- /dev/null
+++ b/pkg/interfaces/interfaces-main.go
@@ -0,0 +1,116 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+import (
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
+ policy "k8s.io/api/policy/v1"
+ meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+)
+
+type IConfigMapManager interface {
+ CreateConfigMap(what ConfigMapType, params ...any) *core.ConfigMap
+ SetCR(cr api.ICustomResource)
+ SetTagger(tagger ITagger)
+ SetConfigFilesGenerator(configFilesGenerator IConfigFilesGenerator)
+}
+
+type IConfigFilesGenerator interface {
+ CreateConfigFiles(what FilesGroupType, params ...any) map[string]string
+}
+
+type INameManager interface {
+ Names(what NameType, params ...any) []string
+ Name(what NameType, params ...any) string
+}
+
+type IAnnotator interface {
+ Annotate(what AnnotateType, params ...any) map[string]string
+}
+
+type IMacro interface {
+ Get(string) string
+ Scope(scope any) IMacro
+ Line(line string) string
+ Map(_map map[string]string) map[string]string
+}
+
+type ILabeler interface {
+ Label(what LabelType, params ...any) map[string]string
+ Selector(what SelectorType, params ...any) map[string]string
+ MakeObjectVersion(meta meta.Object, obj interface{})
+ GetObjectVersion(meta meta.Object) (string, bool)
+ Get(string) string
+}
+
+type ITagger interface {
+ Annotate(what AnnotateType, params ...any) map[string]string
+ Label(what LabelType, params ...any) map[string]string
+ Selector(what SelectorType, params ...any) map[string]string
+}
+
+type IVolumeManager interface {
+ SetupVolumes(what VolumeType, statefulSet *apps.StatefulSet, host *api.Host)
+ SetCR(cr api.ICustomResource)
+}
+
+type IContainerManager interface {
+ NewDefaultAppContainer(host *api.Host) core.Container
+ GetAppContainer(statefulSet *apps.StatefulSet) (*core.Container, bool)
+ EnsureAppContainer(statefulSet *apps.StatefulSet, host *api.Host)
+ EnsureLogContainer(statefulSet *apps.StatefulSet)
+}
+
+type IProbeManager interface {
+ CreateProbe(what ProbeType, host *api.Host) *core.Probe
+}
+
+type IServiceManager interface {
+ CreateService(what ServiceType, params ...any) *core.Service
+ SetCR(cr api.ICustomResource)
+ SetTagger(tagger ITagger)
+}
+
+type ICreator interface {
+ CreateConfigMap(what ConfigMapType, params ...any) *core.ConfigMap
+ CreatePodDisruptionBudget(cluster api.ICluster) *policy.PodDisruptionBudget
+ CreatePVC(
+ name string,
+ namespace string,
+ host *api.Host,
+ spec *core.PersistentVolumeClaimSpec,
+ ) *core.PersistentVolumeClaim
+ AdjustPVC(
+ pvc *core.PersistentVolumeClaim,
+ host *api.Host,
+ template *api.VolumeClaimTemplate,
+ ) *core.PersistentVolumeClaim
+ CreateClusterSecret(name string) *core.Secret
+ CreateService(what ServiceType, params ...any) *core.Service
+ CreateStatefulSet(host *api.Host, shutdown bool) *apps.StatefulSet
+}
+
+type IEventEmitter interface {
+ EventInfo(obj meta.Object, action string, reason string, message string)
+ EventWarning(obj meta.Object, action string, reason string, message string)
+ EventError(obj meta.Object, action string, reason string, message string)
+}
+
+type IOwnerReferencesManager interface {
+ CreateOwnerReferences(owner api.ICustomResource) []meta.OwnerReference
+}
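
Most of these managers share one shape: a single entry point that dispatches on a typed "what" constant. A minimal sketch of an IProbeManager under that pattern — probe values are illustrative, and the ProbeHandler field assumes k8s.io/api v1.23+:

```go
// Illustrative sketch only: a trivial IProbeManager that dispatches on ProbeType.
package example

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
	"github.com/altinity/clickhouse-operator/pkg/interfaces"
)

type probeManager struct{}

func (m *probeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *core.Probe {
	switch what {
	case interfaces.ProbeDefaultLiveness:
		return &core.Probe{
			ProbeHandler: core.ProbeHandler{
				HTTPGet: &core.HTTPGetAction{Path: "/ping", Port: intstr.FromString("http")},
			},
			InitialDelaySeconds: 60, // illustrative value
		}
	case interfaces.ProbeDefaultReadiness:
		return &core.Probe{
			ProbeHandler: core.ProbeHandler{
				HTTPGet: &core.HTTPGetAction{Path: "/ping", Port: intstr.FromString("http")},
			},
		}
	}
	return nil
}

// Compile-time check that the sketch satisfies the interface.
var _ interfaces.IProbeManager = &probeManager{}
```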
diff --git a/pkg/interfaces/label_type.go b/pkg/interfaces/label_type.go
new file mode 100644
index 000000000..2db79e4a0
--- /dev/null
+++ b/pkg/interfaces/label_type.go
@@ -0,0 +1,40 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type LabelType string
+
+const (
+ LabelConfigMapCommon LabelType = "Label cm common"
+ LabelConfigMapCommonUsers LabelType = "Label cm common users"
+ LabelConfigMapHost LabelType = "Label cm host"
+)
+
+const (
+ LabelServiceCR LabelType = "Label svc chi"
+ LabelServiceCluster LabelType = "Label svc cluster"
+ LabelServiceShard LabelType = "Label svc shard"
+ LabelServiceHost LabelType = "Label svc host"
+
+ LabelExistingPV LabelType = "Label existing pv"
+ LabelNewPVC LabelType = "Label new pvc"
+ LabelExistingPVC LabelType = "Label existing pvc"
+
+ LabelPDB LabelType = "Label pdb"
+
+ LabelSTS LabelType = "Label STS"
+
+ LabelPodTemplate LabelType = "Label PodTemplate"
+)
diff --git a/pkg/interfaces/name_type.go b/pkg/interfaces/name_type.go
new file mode 100644
index 000000000..94a5cfacb
--- /dev/null
+++ b/pkg/interfaces/name_type.go
@@ -0,0 +1,43 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type NameType string
+
+const (
+ NameConfigMapHost NameType = "ConfigMapHost"
+ NameConfigMapCommon NameType = "ConfigMapCommon"
+ NameConfigMapCommonUsers NameType = "NameConfigMapCommonUsers"
+)
+const (
+ NameCRService NameType = "NameCRService"
+ NameCRServiceFQDN NameType = "NameCRServiceFQDN"
+ NameClusterService NameType = "NameClusterService"
+ NameShardService NameType = "NameShardService"
+ NameShard NameType = "NameShard"
+ NameReplica NameType = "NameReplica"
+ NameHost NameType = "NameHost"
+ NameHostTemplate NameType = "NameHostTemplate"
+ NameInstanceHostname NameType = "NameInstanceHostname"
+ NameStatefulSet NameType = "NameStatefulSet"
+ NameStatefulSetService NameType = "NameStatefulSetService"
+ NamePodHostname NameType = "NamePodHostname"
+ NameFQDN NameType = "NameFQDN"
+ NameFQDNs NameType = "NameFQDNs"
+ NamePodHostnameRegexp NameType = "NamePodHostnameRegexp"
+ NamePod NameType = "NamePod"
+ NamePVCNameByVolumeClaimTemplate NameType = "NamePVCNameByVolumeClaimTemplate"
+ NameClusterAutoSecret NameType = "NameClusterAutoSecret"
+)
diff --git a/pkg/interfaces/probe_type.go b/pkg/interfaces/probe_type.go
new file mode 100644
index 000000000..923d9a1b4
--- /dev/null
+++ b/pkg/interfaces/probe_type.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type ProbeType string
+
+const (
+ ProbeDefaultLiveness ProbeType = "ProbeDefaultLiveness"
+ ProbeDefaultReadiness ProbeType = "ProbeDefaultReadiness"
+)
diff --git a/pkg/interfaces/selector_type.go b/pkg/interfaces/selector_type.go
new file mode 100644
index 000000000..b8cf86c27
--- /dev/null
+++ b/pkg/interfaces/selector_type.go
@@ -0,0 +1,26 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type SelectorType string
+
+const (
+ SelectorCRScope SelectorType = "SelectorCRScope"
+ SelectorCRScopeReady SelectorType = "SelectorCRScopeReady"
+ SelectorClusterScope SelectorType = "SelectorClusterScope"
+ SelectorClusterScopeReady SelectorType = "SelectorClusterScopeReady"
+ SelectorShardScopeReady SelectorType = "SelectorShardScopeReady"
+	SelectorHostScope         SelectorType = "SelectorHostScope"
+)
diff --git a/pkg/interfaces/service_type.go b/pkg/interfaces/service_type.go
new file mode 100644
index 000000000..2590aeab5
--- /dev/null
+++ b/pkg/interfaces/service_type.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type ServiceType string
+
+const (
+ ServiceCR ServiceType = "svc chi"
+ ServiceCluster ServiceType = "svc cluster"
+ ServiceShard ServiceType = "svc shard"
+ ServiceHost ServiceType = "svc host"
+)
diff --git a/pkg/interfaces/volume_type.go b/pkg/interfaces/volume_type.go
new file mode 100644
index 000000000..60dc34925
--- /dev/null
+++ b/pkg/interfaces/volume_type.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interfaces
+
+type VolumeType string
+
+const (
+ VolumesForConfigMaps VolumeType = "VolumesForConfigMaps"
+ VolumesUserDataWithFixedPaths VolumeType = "VolumesUserDataWithFixedPaths"
+)
diff --git a/pkg/apis/metrics/chi_index.go b/pkg/metrics/clickhouse/chi_index.go
similarity index 66%
rename from pkg/apis/metrics/chi_index.go
rename to pkg/metrics/clickhouse/chi_index.go
index 157dd48d1..f70209b3f 100644
--- a/pkg/apis/metrics/chi_index.go
+++ b/pkg/metrics/clickhouse/chi_index.go
@@ -12,19 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
-type chInstallationsIndex map[string]*WatchedCHI
+import "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
-func (i chInstallationsIndex) slice() []*WatchedCHI {
- res := make([]*WatchedCHI, 0)
+type chInstallationsIndex map[string]*metrics.WatchedCHI
+
+func (i chInstallationsIndex) slice() []*metrics.WatchedCHI {
+ res := make([]*metrics.WatchedCHI, 0)
for _, chi := range i {
res = append(res, chi)
}
return res
}
-func (i chInstallationsIndex) get(key string) (*WatchedCHI, bool) {
+func (i chInstallationsIndex) get(key string) (*metrics.WatchedCHI, bool) {
if i == nil {
return nil, false
}
@@ -34,7 +36,7 @@ func (i chInstallationsIndex) get(key string) (*WatchedCHI, bool) {
return nil, false
}
-func (i chInstallationsIndex) set(key string, value *WatchedCHI) {
+func (i chInstallationsIndex) set(key string, value *metrics.WatchedCHI) {
if i == nil {
return
}
@@ -50,9 +52,9 @@ func (i chInstallationsIndex) remove(key string) {
}
}
-func (i chInstallationsIndex) walk(f func(*WatchedCHI, *WatchedCluster, *WatchedHost)) {
+func (i chInstallationsIndex) walk(f func(*metrics.WatchedCHI, *metrics.WatchedCluster, *metrics.WatchedHost)) {
// Loop over ClickHouseInstallations
for _, chi := range i {
- chi.walkHosts(f)
+ chi.WalkHosts(f)
}
}
diff --git a/pkg/apis/metrics/clickhouse_metrics_fetcher.go b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
similarity index 99%
rename from pkg/apis/metrics/clickhouse_metrics_fetcher.go
rename to pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
index 28274857a..cbad28ebc 100644
--- a/pkg/apis/metrics/clickhouse_metrics_fetcher.go
+++ b/pkg/metrics/clickhouse/clickhouse_metrics_fetcher.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
import (
"context"
diff --git a/pkg/apis/metrics/exporter.go b/pkg/metrics/clickhouse/exporter.go
similarity index 83%
rename from pkg/apis/metrics/exporter.go
rename to pkg/metrics/clickhouse/exporter.go
index 0fc6ee585..6a07d36ca 100644
--- a/pkg/apis/metrics/exporter.go
+++ b/pkg/metrics/clickhouse/exporter.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
import (
"context"
@@ -29,11 +29,14 @@ import (
kube "k8s.io/client-go/kubernetes"
api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
+ "github.com/altinity/clickhouse-operator/pkg/apis/common/types"
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
"github.com/altinity/clickhouse-operator/pkg/chop"
chopAPI "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
"github.com/altinity/clickhouse-operator/pkg/controller"
chiNormalizer "github.com/altinity/clickhouse-operator/pkg/model/chi/normalizer"
"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
+ normalizerCommon "github.com/altinity/clickhouse-operator/pkg/model/common/normalizer"
)
// Exporter implements prometheus.Collector interface
@@ -53,13 +56,13 @@ var _ prometheus.Collector = &Exporter{}
// NewExporter returns a new instance of Exporter type
func NewExporter(collectorTimeout time.Duration) *Exporter {
return &Exporter{
- chInstallations: make(map[string]*WatchedCHI),
+ chInstallations: make(map[string]*metrics.WatchedCHI),
collectorTimeout: collectorTimeout,
}
}
// getWatchedCHIs
-func (e *Exporter) getWatchedCHIs() []*WatchedCHI {
+func (e *Exporter) getWatchedCHIs() []*metrics.WatchedCHI {
return e.chInstallations.slice()
}
@@ -91,9 +94,9 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
log.V(1).Infof("Launching host collectors [%s]", time.Now().Sub(start))
var wg = sync.WaitGroup{}
- e.chInstallations.walk(func(chi *WatchedCHI, _ *WatchedCluster, host *WatchedHost) {
+ e.chInstallations.walk(func(chi *metrics.WatchedCHI, _ *metrics.WatchedCluster, host *metrics.WatchedHost) {
wg.Add(1)
- go func(ctx context.Context, chi *WatchedCHI, host *WatchedHost, ch chan<- prometheus.Metric) {
+ go func(ctx context.Context, chi *metrics.WatchedCHI, host *metrics.WatchedHost, ch chan<- prometheus.Metric) {
defer wg.Done()
e.collectHostMetrics(ctx, chi, host, ch)
}(ctx, chi, host, ch)
@@ -107,7 +110,7 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
}
// enqueueToRemoveFromWatched
-func (e *Exporter) enqueueToRemoveFromWatched(chi *WatchedCHI) {
+func (e *Exporter) enqueueToRemoveFromWatched(chi *metrics.WatchedCHI) {
e.toRemoveFromWatched.Store(chi, struct{}{})
}
@@ -117,10 +120,10 @@ func (e *Exporter) cleanup() {
log.V(2).Info("Starting cleanup")
e.toRemoveFromWatched.Range(func(key, value interface{}) bool {
switch key.(type) {
- case *WatchedCHI:
+ case *metrics.WatchedCHI:
e.toRemoveFromWatched.Delete(key)
- e.removeFromWatched(key.(*WatchedCHI))
- log.V(1).Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*WatchedCHI).Name, key.(*WatchedCHI).Namespace)
+ e.removeFromWatched(key.(*metrics.WatchedCHI))
+ log.V(1).Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*metrics.WatchedCHI).Name, key.(*metrics.WatchedCHI).Namespace)
}
return true
})
@@ -128,33 +131,33 @@ func (e *Exporter) cleanup() {
}
// removeFromWatched deletes record from Exporter.chInstallation map identified by chiName key
-func (e *Exporter) removeFromWatched(chi *WatchedCHI) {
+func (e *Exporter) removeFromWatched(chi *metrics.WatchedCHI) {
e.mutex.Lock()
defer e.mutex.Unlock()
log.V(1).Infof("Remove ClickHouseInstallation (%s/%s)", chi.Namespace, chi.Name)
- e.chInstallations.remove(chi.indexKey())
+ e.chInstallations.remove(chi.IndexKey())
}
// updateWatched updates Exporter.chInstallation map with values from chInstances slice
-func (e *Exporter) updateWatched(chi *WatchedCHI) {
+func (e *Exporter) updateWatched(chi *metrics.WatchedCHI) {
e.mutex.Lock()
defer e.mutex.Unlock()
log.V(1).Infof("Update ClickHouseInstallation (%s/%s): %s", chi.Namespace, chi.Name, chi)
- e.chInstallations.set(chi.indexKey(), chi)
+ e.chInstallations.set(chi.IndexKey(), chi)
}
// newHostFetcher returns a new metrics fetcher for the specified host
-func (e *Exporter) newHostFetcher(host *WatchedHost) *ClickHouseMetricsFetcher {
+func (e *Exporter) newHostFetcher(host *metrics.WatchedHost) *ClickHouseMetricsFetcher {
// Make base cluster connection params
clusterConnectionParams := clickhouse.NewClusterConnectionParamsFromCHOpConfig(chop.Config())
// Adjust base cluster connection params with per-host props
switch clusterConnectionParams.Scheme {
case api.ChSchemeAuto:
switch {
- case api.IsPortAssigned(host.HTTPPort):
+ case types.IsPortAssigned(host.HTTPPort):
clusterConnectionParams.Scheme = "http"
clusterConnectionParams.Port = int(host.HTTPPort)
- case api.IsPortAssigned(host.HTTPSPort):
+ case types.IsPortAssigned(host.HTTPSPort):
clusterConnectionParams.Scheme = "https"
clusterConnectionParams.Port = int(host.HTTPSPort)
}
@@ -168,33 +171,33 @@ func (e *Exporter) newHostFetcher(host *WatchedHost) *ClickHouseMetricsFetcher {
}
// collectHostMetrics collects metrics from one host and writes them into chan
-func (e *Exporter) collectHostMetrics(ctx context.Context, chi *WatchedCHI, host *WatchedHost, c chan<- prometheus.Metric) {
+func (e *Exporter) collectHostMetrics(ctx context.Context, chi *metrics.WatchedCHI, host *metrics.WatchedHost, c chan<- prometheus.Metric) {
fetcher := e.newHostFetcher(host)
writer := NewCHIPrometheusWriter(c, chi, host)
wg := sync.WaitGroup{}
wg.Add(6)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostSystemMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostSystemPartsMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostSystemReplicasMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostMutationsMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostSystemDisksMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
- go func(ctx context.Context, host *WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
+ go func(ctx context.Context, host *metrics.WatchedHost, fetcher *ClickHouseMetricsFetcher, writer *CHIPrometheusWriter) {
e.collectHostDetachedPartsMetrics(ctx, host, fetcher, writer)
wg.Done()
}(ctx, host, fetcher, writer)
@@ -203,7 +206,7 @@ func (e *Exporter) collectHostMetrics(ctx context.Context, chi *WatchedCHI, host
func (e *Exporter) collectHostSystemMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -224,7 +227,7 @@ func (e *Exporter) collectHostSystemMetrics(
func (e *Exporter) collectHostSystemPartsMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -248,7 +251,7 @@ func (e *Exporter) collectHostSystemPartsMetrics(
func (e *Exporter) collectHostSystemReplicasMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -269,7 +272,7 @@ func (e *Exporter) collectHostSystemReplicasMetrics(
func (e *Exporter) collectHostMutationsMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -290,7 +293,7 @@ func (e *Exporter) collectHostMutationsMetrics(
func (e *Exporter) collectHostSystemDisksMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -311,7 +314,7 @@ func (e *Exporter) collectHostSystemDisksMetrics(
func (e *Exporter) collectHostDetachedPartsMetrics(
ctx context.Context,
- host *WatchedHost,
+ host *metrics.WatchedHost,
fetcher *ClickHouseMetricsFetcher,
writer *CHIPrometheusWriter,
) {
@@ -337,10 +340,10 @@ func (e *Exporter) getWatchedCHI(w http.ResponseWriter, r *http.Request) {
}
// fetchCHI decodes chi from the request
-func (e *Exporter) fetchCHI(r *http.Request) (*WatchedCHI, error) {
- chi := &WatchedCHI{}
+func (e *Exporter) fetchCHI(r *http.Request) (*metrics.WatchedCHI, error) {
+ chi := &metrics.WatchedCHI{}
if err := json.NewDecoder(r.Body).Decode(chi); err == nil {
- if chi.isValid() {
+ if chi.IsValid() {
return chi, nil
}
}
@@ -391,18 +394,18 @@ func (e *Exporter) DiscoveryWatchedCHIs(kubeClient kube.Interface, chopClient *c
continue
}
- if !chi.GetStatus().HasNormalizedCHICompleted() {
+ if !chi.GetStatus().HasNormalizedCRCompleted() {
log.V(1).Infof("CHI %s/%s is not completed yet, skip it", chi.Namespace, chi.Name)
continue
}
log.V(1).Infof("CHI %s/%s is completed, add it", chi.Namespace, chi.Name)
- normalizer := chiNormalizer.NewNormalizer(func(namespace, name string) (*core.Secret, error) {
+ normalizer := chiNormalizer.New(func(namespace, name string) (*core.Secret, error) {
return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, controller.NewGetOptions())
})
- normalized, _ := normalizer.CreateTemplatedCHI(chi, chiNormalizer.NewOptions())
+ normalized, _ := normalizer.CreateTemplated(chi, normalizerCommon.NewOptions())
- watchedCHI := NewWatchedCHI(normalized)
+ watchedCHI := metrics.NewWatchedCHI(normalized)
e.updateWatched(watchedCHI)
}
}
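
Because Exporter implements prometheus.Collector, the relocated package still wires into a scrape endpoint in a couple of lines. A sketch under stated assumptions — the port and timeout values are illustrative:

```go
// Sketch only: exposing the Exporter over /metrics, assuming the package
// move to pkg/metrics/clickhouse described in this diff.
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/altinity/clickhouse-operator/pkg/metrics/clickhouse"
)

func main() {
	exporter := clickhouse.NewExporter(30 * time.Second) // collector timeout
	registry := prometheus.NewRegistry()
	registry.MustRegister(exporter) // Exporter satisfies prometheus.Collector

	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8888", nil)
}
```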
diff --git a/pkg/apis/metrics/prometheus_writer.go b/pkg/metrics/clickhouse/prometheus_writer.go
similarity index 96%
rename from pkg/apis/metrics/prometheus_writer.go
rename to pkg/metrics/clickhouse/prometheus_writer.go
index bdecd3659..723d37049 100644
--- a/pkg/apis/metrics/prometheus_writer.go
+++ b/pkg/metrics/clickhouse/prometheus_writer.go
@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
import (
"fmt"
- "github.com/altinity/clickhouse-operator/pkg/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
+ "github.com/altinity/clickhouse-operator/pkg/metrics/operator"
"strconv"
"time"
@@ -40,15 +41,15 @@ const (
// CHIPrometheusWriter specifies writer to prometheus
type CHIPrometheusWriter struct {
out chan<- prometheus.Metric
- chi *WatchedCHI
- host *WatchedHost
+ chi *metrics.WatchedCHI
+ host *metrics.WatchedHost
}
// NewCHIPrometheusWriter creates new CHI prometheus writer
func NewCHIPrometheusWriter(
out chan<- prometheus.Metric,
- chi *WatchedCHI,
- host *WatchedHost,
+ chi *metrics.WatchedCHI,
+ host *metrics.WatchedHost,
) *CHIPrometheusWriter {
return &CHIPrometheusWriter{
out: out,
@@ -227,7 +228,7 @@ func (w *CHIPrometheusWriter) appendHostLabel(labels, values []string) ([]string
func (w *CHIPrometheusWriter) getMandatoryLabelsAndValues() (labelNames []string, labelValues []string) {
// Prepare mandatory set of labels
- labelNames, labelValues = metrics.GetMandatoryLabelsAndValues(w.chi)
+ labelNames, labelValues = operator.GetMandatoryLabelsAndValues(w.chi)
// Append current host label
labelNames, labelValues = w.appendHostLabel(labelNames, labelValues)
diff --git a/pkg/apis/metrics/rest_client.go b/pkg/metrics/clickhouse/rest_client.go
similarity index 78%
rename from pkg/apis/metrics/rest_client.go
rename to pkg/metrics/clickhouse/rest_client.go
index dcdc7d3bc..4c91d22bb 100644
--- a/pkg/apis/metrics/rest_client.go
+++ b/pkg/metrics/clickhouse/rest_client.go
@@ -12,14 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
+
+import "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
// InformMetricsExporterAboutWatchedCHI informs exporter about new watched CHI
-func InformMetricsExporterAboutWatchedCHI(chi *WatchedCHI) error {
+func InformMetricsExporterAboutWatchedCHI(chi *metrics.WatchedCHI) error {
return makeRESTCall(chi, "POST")
}
// InformMetricsExporterToDeleteWatchedCHI informs exporter to delete/forget watched CHI
-func InformMetricsExporterToDeleteWatchedCHI(chi *WatchedCHI) error {
+func InformMetricsExporterToDeleteWatchedCHI(chi *metrics.WatchedCHI) error {
return makeRESTCall(chi, "DELETE")
}
diff --git a/pkg/apis/metrics/rest_machinery.go b/pkg/metrics/clickhouse/rest_machinery.go
similarity index 90%
rename from pkg/apis/metrics/rest_machinery.go
rename to pkg/metrics/clickhouse/rest_machinery.go
index 6376847fd..1eb014433 100644
--- a/pkg/apis/metrics/rest_machinery.go
+++ b/pkg/metrics/clickhouse/rest_machinery.go
@@ -12,17 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
import (
"bytes"
"encoding/json"
"fmt"
+ "github.com/altinity/clickhouse-operator/pkg/apis/metrics"
"io"
"net/http"
)
-func makeRESTCall(chi *WatchedCHI, method string) error {
+func makeRESTCall(chi *metrics.WatchedCHI, method string) error {
url := "http://127.0.0.1:8888/chi"
json, err := json.Marshal(chi)
diff --git a/pkg/apis/metrics/rest_server.go b/pkg/metrics/clickhouse/rest_server.go
similarity index 99%
rename from pkg/apis/metrics/rest_server.go
rename to pkg/metrics/clickhouse/rest_server.go
index 94e76a73e..12027507e 100644
--- a/pkg/apis/metrics/rest_server.go
+++ b/pkg/metrics/clickhouse/rest_server.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package clickhouse
import (
"fmt"
diff --git a/pkg/metrics/metrics.go b/pkg/metrics/operator/metrics.go
similarity index 95%
rename from pkg/metrics/metrics.go
rename to pkg/metrics/operator/metrics.go
index ed0c6f18b..84e4b4689 100644
--- a/pkg/metrics/metrics.go
+++ b/pkg/metrics/operator/metrics.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package metrics
+package operator
import (
"fmt"
@@ -132,16 +132,16 @@ func getLabelsFromAnnotations(chi BaseInfoGetter) (labels []string, values []str
)
}
-func GetMandatoryLabelsAndValues(chi BaseInfoGetter) (labels []string, values []string) {
- labelsFromNames, valuesFromNames := getLabelsFromName(chi)
+func GetMandatoryLabelsAndValues(cr BaseInfoGetter) (labels []string, values []string) {
+ labelsFromNames, valuesFromNames := getLabelsFromName(cr)
labels = append(labels, labelsFromNames...)
values = append(values, valuesFromNames...)
- labelsFromLabels, valuesFromLabels := getLabelsFromLabels(chi)
+ labelsFromLabels, valuesFromLabels := getLabelsFromLabels(cr)
labels = append(labels, labelsFromLabels...)
values = append(values, valuesFromLabels...)
- labelsFromAnnotations, valuesFromAnnotations := getLabelsFromAnnotations(chi)
+ labelsFromAnnotations, valuesFromAnnotations := getLabelsFromAnnotations(cr)
labels = append(labels, labelsFromAnnotations...)
values = append(values, valuesFromAnnotations...)
diff --git a/pkg/model/chi/affinity.go b/pkg/model/chi/affinity.go
deleted file mode 100644
index 8b1360caf..000000000
--- a/pkg/model/chi/affinity.go
+++ /dev/null
@@ -1,946 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "gopkg.in/d4l3k/messagediff.v1"
-
- core "k8s.io/api/core/v1"
- meta "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/apis/deployment"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// NewAffinity creates new Affinity struct
-func NewAffinity(template *api.PodTemplate) *core.Affinity {
- // Pod node affinity scheduling rules.
- nodeAffinity := newNodeAffinity(template)
- // Pod affinity scheduling rules. Ex.: co-locate this pod in the same node, zone, etc
- podAffinity := newPodAffinity(template)
- // Pod anti-affinity scheduling rules. Ex.: avoid putting this pod in the same node, zone, etc
- podAntiAffinity := newPodAntiAffinity(template)
-
- // At least one affinity has to be reasonable
- if (nodeAffinity == nil) && (podAffinity == nil) && (podAntiAffinity == nil) {
- // Neither Affinity nor AntiAffinity specified
- return nil
- }
-
- return &core.Affinity{
- NodeAffinity: nodeAffinity,
- PodAffinity: podAffinity,
- PodAntiAffinity: podAntiAffinity,
- }
-}
-
-// MergeAffinity merges from src into dst and returns dst
-func MergeAffinity(dst *core.Affinity, src *core.Affinity) *core.Affinity {
- if src == nil {
- // Nothing to merge from
- return dst
- }
-
- created := false
- if dst == nil {
- // No receiver specified, allocate a new one
- dst = &core.Affinity{}
- created = true
- }
-
- dst.NodeAffinity = mergeNodeAffinity(dst.NodeAffinity, src.NodeAffinity)
- dst.PodAffinity = mergePodAffinity(dst.PodAffinity, src.PodAffinity)
- dst.PodAntiAffinity = mergePodAntiAffinity(dst.PodAntiAffinity, src.PodAntiAffinity)
-
- empty := (dst.NodeAffinity == nil) && (dst.PodAffinity == nil) && (dst.PodAntiAffinity == nil)
- if created && empty {
- // Do not return empty and internally created dst
- return nil
- }
-
- return dst
-}
-
-// newNodeAffinity
-func newNodeAffinity(template *api.PodTemplate) *core.NodeAffinity {
- if template.Zone.Key == "" {
- return nil
- }
-
- return &core.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
- NodeSelectorTerms: []core.NodeSelectorTerm{
- {
- // A list of node selector requirements by node's labels.
- MatchExpressions: []core.NodeSelectorRequirement{
- {
- Key: template.Zone.Key,
- Operator: core.NodeSelectorOpIn,
- Values: template.Zone.Values,
- },
- },
- // A list of node selector requirements by node's fields.
- //MatchFields: []core.NodeSelectorRequirement{
- // core.NodeSelectorRequirement{},
- //},
- },
- },
- },
-
- // PreferredDuringSchedulingIgnoredDuringExecution: []core.PreferredSchedulingTerm{},
- }
-}
-
-func getNodeSelectorTerms(affinity *core.NodeAffinity) []core.NodeSelectorTerm {
- if affinity == nil {
- return nil
- }
-
- if affinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
- return nil
- }
- return affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
-}
-
-func getNodeSelectorTerm(affinity *core.NodeAffinity, i int) *core.NodeSelectorTerm {
- terms := getNodeSelectorTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendNodeSelectorTerm(affinity *core.NodeAffinity, term *core.NodeSelectorTerm) *core.NodeAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.NodeAffinity{}
- }
- if affinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
- affinity.RequiredDuringSchedulingIgnoredDuringExecution = &core.NodeSelector{}
- }
-
- affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(
- affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
- *term,
- )
-
- return affinity
-}
-
-func getPreferredSchedulingTerms(affinity *core.NodeAffinity) []core.PreferredSchedulingTerm {
- if affinity == nil {
- return nil
- }
-
- return affinity.PreferredDuringSchedulingIgnoredDuringExecution
-}
-
-func getPreferredSchedulingTerm(affinity *core.NodeAffinity, i int) *core.PreferredSchedulingTerm {
- terms := getPreferredSchedulingTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendPreferredSchedulingTerm(affinity *core.NodeAffinity, term *core.PreferredSchedulingTerm) *core.NodeAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.NodeAffinity{}
- }
-
- affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- affinity.PreferredDuringSchedulingIgnoredDuringExecution,
- *term,
- )
-
- return affinity
-}
-
-// mergeNodeAffinity
-func mergeNodeAffinity(dst *core.NodeAffinity, src *core.NodeAffinity) *core.NodeAffinity {
- if src == nil {
- // Nothing to merge from
- return dst
- }
-
- if dst == nil {
- // In case no receiver, it will be allocated by appendNodeSelectorTerm() or appendPreferredSchedulingTerm() if need be
- }
-
- // Merge NodeSelectors
- for i := range getNodeSelectorTerms(src) {
- s := getNodeSelectorTerm(src, i)
- equal := false
- for j := range getNodeSelectorTerms(dst) {
- d := getNodeSelectorTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendNodeSelectorTerm(dst, s)
- }
- }
-
- // Merge PreferredSchedulingTerm
- for i := range getPreferredSchedulingTerms(src) {
- s := getPreferredSchedulingTerm(src, i)
- equal := false
- for j := range getPreferredSchedulingTerms(dst) {
- d := getPreferredSchedulingTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendPreferredSchedulingTerm(dst, s)
- }
- }
-
- return dst
-}
-
-// newPodAffinity
-func newPodAffinity(template *api.PodTemplate) *core.PodAffinity {
- // Return podAffinity only in case something was added into it
- added := false
- podAffinity := &core.PodAffinity{}
-
- for i := range template.PodDistribution {
- podDistribution := &template.PodDistribution[i]
- switch podDistribution.Type {
- case deployment.PodDistributionNamespaceAffinity:
- added = true
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelNamespace: macrosNamespace,
- },
- ),
- )
- case deployment.PodDistributionClickHouseInstallationAffinity:
- added = true
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelCHIName: macrosChiName,
- },
- ),
- )
- case deployment.PodDistributionClusterAffinity:
- added = true
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelClusterName: macrosClusterName,
- },
- ),
- )
- case deployment.PodDistributionShardAffinity:
- added = true
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelShardName: macrosShardName,
- },
- ),
- )
- case deployment.PodDistributionReplicaAffinity:
- added = true
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelReplicaName: macrosReplicaName,
- },
- ),
- )
- case deployment.PodDistributionPreviousTailAffinity:
- // Newer k8s insists on Required for this Affinity
- added = true
- podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchLabels(
- podDistribution,
- map[string]string{
- LabelClusterScopeIndex: macrosClusterScopeCycleHeadPointsToPreviousCycleTail,
- },
- ),
- )
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- newWeightedPodAffinityTermWithMatchLabels(
- 1,
- podDistribution,
- map[string]string{
- LabelClusterScopeIndex: macrosClusterScopeCycleHeadPointsToPreviousCycleTail,
- },
- ),
- )
- }
- }
-
- if added {
- // Has something to return
- return podAffinity
- }
-
- return nil
-}
-
-func getPodAffinityTerms(affinity *core.PodAffinity) []core.PodAffinityTerm {
- if affinity == nil {
- return nil
- }
-
- return affinity.RequiredDuringSchedulingIgnoredDuringExecution
-}
-
-func getPodAffinityTerm(affinity *core.PodAffinity, i int) *core.PodAffinityTerm {
- terms := getPodAffinityTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendPodAffinityTerm(affinity *core.PodAffinity, term *core.PodAffinityTerm) *core.PodAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.PodAffinity{}
- }
-
- affinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- affinity.RequiredDuringSchedulingIgnoredDuringExecution,
- *term,
- )
-
- return affinity
-}
-
-func getWeightedPodAffinityTerms(affinity *core.PodAffinity) []core.WeightedPodAffinityTerm {
- if affinity == nil {
- return nil
- }
-
- return affinity.PreferredDuringSchedulingIgnoredDuringExecution
-}
-
-func getWeightedPodAffinityTerm(affinity *core.PodAffinity, i int) *core.WeightedPodAffinityTerm {
- terms := getWeightedPodAffinityTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendWeightedPodAffinityTerm(affinity *core.PodAffinity, term *core.WeightedPodAffinityTerm) *core.PodAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.PodAffinity{}
- }
-
- affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- affinity.PreferredDuringSchedulingIgnoredDuringExecution,
- *term,
- )
-
- return affinity
-}
-
-// mergePodAffinity
-func mergePodAffinity(dst *core.PodAffinity, src *core.PodAffinity) *core.PodAffinity {
- if src == nil {
- // Nothing to merge from
- return dst
- }
-
- if dst == nil {
- // In case no receiver, it will be allocated by appendPodAffinityTerm() or appendWeightedPodAffinityTerm() if need be
- }
-
- // Merge PodAffinityTerm
- for i := range getPodAffinityTerms(src) {
- s := getPodAffinityTerm(src, i)
- equal := false
- for j := range getPodAffinityTerms(dst) {
- d := getPodAffinityTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendPodAffinityTerm(dst, s)
- }
- }
-
- // Merge WeightedPodAffinityTerm
- for i := range getWeightedPodAffinityTerms(src) {
- s := getWeightedPodAffinityTerm(src, i)
- equal := false
- for j := range getWeightedPodAffinityTerms(dst) {
- d := getWeightedPodAffinityTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendWeightedPodAffinityTerm(dst, s)
- }
- }
-
- return dst
-}
-
-// newMatchLabels
-func newMatchLabels(
- podDistribution *api.PodDistribution,
- matchLabels map[string]string,
-) map[string]string {
- var scopeLabels map[string]string
-
- switch podDistribution.Scope {
- case deployment.PodDistributionScopeShard:
- scopeLabels = map[string]string{
- LabelNamespace: macrosNamespace,
- LabelCHIName: macrosChiName,
- LabelClusterName: macrosClusterName,
- LabelShardName: macrosShardName,
- }
- case deployment.PodDistributionScopeReplica:
- scopeLabels = map[string]string{
- LabelNamespace: macrosNamespace,
- LabelCHIName: macrosChiName,
- LabelClusterName: macrosClusterName,
- LabelReplicaName: macrosReplicaName,
- }
- case deployment.PodDistributionScopeCluster:
- scopeLabels = map[string]string{
- LabelNamespace: macrosNamespace,
- LabelCHIName: macrosChiName,
- LabelClusterName: macrosClusterName,
- }
- case deployment.PodDistributionScopeClickHouseInstallation:
- scopeLabels = map[string]string{
- LabelNamespace: macrosNamespace,
- LabelCHIName: macrosChiName,
- }
- case deployment.PodDistributionScopeNamespace:
- scopeLabels = map[string]string{
- LabelNamespace: macrosNamespace,
- }
- case deployment.PodDistributionScopeGlobal:
- scopeLabels = map[string]string{}
- }
-
- return util.MergeStringMapsOverwrite(matchLabels, scopeLabels)
-}
-
-// newPodAntiAffinity builds PodAntiAffinity from the pod template's PodDistribution specs
-func newPodAntiAffinity(template *api.PodTemplate) *core.PodAntiAffinity {
- // Return podAntiAffinity only in case something was added into it
- added := false
- podAntiAffinity := &core.PodAntiAffinity{}
-
- // PodDistribution
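- // Each matching PodDistribution type contributes one required anti-affinity term:
- // label-pinning types keep same-scope pods apart within a topology domain, while
- // the Another*AntiAffinity types repel pods of foreign namespaces/CHIs/clusters
- // via NotIn expressions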
- for i := range template.PodDistribution {
- podDistribution := &template.PodDistribution[i]
- switch podDistribution.Type {
- case deployment.PodDistributionClickHouseAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchLabels(
- podDistribution,
- newMatchLabels(
- podDistribution,
- map[string]string{
- LabelAppName: LabelAppValue,
- },
- ),
- ),
- )
- case deployment.PodDistributionMaxNumberPerNode:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchLabels(
- podDistribution,
- newMatchLabels(
- podDistribution,
- map[string]string{
- LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex,
- },
- ),
- ),
- )
- case deployment.PodDistributionShardAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchLabels(
- podDistribution,
- newMatchLabels(
- podDistribution,
- map[string]string{
- LabelShardName: macrosShardName,
- },
- ),
- ),
- )
- case deployment.PodDistributionReplicaAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchLabels(
- podDistribution,
- newMatchLabels(
- podDistribution,
- map[string]string{
- LabelReplicaName: macrosReplicaName,
- },
- ),
- ),
- )
- case deployment.PodDistributionAnotherNamespaceAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchExpressions(
- podDistribution,
- []meta.LabelSelectorRequirement{
- {
- Key: LabelNamespace,
- Operator: meta.LabelSelectorOpNotIn,
- Values: []string{
- macrosNamespace,
- },
- },
- },
- ),
- )
- case deployment.PodDistributionAnotherClickHouseInstallationAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchExpressions(
- podDistribution,
- []meta.LabelSelectorRequirement{
- {
- Key: LabelCHIName,
- Operator: meta.LabelSelectorOpNotIn,
- Values: []string{
- macrosChiName,
- },
- },
- },
- ),
- )
- case deployment.PodDistributionAnotherClusterAntiAffinity:
- added = true
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- newPodAffinityTermWithMatchExpressions(
- podDistribution,
- []meta.LabelSelectorRequirement{
- {
- Key: LabelClusterName,
- Operator: meta.LabelSelectorOpNotIn,
- Values: []string{
- macrosClusterName,
- },
- },
- },
- ),
- )
- }
- }
-
- if added {
- // Has something to return
- return podAntiAffinity
- }
-
- return nil
-}
-
-func getPodAntiAffinityTerms(affinity *core.PodAntiAffinity) []core.PodAffinityTerm {
- if affinity == nil {
- return nil
- }
-
- return affinity.RequiredDuringSchedulingIgnoredDuringExecution
-}
-
-func getPodAntiAffinityTerm(affinity *core.PodAntiAffinity, i int) *core.PodAffinityTerm {
- terms := getPodAntiAffinityTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendPodAntiAffinityTerm(affinity *core.PodAntiAffinity, term *core.PodAffinityTerm) *core.PodAntiAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.PodAntiAffinity{}
- }
-
- affinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- affinity.RequiredDuringSchedulingIgnoredDuringExecution,
- *term,
- )
-
- return affinity
-}
-
-func getWeightedPodAntiAffinityTerms(affinity *core.PodAntiAffinity) []core.WeightedPodAffinityTerm {
- if affinity == nil {
- return nil
- }
-
- return affinity.PreferredDuringSchedulingIgnoredDuringExecution
-}
-
-func getWeightedPodAntiAffinityTerm(affinity *core.PodAntiAffinity, i int) *core.WeightedPodAffinityTerm {
- terms := getWeightedPodAntiAffinityTerms(affinity)
- if terms == nil {
- return nil
- }
- if i >= len(terms) {
- return nil
- }
- return &terms[i]
-}
-
-func appendWeightedPodAntiAffinityTerm(affinity *core.PodAntiAffinity, term *core.WeightedPodAffinityTerm) *core.PodAntiAffinity {
- if term == nil {
- return affinity
- }
-
- // Ensure path to terms exists
- if affinity == nil {
- affinity = &core.PodAntiAffinity{}
- }
-
- affinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- affinity.PreferredDuringSchedulingIgnoredDuringExecution,
- *term,
- )
-
- return affinity
-}
-
-// mergePodAntiAffinity merges PodAntiAffinity terms from src into dst, skipping duplicates
-func mergePodAntiAffinity(dst *core.PodAntiAffinity, src *core.PodAntiAffinity) *core.PodAntiAffinity {
- if src == nil {
- // Nothing to merge from
- return dst
- }
-
- // If dst is nil, it will be allocated by appendPodAntiAffinityTerm() or
- // appendWeightedPodAntiAffinityTerm() when needed
-
- // Merge PodAffinityTerm
- for i := range getPodAntiAffinityTerms(src) {
- s := getPodAntiAffinityTerm(src, i)
- equal := false
- for j := range getPodAntiAffinityTerms(dst) {
- d := getPodAntiAffinityTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendPodAntiAffinityTerm(dst, s)
- }
- }
-
- // Merge WeightedPodAffinityTerm
- for i := range getWeightedPodAntiAffinityTerms(src) {
- s := getWeightedPodAntiAffinityTerm(src, i)
- equal := false
- for j := range getWeightedPodAntiAffinityTerms(dst) {
- d := getWeightedPodAntiAffinityTerm(dst, j)
- if _, equal = messagediff.DeepDiff(*s, *d); equal {
- break
- }
- }
- if !equal {
- dst = appendWeightedPodAntiAffinityTerm(dst, s)
- }
- }
-
- return dst
-}
-
-// newPodAffinityTermWithMatchLabels creates a PodAffinityTerm that selects pods by match labels
-func newPodAffinityTermWithMatchLabels(
- podDistribution *api.PodDistribution,
- matchLabels map[string]string,
-) core.PodAffinityTerm {
- return core.PodAffinityTerm{
- LabelSelector: &meta.LabelSelector{
- // Select pods by labels.
- //MatchLabels: map[string]string{
- // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex,
- //},
- MatchLabels: matchLabels,
- // Switch to MatchLabels
- //MatchExpressions: []meta.LabelSelectorRequirement{
- // {
- // Key: LabelAppName,
- // Operator: meta.LabelSelectorOpIn,
- // Values: []string{
- // LabelAppValue,
- // },
- // },
- //},
- },
- TopologyKey: podDistribution.TopologyKey,
- }
-}
-
-// newPodAffinityTermWithMatchExpressions creates a PodAffinityTerm that selects pods by match expressions
-func newPodAffinityTermWithMatchExpressions(
- podDistribution *api.PodDistribution,
- matchExpressions []meta.LabelSelectorRequirement,
-) core.PodAffinityTerm {
- return core.PodAffinityTerm{
- LabelSelector: &meta.LabelSelector{
- // Select pods by label expressions.
- //MatchLabels: map[string]string{
- // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex,
- //},
- //MatchExpressions: []meta.LabelSelectorRequirement{
- // {
- // Key: LabelAppName,
- // Operator: meta.LabelSelectorOpIn,
- // Values: []string{
- // LabelAppValue,
- // },
- // },
- //},
- MatchExpressions: matchExpressions,
- },
- TopologyKey: podDistribution.TopologyKey,
- }
-}
-
-// newWeightedPodAffinityTermWithMatchLabels creates a WeightedPodAffinityTerm that selects pods by match labels
-func newWeightedPodAffinityTermWithMatchLabels(
- weight int32,
- podDistribution *api.PodDistribution,
- matchLabels map[string]string,
-) core.WeightedPodAffinityTerm {
- return core.WeightedPodAffinityTerm{
- Weight: weight,
- PodAffinityTerm: core.PodAffinityTerm{
- LabelSelector: &meta.LabelSelector{
- // Select pods by labels.
- //MatchLabels: map[string]string{
- // LabelClusterScopeCycleIndex: macrosClusterScopeCycleIndex,
- //},
- MatchLabels: matchLabels,
- // Switch to MatchLabels
- //MatchExpressions: []meta.LabelSelectorRequirement{
- // {
- // Key: LabelAppName,
- // Operator: meta.LabelSelectorOpIn,
- // Values: []string{
- // LabelAppValue,
- // },
- // },
- //},
- },
- TopologyKey: podDistribution.TopologyKey,
- },
- }
-}
-
-// PrepareAffinity expands macros in all affinity fields of the pod template for the given host
-func PrepareAffinity(podTemplate *api.PodTemplate, host *api.ChiHost) {
- switch {
- case podTemplate == nil:
- return
- case podTemplate.Spec.Affinity == nil:
- return
- }
-
- // Walk over all affinity fields
-
- if podTemplate.Spec.Affinity.NodeAffinity != nil {
- processNodeSelector(podTemplate.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, host)
- processPreferredSchedulingTerms(podTemplate.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, host)
- }
-
- if podTemplate.Spec.Affinity.PodAffinity != nil {
- processPodAffinityTerms(podTemplate.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, host)
- processWeightedPodAffinityTerms(podTemplate.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, host)
- }
-
- if podTemplate.Spec.Affinity.PodAntiAffinity != nil {
- processPodAffinityTerms(podTemplate.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, host)
- processWeightedPodAffinityTerms(podTemplate.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, host)
- }
-}
-
-// processNodeSelector expands macros in every node selector term
-func processNodeSelector(nodeSelector *core.NodeSelector, host *api.ChiHost) {
- if nodeSelector == nil {
- return
- }
- for i := range nodeSelector.NodeSelectorTerms {
- nodeSelectorTerm := &nodeSelector.NodeSelectorTerms[i]
- processNodeSelectorTerm(nodeSelectorTerm, host)
- }
-}
-
-// processPreferredSchedulingTerms expands macros in every preferred scheduling term
-func processPreferredSchedulingTerms(preferredSchedulingTerms []core.PreferredSchedulingTerm, host *api.ChiHost) {
- for i := range preferredSchedulingTerms {
- nodeSelectorTerm := &preferredSchedulingTerms[i].Preference
- processNodeSelectorTerm(nodeSelectorTerm, host)
- }
-}
-
-// processNodeSelectorTerm expands macros in the term's match expressions and match fields
-func processNodeSelectorTerm(nodeSelectorTerm *core.NodeSelectorTerm, host *api.ChiHost) {
- for i := range nodeSelectorTerm.MatchExpressions {
- nodeSelectorRequirement := &nodeSelectorTerm.MatchExpressions[i]
- processNodeSelectorRequirement(nodeSelectorRequirement, host)
- }
-
- for i := range nodeSelectorTerm.MatchFields {
- nodeSelectorRequirement := &nodeSelectorTerm.MatchFields[i]
- processNodeSelectorRequirement(nodeSelectorRequirement, host)
- }
-}
-
-// processNodeSelectorRequirement expands macros in the requirement's key and values
-func processNodeSelectorRequirement(nodeSelectorRequirement *core.NodeSelectorRequirement, host *api.ChiHost) {
- if nodeSelectorRequirement == nil {
- return
- }
- nodeSelectorRequirement.Key = Macro(host).Line(nodeSelectorRequirement.Key)
- // Expand macros in values as well
- for i := range nodeSelectorRequirement.Values {
- nodeSelectorRequirement.Values[i] = Macro(host).Line(nodeSelectorRequirement.Values[i])
- }
-}
-
-// processPodAffinityTerms expands macros in every pod affinity term
-func processPodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, host *api.ChiHost) {
- for i := range podAffinityTerms {
- podAffinityTerm := &podAffinityTerms[i]
- processPodAffinityTerm(podAffinityTerm, host)
- }
-}
-
-// processWeightedPodAffinityTerms expands macros in every weighted pod affinity term
-func processWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, host *api.ChiHost) {
- for i := range weightedPodAffinityTerms {
- podAffinityTerm := &weightedPodAffinityTerms[i].PodAffinityTerm
- processPodAffinityTerm(podAffinityTerm, host)
- }
-}
-
-// processPodAffinityTerm expands macros in the term's label selector and topology key
-func processPodAffinityTerm(podAffinityTerm *core.PodAffinityTerm, host *api.ChiHost) {
- if podAffinityTerm == nil {
- return
- }
- processLabelSelector(podAffinityTerm.LabelSelector, host)
- podAffinityTerm.TopologyKey = Macro(host).Line(podAffinityTerm.TopologyKey)
-}
-
-// processLabelSelector expands macros in the selector's match labels and match expressions
-func processLabelSelector(labelSelector *meta.LabelSelector, host *api.ChiHost) {
- if labelSelector == nil {
- return
- }
-
- for k := range labelSelector.MatchLabels {
- labelSelector.MatchLabels[k] = Macro(host).Line(labelSelector.MatchLabels[k])
- }
- for j := range labelSelector.MatchExpressions {
- labelSelectorRequirement := &labelSelector.MatchExpressions[j]
- processLabelSelectorRequirement(labelSelectorRequirement, host)
- }
-}
-
-// processLabelSelectorRequirement expands macros in the requirement's key and values
-func processLabelSelectorRequirement(labelSelectorRequirement *meta.LabelSelectorRequirement, host *api.ChiHost) {
- if labelSelectorRequirement == nil {
- return
- }
- labelSelectorRequirement.Key = Macro(host).Line(labelSelectorRequirement.Key)
- // Expand macros in values as well
- for i := range labelSelectorRequirement.Values {
- labelSelectorRequirement.Values[i] = Macro(host).Line(labelSelectorRequirement.Values[i])
- }
-}
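
The four merge helpers in the deleted affinity.go share one pattern: append a source term to the destination only when no deep-equal copy is already there. Below is a dependency-free sketch of that pattern, using reflect.DeepEqual in place of messagediff.DeepDiff and an illustrative `term` type instead of core.PodAffinityTerm:

```go
package main

import (
	"fmt"
	"reflect"
)

// term is an illustrative stand-in for core.PodAffinityTerm.
type term struct {
	TopologyKey string
	MatchLabels map[string]string
}

// mergeTerms appends each src term to dst unless an identical term is already
// present, so merging the same source twice leaves dst unchanged (idempotent).
func mergeTerms(dst, src []term) []term {
	for _, s := range src {
		duplicate := false
		for _, d := range dst {
			if reflect.DeepEqual(s, d) {
				duplicate = true
				break
			}
		}
		if !duplicate {
			dst = append(dst, s)
		}
	}
	return dst
}

func main() {
	dst := []term{
		{TopologyKey: "kubernetes.io/hostname", MatchLabels: map[string]string{"shard": "0"}},
	}
	src := []term{
		// Deep-equal to the existing term, so it is skipped
		{TopologyKey: "kubernetes.io/hostname", MatchLabels: map[string]string{"shard": "0"}},
		// New term, so it is appended
		{TopologyKey: "topology.kubernetes.io/zone", MatchLabels: map[string]string{"shard": "0"}},
	}
	fmt.Println(len(mergeTerms(dst, src))) // 2
}
```
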
diff --git a/pkg/model/chi/annotator.go b/pkg/model/chi/annotator.go
deleted file mode 100644
index a01ef80af..000000000
--- a/pkg/model/chi/annotator.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- core "k8s.io/api/core/v1"
-
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/chop"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// Annotator is an entity which can annotate CHI artifacts
-type Annotator struct {
- chi *api.ClickHouseInstallation
-}
-
-// NewAnnotator creates a new annotator for the given CHI
-func NewAnnotator(chi *api.ClickHouseInstallation) *Annotator {
- return &Annotator{
- chi: chi,
- }
-}
-
-// GetConfigMapCHICommon returns annotations for the CHI common ConfigMap
-func (a *Annotator) GetConfigMapCHICommon() map[string]string {
- return util.MergeStringMapsOverwrite(
- a.getCHIScope(),
- nil,
- )
-}
-
-// GetConfigMapCHICommonUsers returns annotations for the CHI common users ConfigMap
-func (a *Annotator) GetConfigMapCHICommonUsers() map[string]string {
- return util.MergeStringMapsOverwrite(
- a.getCHIScope(),
- nil,
- )
-}
-
-// GetConfigMapHost returns annotations for the host ConfigMap
-func (a *Annotator) GetConfigMapHost(host *api.ChiHost) map[string]string {
- return util.MergeStringMapsOverwrite(
- a.GetHostScope(host),
- nil,
- )
-}
-
-// GetServiceCHI returns annotations for the CHI Service
-func (a *Annotator) GetServiceCHI(chi *api.ClickHouseInstallation) map[string]string {
- return util.MergeStringMapsOverwrite(
- a.getCHIScope(),
- nil,
- )
-}
-
-// GetServiceCluster returns annotations for the cluster Service
-func (a *Annotator) GetServiceCluster(cluster *api.Cluster) map[string]string {
- return util.MergeStringMapsOverwrite(
- a.GetClusterScope(cluster),
- nil,
- )
-}
-
-// GetServiceShard returns annotations for the shard Service
-func (a *Annotator) GetServiceShard(shard *api.ChiShard) map[string]string {
- return util.MergeStringMapsOverwrite(
- a.getShardScope(shard),
- nil,
- )
-}
-
-// GetServiceHost returns annotations for the host Service
-func (a *Annotator) GetServiceHost(host *api.ChiHost) map[string]string {
- return util.MergeStringMapsOverwrite(
- a.GetHostScope(host),
- nil,
- )
-}
-
-// getCHIScope gets annotations for CHI-scoped object
-func (a *Annotator) getCHIScope() map[string]string {
- // Combine generated annotations and CHI-provided annotations
- return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
-}
-
-// GetClusterScope gets annotations for Cluster-scoped object
-func (a *Annotator) GetClusterScope(cluster *api.Cluster) map[string]string {
- // Combine generated annotations and CHI-provided annotations
- return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
-}
-
-// getShardScope gets annotations for Shard-scoped object
-func (a *Annotator) getShardScope(shard *api.ChiShard) map[string]string {
- // Combine generated annotations and CHI-provided annotations
- return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
-}
-
-// GetHostScope gets annotations for Host-scoped object
-func (a *Annotator) GetHostScope(host *api.ChiHost) map[string]string {
- return a.filterOutPredefined(a.appendCHIProvidedTo(nil))
-}
-
-// filterOutPredefined filters out predefined values
-func (a *Annotator) filterOutPredefined(m map[string]string) map[string]string {
- return util.CopyMapFilter(m, nil, util.AnnotationsTobeSkipped)
-}
-
-// appendCHIProvidedTo appends CHI-provided annotations to specified annotations
-func (a *Annotator) appendCHIProvidedTo(dst map[string]string) map[string]string {
- source := util.CopyMapFilter(a.chi.Annotations, chop.Config().Annotation.Include, chop.Config().Annotation.Exclude)
- return util.MergeStringMapsOverwrite(dst, source)
-}
-
-// GetPV returns annotations for the PersistentVolume
-func (a *Annotator) GetPV(pv *core.PersistentVolume, host *api.ChiHost) map[string]string {
- return util.MergeStringMapsOverwrite(pv.Annotations, a.GetHostScope(host))
-}
-
-// GetPVC returns annotations for the PersistentVolumeClaim
-func (a *Annotator) GetPVC(
- pvc *core.PersistentVolumeClaim,
- host *api.ChiHost,
- template *api.VolumeClaimTemplate,
-) map[string]string {
- annotations := util.MergeStringMapsOverwrite(pvc.Annotations, template.ObjectMeta.Annotations)
- return util.MergeStringMapsOverwrite(annotations, a.GetHostScope(host))
-}
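
Every getter in the deleted annotator.go funnels through the same pipeline: copy the CHI's own annotations through the operator's include/exclude filters, drop predefined keys, and overlay the result onto the destination map. A simplified sketch of those two map helpers follows (exact-key matching here; the operator's util package is richer, so treat the names and semantics as illustrative):

```go
package main

import "fmt"

func contains(list []string, s string) bool {
	for _, item := range list {
		if item == s {
			return true
		}
	}
	return false
}

// copyMapFilter copies entries of src whose keys pass the include/exclude
// lists. An empty include list means "include everything".
func copyMapFilter(src map[string]string, include, exclude []string) map[string]string {
	dst := map[string]string{}
	for k, v := range src {
		if len(include) > 0 && !contains(include, k) {
			continue
		}
		if contains(exclude, k) {
			continue
		}
		dst[k] = v
	}
	return dst
}

// mergeStringMapsOverwrite overlays src onto dst; src values win on conflict.
func mergeStringMapsOverwrite(dst, src map[string]string) map[string]string {
	if dst == nil {
		dst = map[string]string{}
	}
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func main() {
	chiAnnotations := map[string]string{
		"team":                "data",
		"internal/managed-by": "ci", // pretend this key is excluded by operator config
	}
	filtered := copyMapFilter(chiAnnotations, nil, []string{"internal/managed-by"})
	fmt.Println(mergeStringMapsOverwrite(nil, filtered)) // map[team:data]
}
```
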
diff --git a/pkg/model/chi/ch_config_files_generator.go b/pkg/model/chi/ch_config_files_generator.go
deleted file mode 100644
index ad423f09d..000000000
--- a/pkg/model/chi/ch_config_files_generator.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/util"
-)
-
-// ClickHouseConfigFilesGenerator generates ClickHouse configuration files content for the specified CHI
-type ClickHouseConfigFilesGenerator struct {
- // ClickHouse config generator
- chConfigGenerator *ClickHouseConfigGenerator
- // clickhouse-operator configuration
- chopConfig *api.OperatorConfig
-}
-
-// NewClickHouseConfigFilesGenerator creates new clickhouse configuration generator object
-func NewClickHouseConfigFilesGenerator(
- chConfigGenerator *ClickHouseConfigGenerator,
- chopConfig *api.OperatorConfig,
-) *ClickHouseConfigFilesGenerator {
- return &ClickHouseConfigFilesGenerator{
- chConfigGenerator: chConfigGenerator,
- chopConfig: chopConfig,
- }
-}
-
-// ClickHouseConfigFilesGeneratorOptions specifies options for clickhouse configuration generator
-type ClickHouseConfigFilesGeneratorOptions struct {
- RemoteServersGeneratorOptions *RemoteServersGeneratorOptions
-}
-
-// NewClickHouseConfigFilesGeneratorOptions creates new options for clickhouse configuration generator
-func NewClickHouseConfigFilesGeneratorOptions() *ClickHouseConfigFilesGeneratorOptions {
- return &ClickHouseConfigFilesGeneratorOptions{}
-}
-
-// GetRemoteServersGeneratorOptions gets remote-servers generator options
-func (o *ClickHouseConfigFilesGeneratorOptions) GetRemoteServersGeneratorOptions() *RemoteServersGeneratorOptions {
- if o == nil {
- return nil
- }
- return o.RemoteServersGeneratorOptions
-}
-
-// SetRemoteServersGeneratorOptions sets remote-servers generator options
-func (o *ClickHouseConfigFilesGeneratorOptions) SetRemoteServersGeneratorOptions(opts *RemoteServersGeneratorOptions) *ClickHouseConfigFilesGeneratorOptions {
- if o == nil {
- return nil
- }
- o.RemoteServersGeneratorOptions = opts
-
- return o
-}
-
-// defaultClickHouseConfigFilesGeneratorOptions creates new default options for the clickhouse config files generator
-func defaultClickHouseConfigFilesGeneratorOptions() *ClickHouseConfigFilesGeneratorOptions {
- return NewClickHouseConfigFilesGeneratorOptions()
-}
-
-// CreateConfigFilesGroupCommon creates common config files
-func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupCommon(options *ClickHouseConfigFilesGeneratorOptions) map[string]string {
- if options == nil {
- options = defaultClickHouseConfigFilesGeneratorOptions()
- }
- commonConfigSections := make(map[string]string)
- // commonConfigSections maps section name to section XML config of the following sections:
- // 1. remote servers
- // 2. common settings
- // 3. common files
- util.IncludeNonEmpty(commonConfigSections, createConfigSectionFilename(configRemoteServers), c.chConfigGenerator.GetRemoteServers(options.GetRemoteServersGeneratorOptions()))
- util.IncludeNonEmpty(commonConfigSections, createConfigSectionFilename(configSettings), c.chConfigGenerator.GetSettingsGlobal())
- util.MergeStringMapsOverwrite(commonConfigSections, c.chConfigGenerator.GetSectionFromFiles(api.SectionCommon, true, nil))
- // Extra user-specified config files
- util.MergeStringMapsOverwrite(commonConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.CommonConfigFiles)
-
- return commonConfigSections
-}
-
-// CreateConfigFilesGroupUsers creates users config files
-func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupUsers() map[string]string {
- commonUsersConfigSections := make(map[string]string)
- // commonUsersConfigSections maps section name to section XML config of the following sections:
- // 1. users
- // 2. quotas
- // 3. profiles
- // 4. user files
- util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configUsers), c.chConfigGenerator.GetUsers())
- util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configQuotas), c.chConfigGenerator.GetQuotas())
- util.IncludeNonEmpty(commonUsersConfigSections, createConfigSectionFilename(configProfiles), c.chConfigGenerator.GetProfiles())
- util.MergeStringMapsOverwrite(commonUsersConfigSections, c.chConfigGenerator.GetSectionFromFiles(api.SectionUsers, false, nil))
- // Extra user-specified config files
- util.MergeStringMapsOverwrite(commonUsersConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.UsersConfigFiles)
-
- return commonUsersConfigSections
-}
-
-// CreateConfigFilesGroupHost creates host config files
-func (c *ClickHouseConfigFilesGenerator) CreateConfigFilesGroupHost(host *api.ChiHost) map[string]string {
- // Prepare config files map for this replica deployment: filename->content
- hostConfigSections := make(map[string]string)
- util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configMacros), c.chConfigGenerator.GetHostMacros(host))
- util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configHostnamePorts), c.chConfigGenerator.GetHostHostnameAndPorts(host))
- util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configZookeeper), c.chConfigGenerator.GetHostZookeeper(host))
- util.IncludeNonEmpty(hostConfigSections, createConfigSectionFilename(configSettings), c.chConfigGenerator.GetSettings(host))
- util.MergeStringMapsOverwrite(hostConfigSections, c.chConfigGenerator.GetSectionFromFiles(api.SectionHost, true, host))
- // Extra user-specified config files
- util.MergeStringMapsOverwrite(hostConfigSections, c.chopConfig.ClickHouse.Config.File.Runtime.HostConfigFiles)
-
- return hostConfigSections
-}
-
-// createConfigSectionFilename creates the filename of a configuration file.
-// The filename depends on the section it will contain.
-func createConfigSectionFilename(section string) string {
- return "chop-generated-" + section + ".xml"
-}
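
The three Create* groups in the deleted ch_config_files_generator.go map 1:1 onto the common, common-users, and per-host ConfigMaps, and every generated section lands in its own chop-generated-<section>.xml entry. A tiny sketch of the naming and the include-if-non-empty rule (includeNonEmpty is a stand-in for util.IncludeNonEmpty):

```go
package main

import "fmt"

// createConfigSectionFilename mirrors the helper above: section "remote_servers"
// becomes "chop-generated-remote_servers.xml".
func createConfigSectionFilename(section string) string {
	return "chop-generated-" + section + ".xml"
}

// includeNonEmpty adds a file to the group only when its content is non-empty,
// so sections that generate nothing never produce empty XML files.
func includeNonEmpty(dst map[string]string, filename, content string) {
	if content == "" {
		return
	}
	dst[filename] = content
}

func main() {
	common := map[string]string{}
	includeNonEmpty(common, createConfigSectionFilename("remote_servers"), "<yandex>...</yandex>")
	includeNonEmpty(common, createConfigSectionFilename("settings"), "") // skipped: empty content
	for name := range common {
		fmt.Println(name) // chop-generated-remote_servers.xml
	}
}
```
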
diff --git a/pkg/model/chi/ch_config_generator.go b/pkg/model/chi/ch_config_generator.go
deleted file mode 100644
index 99aa86d53..000000000
--- a/pkg/model/chi/ch_config_generator.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package chi
-
-import (
- "bytes"
- "fmt"
- "strings"
-
- api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
- "github.com/altinity/clickhouse-operator/pkg/util"
- "github.com/altinity/clickhouse-operator/pkg/xml"
-)
-
-const (
- // Pattern for string path used in <distributed_ddl><path>XXX</path></distributed_ddl>
- DistributedDDLPathPattern = "/clickhouse/%s/task_queue/ddl"
-
- // Special auto-generated clusters. Each of these clusters spans all replicas in the CHI:
- // 1. Cluster with one shard and all replicas. Used to duplicate data over all replicas.
- // 2. Cluster with all shards (1 replica). Used to gather/scatter data over all replicas.
- OneShardAllReplicasClusterName = "all-replicated"
- AllShardsOneReplicaClusterName = "all-sharded"
-)
-
-// ClickHouseConfigGenerator generates ClickHouse configuration file content for the specified CHI.
-// ClickHouse configuration files are XML at the moment, so the config generator provides a set of Get*()
-// functions which produce XML fragments that are parts of the ClickHouse configuration and can/should be used as ClickHouse config files.
-type ClickHouseConfigGenerator struct {
- chi *api.ClickHouseInstallation
-}
-
-// NewClickHouseConfigGenerator returns new ClickHouseConfigGenerator struct
-func NewClickHouseConfigGenerator(chi *api.ClickHouseInstallation) *ClickHouseConfigGenerator {
- return &ClickHouseConfigGenerator{
- chi: chi,
- }
-}
-
-// GetUsers creates data for users section. Used as "users.xml"
-func (c *ClickHouseConfigGenerator) GetUsers() string {
- return c.generateXMLConfig(c.chi.Spec.Configuration.Users, configUsers)
-}
-
-// GetProfiles creates data for profiles section. Used as "profiles.xml"
-func (c *ClickHouseConfigGenerator) GetProfiles() string {
- return c.generateXMLConfig(c.chi.Spec.Configuration.Profiles, configProfiles)
-}
-
-// GetQuotas creates data for "quotas.xml"
-func (c *ClickHouseConfigGenerator) GetQuotas() string {
- return c.generateXMLConfig(c.chi.Spec.Configuration.Quotas, configQuotas)
-}
-
-// GetSettingsGlobal creates data for "settings.xml"
-func (c *ClickHouseConfigGenerator) GetSettingsGlobal() string {
- // No host specified means request to generate common config
- return c.generateXMLConfig(c.chi.Spec.Configuration.Settings, "")
-}
-
-// GetSettings creates data for "settings.xml"
-func (c *ClickHouseConfigGenerator) GetSettings(host *api.ChiHost) string {
- // Generate config for the specified host
- return c.generateXMLConfig(host.Settings, "")
-}
-
-// GetSectionFromFiles creates data for custom common config files
-func (c *ClickHouseConfigGenerator) GetSectionFromFiles(section api.SettingsSection, includeUnspecified bool, host *api.ChiHost) map[string]string {
- var files *api.Settings
- if host == nil {
- // We are looking into Common files
- files = c.chi.Spec.Configuration.Files
- } else {
- // We are looking into host's personal files
- files = host.Files
- }
-
- // Extract particular section from files
-
- return files.GetSection(section, includeUnspecified)
-}
-
-// GetHostZookeeper creates data for "zookeeper.xml"
-func (c *ClickHouseConfigGenerator) GetHostZookeeper(host *api.ChiHost) string {
- zk := host.GetZookeeper()
-
- if zk.IsEmpty() {
- // No Zookeeper nodes provided
- return ""
- }
-
- b := &bytes.Buffer{}
- // <yandex>
- //     <zookeeper>
- util.Iline(b, 0, "<"+xmlTagYandex+">")
- util.Iline(b, 4, "<zookeeper>")
-
- // Append Zookeeper nodes
- for i := range zk.Nodes {
- // Convenience wrapper
- node := &zk.Nodes[i]
- // <node>
- //     <host>HOST</host>
- //     <port>PORT</port>
- //     <secure>%d</secure>
- // </node>
- util.Iline(b, 8, "<node>")
- util.Iline(b, 8, "    <host>%s</host>", node.Host)
- util.Iline(b, 8, "    <port>%d</port>", node.Port)
- if node.Secure.HasValue() {
- util.Iline(b, 8, "    <secure>%d</secure>", c.getSecure(node))
- }
- util.Iline(b, 8, "</node>")
- }
-
- // Append session_timeout_ms
- if zk.SessionTimeoutMs > 0 {
- util.Iline(b, 8, "<session_timeout_ms>%d</session_timeout_ms>", zk.SessionTimeoutMs)
- }
-
- // Append operation_timeout_ms
- if zk.OperationTimeoutMs > 0 {
- util.Iline(b, 8, "<operation_timeout_ms>%d</operation_timeout_ms>", zk.OperationTimeoutMs)
- }
-
- // Append root
- if len(zk.Root) > 0 {
- util.Iline(b, 8, "<root>%s</root>", zk.Root)
- }
-
- // Append identity
- if len(zk.Identity) > 0 {
- util.Iline(b, 8, "<identity>%s</identity>", zk.Identity)
- }
-
- //     </zookeeper>
- util.Iline(b, 4, "</zookeeper>")
-
- // <distributed_ddl>
- //     <path>/x/y/chi.name/z</path>
- //     <profile>X</profile>
- util.Iline(b, 4, "<distributed_ddl>")
- util.Iline(b, 4, "    <path>%s</path>", c.getDistributedDDLPath())
- if c.chi.Spec.Defaults.DistributedDDL.HasProfile() {
- util.Iline(b, 4, "    <profile>%s</profile>", c.chi.Spec.Defaults.DistributedDDL.GetProfile())
- }
- //     </distributed_ddl>
- // </yandex>
- util.Iline(b, 4, "</distributed_ddl>")
- util.Iline(b, 0, "</"+xmlTagYandex+">")
-
- return b.String()
-}
-
-// RemoteServersGeneratorOptions specifies options for remote-servers generator
-type RemoteServersGeneratorOptions struct {
- exclude struct {
- attributes *api.HostReconcileAttributes
- hosts []*api.ChiHost
- }
-}
-
-// NewRemoteServersGeneratorOptions creates new remote-servers generator options
-func NewRemoteServersGeneratorOptions() *RemoteServersGeneratorOptions {
- return &RemoteServersGeneratorOptions{}
-}
-
-// ExcludeHost specifies to exclude a host
-func (o *RemoteServersGeneratorOptions) ExcludeHost(host *api.ChiHost) *RemoteServersGeneratorOptions {
- if (o == nil) || (host == nil) {
- return o
- }
-
- o.exclude.hosts = append(o.exclude.hosts, host)
- return o
-}
-
-// ExcludeHosts specifies to exclude list of hosts
-func (o *RemoteServersGeneratorOptions) ExcludeHosts(hosts ...*api.ChiHost) *RemoteServersGeneratorOptions {
- if (o == nil) || (len(hosts) == 0) {
- return o
- }
-
- o.exclude.hosts = append(o.exclude.hosts, hosts...)
- return o
-}
-
-// ExcludeReconcileAttributes specifies to exclude reconcile attributes
-func (o *RemoteServersGeneratorOptions) ExcludeReconcileAttributes(attrs *api.HostReconcileAttributes) *RemoteServersGeneratorOptions {
- if (o == nil) || (attrs == nil) {
- return o
- }
-
- o.exclude.attributes = attrs
- return o
-}
-
-// Exclude tells whether to exclude the host
-func (o *RemoteServersGeneratorOptions) Exclude(host *api.ChiHost) bool {
- if o == nil {
- return false
- }
-
- if o.exclude.attributes.Any(host.GetReconcileAttributes()) {
- // Reconcile attributes specify to exclude this host
- return true
- }
-
- for _, val := range o.exclude.hosts {
- // Host is in the list to be excluded
- if val == host {
- return true
- }
- }
-
- return false
-}
-
-// Include tells whether to include the host
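-// Note: for a nil receiver both Include and Exclude report false; callers are
-// expected to substitute default options before walking hosts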
-func (o *RemoteServersGeneratorOptions) Include(host *api.ChiHost) bool {
- if o == nil {
- return false
- }
-
- if o.exclude.attributes.Any(host.GetReconcileAttributes()) {
- // Reconcile attributes specify to exclude this host
- return false
- }
-
- for _, val := range o.exclude.hosts {
- // Host is in the list to be excluded
- if val == host {
- return false
- }
- }
-
- return true
-}
-
-// String returns string representation
-func (o *RemoteServersGeneratorOptions) String() string {
- if o == nil {
- return "(nil)"
- }
-
- var hostnames []string
- for _, host := range o.exclude.hosts {
- hostnames = append(hostnames, host.Name)
- }
- return fmt.Sprintf("exclude hosts: %s, attributes: %s", "["+strings.Join(hostnames, ",")+"]", o.exclude.attributes)
-}
-
-// defaultRemoteServersGeneratorOptions
-func defaultRemoteServersGeneratorOptions() *RemoteServersGeneratorOptions {
- return NewRemoteServersGeneratorOptions()
-}
-
-// CHIHostsNum counts hosts according to the options
-func (c *ClickHouseConfigGenerator) CHIHostsNum(options *RemoteServersGeneratorOptions) int {
- num := 0
- c.chi.WalkHosts(func(host *api.ChiHost) error {
- if options.Include(host) {
- num++
- }
- return nil
- })
- return num
-}
-
-// ClusterHostsNum counts hosts according to the options
-func (c *ClickHouseConfigGenerator) ClusterHostsNum(cluster *api.Cluster, options *RemoteServersGeneratorOptions) int {
- num := 0
- // Count hosts of each shard
- cluster.WalkShards(func(index int, shard *api.ChiShard) error {
- num += c.ShardHostsNum(shard, options)
- return nil
- })
- return num
-}
-
-// ShardHostsNum counts hosts according to the options
-func (c *ClickHouseConfigGenerator) ShardHostsNum(shard *api.ChiShard, options *RemoteServersGeneratorOptions) int {
- num := 0
- shard.WalkHosts(func(host *api.ChiHost) error {
- if options.Include(host) {
- num++
- }
- return nil
- })
- return num
-}
-
-func (c *ClickHouseConfigGenerator) getRemoteServersReplica(host *api.ChiHost, b *bytes.Buffer) {
- // <replica>
- //     <host>XXX</host>
- //     <port>XXX</port>
- //     <secure>XXX</secure>
- // </replica>
- var port int32
- if host.IsSecure() {
- port = host.TLSPort
- } else {
- port = host.TCPPort
- }
- util.Iline(b, 16, "<replica>")
- util.Iline(b, 16, "    <host>%s</host>", c.getRemoteServersReplicaHostname(host))
- util.Iline(b, 16, "    <port>%d</port>", port)
- util.Iline(b, 16, "    <secure>%d</secure>", c.getSecure(host))
- util.Iline(b, 16, "</replica>")
-}
-
-// GetRemoteServers creates "remote_servers.xml" content and calculates data generation parameters for other sections
-func (c *ClickHouseConfigGenerator) GetRemoteServers(options *RemoteServersGeneratorOptions) string {
- if options == nil {
- options = defaultRemoteServersGeneratorOptions()
- }
-
- b := &bytes.Buffer{}
-
- // <yandex>
- //     <remote_servers>
- util.Iline(b, 0, "<"+xmlTagYandex+">")
- util.Iline(b, 4, "<remote_servers>")
-
- util.Iline(b, 8, "<!-- User-specified clusters -->")
-
- // Build each cluster XML
- c.chi.WalkClusters(func(cluster *api.Cluster) error {
- if c.ClusterHostsNum(cluster, options) < 1 {
- // Skip empty cluster
- return nil
- }
- // <my_cluster_name>
- util.Iline(b, 8, "<%s>", cluster.Name)
-
- // <secret>VALUE</secret>
- switch cluster.Secret.Source() {
- case api.ClusterSecretSourcePlaintext:
- // Secret value is explicitly specified
- util.Iline(b, 12, "<secret>%s</secret>", cluster.Secret.Value)
- case api.ClusterSecretSourceSecretRef, api.ClusterSecretSourceAuto:
- // Use secret via ENV var from secret
- util.Iline(b, 12, `<secret from_env="%s" />`, InternodeClusterSecretEnvName)
- }
-
- // Build each shard XML
- cluster.WalkShards(func(index int, shard *api.ChiShard) error {
- if c.ShardHostsNum(shard, options) < 1 {
- // Skip empty shard
- return nil
- }
-
- // <shard>
- //     <internal_replication>VALUE(true/false)</internal_replication>
- util.Iline(b, 12, "<shard>")
- util.Iline(b, 16, "<internal_replication>%s</internal_replication>", shard.InternalReplication)
-
- // <weight>X</weight>
- if shard.HasWeight() {
- util.Iline(b, 16, "<weight>%d</weight>", shard.GetWeight())
- }
-
- shard.WalkHosts(func(host *api.ChiHost) error {
- if options.Include(host) {
- c.getRemoteServersReplica(host, b)
- }
- return nil
- })
-
- // </shard>
- util.Iline(b, 12, "</shard>")
-
- return nil
- })
- // </my_cluster_name>
- util.Iline(b, 8, "</%s>", cluster.Name)
-
- return nil
- })
-
- // Auto-generated clusters
-
- if c.CHIHostsNum(options) < 1 {
- util.Iline(b, 8, "<!-- Autogenerated clusters are skipped: CHI has no hosts -->")
- } else {
- util.Iline(b, 8, "<!-- Autogenerated clusters -->")
- // One Shard All Replicas
-
- // <all-replicated>
- //     <shard>
- //         <internal_replication>
- clusterName := OneShardAllReplicasClusterName
- util.Iline(b, 8, "<%s>", clusterName)
- util.Iline(b, 8, "    <shard>")
- util.Iline(b, 8, "        <internal_replication>true</internal_replication>")
- c.chi.WalkHosts(func(host *api.ChiHost) error {
- if options.Include(host) {
- c.getRemoteServersReplica(host, b)
- }
- return nil
- })
-
- //         </shard>
- // </all-replicated>
- util.Iline(b, 8, "    </shard>")
- util.Iline(b, 8, "</%s>", clusterName)
-
- // All Shards One Replica
-
- // <all-sharded>
- clusterName = AllShardsOneReplicaClusterName
- util.Iline(b, 8, "<%s>", clusterName)
- c.chi.WalkHosts(func(host *api.ChiHost) error {
- if options.Include(host) {
- // <shard>
- //     <internal_replication>
- util.Iline(b, 12, "<shard>")
- util.Iline(b, 12, "    <internal_replication>false</internal_replication>")
-
- c.getRemoteServersReplica(host, b)
-
- // </shard>
- util.Iline(b, 12, "</shard>")
- }
- return nil
- })
- // </all-sharded>
- util.Iline(b, 8, "</%s>", clusterName)
- }
-
- //     </remote_servers>
- // </yandex>
- util.Iline(b, 0, "