diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 058769acf..e56730a63 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -327,6 +327,9 @@ spec: pod_role_label: type: string default: "spilo-role" + pod_leader_label_value: + type: string + default: "master" pod_service_account_definition: type: string default: "" diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 2511a09d3..d2ad03d5f 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -194,6 +194,7 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + pod_leader_label_value: master # service account definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_definition: "" diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 3bd9e44f7..38cbf62a4 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -434,6 +434,10 @@ configuration they are grouped under the `kubernetes` key. name of the label assigned to the Postgres pods (and services/endpoints) by the operator. The default is `spilo-role`. +* **pod_leader_label_value** + value assigned to the pod role label when the Postgres role is primary. + The default is `master`. + + * **cluster_labels** list of `name:value` pairs for additional labels assigned to the cluster objects. The default is `application:spilo`. diff --git a/e2e/tests/constants.py b/e2e/tests/constants.py new file mode 100644 index 000000000..96594010d --- /dev/null +++ b/e2e/tests/constants.py @@ -0,0 +1,4 @@ +LEADER_LABEL_VALUE = "master" # value should be the same as in the configmap: pod_leader_label_value +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2" diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 1f42ad4bc..61fcd137e 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -6,6 +6,7 @@ from kubernetes import client, config from kubernetes.client.rest import ApiException +from tests.constants import LEADER_LABEL_VALUE def to_selector(labels): return ",".join(["=".join(lbl) for lbl in labels.items()]) @@ -47,7 +48,7 @@ def get_pg_nodes(self, pg_cluster_name, namespace='default'): replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) @@ -59,7 +60,7 @@ def get_cluster_nodes(self, cluster_labels='application=spilo,cluster-name=acid- r = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 
'Running': r.append(pod.spec.node_name) @@ -351,7 +352,7 @@ def get_cluster_pod(self, role, labels='application=spilo,cluster-name=acid-mini return pods[0] def get_cluster_leader_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): - return self.get_cluster_pod('master', labels, namespace) + return self.get_cluster_pod(LEADER_LABEL_VALUE, labels, namespace) def get_cluster_replica_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): return self.get_cluster_pod('replica', labels, namespace) @@ -383,7 +384,7 @@ def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', na replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) @@ -395,7 +396,7 @@ def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', r = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 'Running': m.append(pod.spec.node_name) elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': r.append(pod.spec.node_name) @@ -622,7 +623,7 @@ def get_pg_nodes(self): replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 04c6465c9..ba9cd6c8a 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -8,13 +8,11 @@ from datetime import datetime, date, timedelta from kubernetes import client +from kubernetes.client.rest import ApiException from tests.k8s_api import K8s -from kubernetes.client.rest import ApiException +from tests.constants import SPILO_CURRENT, SPILO_FULL_IMAGE, SPILO_LAZY, LEADER_LABEL_VALUE -SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" -SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4" -SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2" def to_selector(labels): return ",".join(["=".join(lbl) for lbl in labels.items()]) @@ -154,7 +152,7 @@ def setUpClass(cls): result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr)) try: - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -223,7 +221,7 @@ def test_additional_pod_capabilities(self): k8s.update_config(patch_capabilities) # changed security context of postgres container should trigger a rolling update - 
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") @@ -657,7 +655,7 @@ def test_custom_ssl_certificate(self): "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_tls) # wait for switched over - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods") @@ -860,7 +858,7 @@ def test_enable_load_balancer(self): k8s = self.k8s cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}' - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'ClusterIP', "Expected ClusterIP type initially, found {}") @@ -875,7 +873,7 @@ def test_enable_load_balancer(self): k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'LoadBalancer', "Expected LoadBalancer service type for master, found {}") @@ -893,7 +891,7 @@ def test_enable_load_balancer(self): k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'ClusterIP', "Expected LoadBalancer service type for master, found {}") @@ -1226,7 +1224,7 @@ def get_annotations(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14") @@ -1251,8 +1249,8 @@ def get_annotations(): "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 14, "Version should not be upgraded") @@ -1277,7 +1275,7 @@ def get_annotations(): 
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16") @@ -1303,8 +1301,8 @@ def get_annotations(): "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set") @@ -1314,7 +1312,7 @@ def get_annotations(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) fourth_annotations = get_annotations() @@ -1432,7 +1430,7 @@ def test_resource_generation(self): "Operator does not get in sync") # wait for switched over - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) def verify_pod_resources(): @@ -1464,7 +1462,7 @@ def test_multi_namespace_support(self): try: k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml") - k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) + k8s.wait_for_pod_start("spilo-role={}".format(LEADER_LABEL_VALUE), self.test_namespace) k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace) self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") # acid-test-cluster will be deleted in test_owner_references test @@ -1539,7 +1537,7 @@ def test_node_affinity(self): k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # next master will be switched over and pod needs to be replaced as well to finish the rolling update - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) @@ -1572,7 +1570,7 @@ def test_node_affinity(self): # node affinity change should cause another rolling update and relocation of replica k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + 
k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) except timeout_decorator.TimeoutError: @@ -1633,7 +1631,7 @@ def test_node_readiness_label(self): k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # next master will be switched over and pod needs to be replaced as well to finish the rolling update - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # patch also node where master ran before @@ -1921,7 +1919,7 @@ def test_rolling_update_flag(self): podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) for pod in podsList.items: # add flag only to the master to make it appear to the operator as a leftover from a rolling update - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: old_creation_timestamp = pod.metadata.creation_timestamp k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace) else: @@ -1932,7 +1930,7 @@ def test_rolling_update_flag(self): k8s.delete_operator_pod() # operator should now recreate the master pod and do a switchover before - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # check if the former replica is now the new master @@ -2003,7 +2001,7 @@ def test_rolling_update_label_timeout(self): self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail") # wait for next sync, replica should be running normally by now and be ready for switchover - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # check if the former replica is now the new master @@ -2078,7 +2076,7 @@ def test_service_annotations(self): "alice": "bob" } - self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations") + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role={}".format(LEADER_LABEL_VALUE), annotations), "Wrong annotations") self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations") # clean up @@ -2150,7 +2148,7 @@ def test_standby_cluster(self): try: k8s.create_with_kubectl("manifests/standby-manifest.yaml") - k8s.wait_for_pod_start("spilo-role=master," + cluster_label) + k8s.wait_for_pod_start("spilo-role={},".format(LEADER_LABEL_VALUE) + cluster_label) except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -2456,11 +2454,11 @@ def test_zz_cluster_deletion(self): def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"): ''' - Check that there is a single pod in the k8s cluster with the label "spilo-role=master" + Check that there is a single pod in the k8s cluster with the label "spilo-role=primary" or "spilo-role=master" To be called manually after operations that 
affect pods ''' k8s = self.k8s - labels = 'spilo-role=master,cluster-name=' + clusterName + labels = 'spilo-role={},cluster-name='.format(LEADER_LABEL_VALUE) + clusterName num_of_master_pods = k8s.count_pods_with_label(labels, namespace) self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods)) diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 9473ef5ec..211949a1e 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -134,6 +134,7 @@ data: pod_management_policy: "ordered_ready" # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role + pod_leader_label_value: master pod_service_account_definition: "" pod_service_account_name: "postgres-pod" pod_service_account_role_binding_definition: "" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index ded2477d7..85f13c973 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -325,6 +325,9 @@ spec: pod_role_label: type: string default: "spilo-role" + pod_leader_label_value: + type: string + default: "master" pod_service_account_definition: type: string default: "" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 570ebd338..5a6006b7c 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -99,6 +99,7 @@ configuration: pod_management_policy: "ordered_ready" # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role + pod_leader_label_value: master # pod_service_account_definition: "" pod_service_account_name: postgres-pod # pod_service_account_role_binding_definition: "" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 3f6bf25d9..0728e4dec 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -1498,6 +1498,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "pod_role_label": { Type: "string", }, + "pod_leader_label_value": { + Type: "string", + }, "pod_service_account_definition": { Type: "string", }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index cd11b9173..8789bfc48 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -81,6 +81,7 @@ type KubernetesMetaConfiguration struct { InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` PodRoleLabel string `json:"pod_role_label,omitempty"` + PodLeaderLabelValue string `json:"pod_leader_label_value,omitempty"` ClusterLabels map[string]string `json:"cluster_labels,omitempty"` InheritedLabels []string `json:"inherited_labels,omitempty"` InheritedAnnotations []string `json:"inherited_annotations,omitempty"` diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1a8d6f762..67548333f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -175,6 +175,14 @@ func (c *Cluster) clusterNamespace() string { return c.ObjectMeta.Namespace } +func (c *Cluster) masterRole() PostgresRole { + return PostgresRole(c.OpConfig.PodLeaderLabelValue) +} + +func (c *Cluster) replicaRole() PostgresRole { + return 
PostgresRole("replica") +} + func (c *Cluster) teamName() string { // TODO: check Teams API for the actual name (in case the user passes an integer Id). return c.Spec.TeamID @@ -294,7 +302,7 @@ func (c *Cluster) Create() (err error) { } c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources") - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} { // if kubernetes_use_configmaps is set Patroni will create configmaps // otherwise it will use endpoints @@ -302,7 +310,7 @@ func (c *Cluster) Create() (err error) { if c.Endpoints[role] != nil { return fmt.Errorf("%s endpoint already exists in the cluster", role) } - if role == Master { + if role == c.masterRole() { // replica endpoint will be created by the replica service. Master endpoint needs to be created by us, // since the corresponding master service does not define any selectors. ep, err = c.createEndpoint(role) @@ -829,6 +837,10 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) { } } + if !reflect.DeepEqual(old.Labels, new.Labels) { + return false, "new service's labels do not match the current ones" + } + if !reflect.DeepEqual(old.ObjectMeta.OwnerReferences, new.ObjectMeta.OwnerReferences) { return false, "new service's owner references do not match the current ones" } @@ -1213,7 +1225,7 @@ func (c *Cluster) Delete() error { c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err) } - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} { if !c.patroniKubernetesUseConfigMaps() { if err := c.deleteEndpoint(role); err != nil { anyErrors = true @@ -1238,7 +1250,7 @@ func (c *Cluster) Delete() error { // Delete connection pooler objects anyway, even if it's not mentioned in the // manifest, just to not keep orphaned components in case if something went // wrong - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} { if err := c.deleteConnectionPooler(role); err != nil { anyErrors = true c.logger.Warningf("could not remove connection pooler: %v", err) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 897ed6c0d..ff0e1a830 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -35,6 +35,7 @@ const ( adminUserName = "admin" exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` + leaderLabelValue = "primary" ) var logger = logrus.New().WithField("test", "cluster") @@ -55,6 +56,7 @@ var cl = New( }, Resources: config.Resources{ DownscalerAnnotations: []string{"downscaler/*"}, + PodLeaderLabelValue: leaderLabelValue, }, ConnectionPooler: config.ConnectionPooler{ User: poolerUserName, @@ -126,7 +128,7 @@ func TestCreate(t *testing.T) { Labels: map[string]string{ "application": "spilo", "cluster-name": clusterName, - "spilo-role": "master", + "spilo-role": leaderLabelValue, }, }, } @@ -147,6 +149,7 @@ func TestCreate(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", + PodLeaderLabelValue: leaderLabelValue, ResourceCheckInterval: time.Duration(3), ResourceCheckTimeout: time.Duration(10), }, @@ -663,7 +666,7 @@ func TestServiceAnnotations(t *testing.T) { //MASTER { about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: false, enableTeamIdClusterPrefix: false, @@ -673,7 +676,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &enabled, enableMasterLoadBalancerOC: false, enableTeamIdClusterPrefix: false, @@ -686,7 +689,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, @@ -696,7 +699,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -708,7 +711,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations and load balancer enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -721,7 +724,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations and load balancer disabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, @@ -731,7 +734,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with operator annotations and load balancer enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: map[string]string{"foo": "bar"}, @@ -744,7 +747,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with operator annotations override default annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: map[string]string{ @@ -758,7 
+761,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations override default annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -772,7 +775,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations do not override external-dns annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -786,7 +789,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster name teamId prefix enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: true, serviceAnnotations: make(map[string]string), @@ -798,7 +801,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with master service annotations override service annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 6cd46f745..e0d97b359 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -49,7 +49,7 @@ type ConnectionPoolerObjects struct { func (c *Cluster) connectionPoolerName(role PostgresRole) string { name := fmt.Sprintf("%s-%s", c.Name, constants.ConnectionPoolerResourceSuffix) - if role == Replica { + if role == c.replicaRole() { name = fmt.Sprintf("%s-%s", name, "repl") } return name @@ -537,8 +537,8 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue } // -repl suffix will be added by replicaDNSName - clusterNameWithPoolerSuffix := c.connectionPoolerName(Master) - if role == Master { + clusterNameWithPoolerSuffix := c.connectionPoolerName(c.masterRole()) + if role == c.masterRole() { dnsString = c.masterDNSName(clusterNameWithPoolerSuffix) } else { dnsString = c.replicaDNSName(clusterNameWithPoolerSuffix) @@ -557,7 +557,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp switch role { - case Replica: + case c.replicaRole(): // if the value is explicitly set in a Postgresql manifest, follow this setting if spec.EnableReplicaPoolerLoadBalancer != nil { return *spec.EnableReplicaPoolerLoadBalancer @@ -565,7 +565,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp // otherwise, follow the operator configuration return c.OpConfig.EnableReplicaPoolerLoadBalancer - case Master: + case c.masterRole(): if spec.EnableMasterPoolerLoadBalancer != nil { return *spec.EnableMasterPoolerLoadBalancer } @@ -877,9 +877,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look logPoolerEssentials(c.logger, oldSpec, newSpec) // Check and perform the sync requirements for each of the roles. 
- for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} { - if role == Master { + if role == c.masterRole() { connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec) } else { connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec) diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index 78d1c2527..8fb364541 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -42,7 +42,7 @@ func boolToPointer(value bool) *bool { } func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" @@ -63,7 +63,7 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { return fmt.Errorf("Connection pooler resources are empty") } - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) @@ -87,14 +87,14 @@ func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" - poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master) + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.masterRole()) - if cluster.ConnectionPooler[Master].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Master].Deployment.Labels, poolerLabels) { - return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Deployment.Labels) + if cluster.ConnectionPooler[cluster.masterRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels) } - if cluster.ConnectionPooler[Master].Service == nil || !util.MapContains(cluster.ConnectionPooler[Master].Service.Labels, poolerLabels) { - return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Service.Labels) + if cluster.ConnectionPooler[cluster.masterRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Service.Labels) } return nil @@ -107,21 +107,21 @@ func ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" - poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica) + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.replicaRole()) - if cluster.ConnectionPooler[Replica].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Deployment.Labels, poolerLabels) { - return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Deployment.Labels) + if 
cluster.ConnectionPooler[cluster.replicaRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels) } - if cluster.ConnectionPooler[Replica].Service == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Service.Labels, poolerLabels) { - return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Service.Labels) + if cluster.ConnectionPooler[cluster.replicaRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels) } return nil } func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { if cluster.ConnectionPooler[role] != nil && (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { return fmt.Errorf("Connection pooler was not deleted for role %v", role) @@ -133,8 +133,8 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler[Master] != nil && - (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { + if cluster.ConnectionPooler[cluster.masterRole()] != nil && + (cluster.ConnectionPooler[cluster.masterRole()].Deployment != nil || cluster.ConnectionPooler[cluster.masterRole()].Service != nil) { return fmt.Errorf("Connection pooler master was not deleted") } return nil @@ -142,8 +142,8 @@ func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler[Replica] != nil && - (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { + if cluster.ConnectionPooler[cluster.replicaRole()] != nil && + (cluster.ConnectionPooler[cluster.replicaRole()].Deployment != nil || cluster.ConnectionPooler[cluster.replicaRole()].Service != nil) { return fmt.Errorf("Connection pooler replica was not deleted") } return nil @@ -323,7 +323,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { cluster.Name = "acid-fake-cluster" cluster.Namespace = "default" - _, err := cluster.createService(Master) + _, err := cluster.createService(cluster.masterRole()) assert.NoError(t, err) _, err = cluster.createStatefulSet() assert.NoError(t, err) @@ -334,7 +334,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { t.Errorf("%s: Cannot create connection pooler, %s, %+v", testName, err, reason) } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) @@ -369,7 +369,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { t.Errorf("%s: Cannot sync connection pooler, %s", testName, err) } - for _, role := range [2]PostgresRole{Master, Replica} { + for 
_, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { err = cluster.deleteConnectionPooler(role) if err != nil { t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) @@ -424,6 +424,7 @@ func TestConnectionPoolerSync(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", + PodLeaderLabelValue: leaderLabelValue, }, }, }, client, pg, logger, eventRecorder) @@ -431,7 +432,7 @@ func TestConnectionPoolerSync(t *testing.T) { cluster.Name = "acid-fake-cluster" cluster.Namespace = "default" - _, err := cluster.createService(Master) + _, err := cluster.createService(cluster.masterRole()) assert.NoError(t, err) _, err = cluster.createStatefulSet() assert.NoError(t, err) @@ -765,7 +766,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) { check: testEnvs, }, } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { for _, tt := range tests { podSpec, _ := tt.cluster.generateConnectionPoolerPodTemplate(role) @@ -802,12 +803,12 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { + cluster.masterRole(): { Deployment: nil, Service: nil, LookupFunction: true, Name: "", - Role: Master, + Role: cluster.masterRole(), }, } @@ -854,7 +855,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master]) + deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[cluster.masterRole()]) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", @@ -921,7 +922,7 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabels(Master, true).MatchLabels + expected := cluster.connectionPoolerLabels(cluster.masterRole(), true).MatchLabels if labels["connection-pooler"] != expected["connection-pooler"] { return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", @@ -1018,20 +1019,20 @@ func TestPoolerTLS(t *testing.T) { // create pooler resources cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} - cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{ + cluster.ConnectionPooler[cluster.masterRole()] = &ConnectionPoolerObjects{ Deployment: nil, Service: nil, - Name: cluster.connectionPoolerName(Master), + Name: cluster.connectionPoolerName(cluster.masterRole()), ClusterName: clusterName, Namespace: namespace, LookupFunction: false, - Role: Master, + Role: cluster.masterRole(), } - _, err = cluster.syncConnectionPoolerWorker(nil, &pg, Master) + _, err = cluster.syncConnectionPoolerWorker(nil, &pg, cluster.masterRole()) assert.NoError(t, err) - deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(Master), metav1.GetOptions{}) + deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(cluster.masterRole()), metav1.GetOptions{}) assert.NoError(t, err) fsGroup := int64(103) @@ -1088,17 +1089,17 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { }, } cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - 
Master: { + cluster.masterRole(): { Deployment: nil, Service: nil, LookupFunction: false, - Role: Master, + Role: cluster.masterRole(), }, - Replica: { + cluster.replicaRole(): { Deployment: nil, Service: nil, LookupFunction: false, - Role: Replica, + Role: cluster.replicaRole(), }, } @@ -1138,7 +1139,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { check: testServiceSelector, }, } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { for _, tt := range tests { service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role]) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index ff5536303..c561c2518 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -77,7 +77,7 @@ func (c *Cluster) statefulSetName() string { func (c *Cluster) serviceName(role PostgresRole) string { name := c.Name switch role { - case Replica: + case c.replicaRole(): name = fmt.Sprintf("%s-%s", name, "repl") case Patroni: name = fmt.Sprintf("%s-%s", name, "config") @@ -950,6 +950,14 @@ func (c *Cluster) generateSpiloPodEnvVars( Name: "KUBERNETES_ROLE_LABEL", Value: c.OpConfig.PodRoleLabel, }, + { + Name: "KUBERNETES_LEADER_LABEL_VALUE", + Value: c.OpConfig.PodLeaderLabelValue, + }, + { + Name: "KUBERNETES_STANDBY_LEADER_LABEL_VALUE", + Value: c.OpConfig.PodLeaderLabelValue, + }, { Name: "PGPASSWORD_SUPERUSER", ValueFrom: &v1.EnvVarSource{ @@ -1529,7 +1537,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, Selector: c.labelsSelector(), - ServiceName: c.serviceName(Master), + ServiceName: c.serviceName(c.masterRole()), Template: *podTemplate, VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate}, UpdateStrategy: updateStrategy, @@ -1948,7 +1956,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac switch role { - case Replica: + case c.replicaRole(): // if the value is explicitly set in a Postgresql manifest, follow this setting if spec.EnableReplicaLoadBalancer != nil { @@ -1958,7 +1966,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac // otherwise, follow the operator configuration return c.OpConfig.EnableReplicaLoadBalancer - case Master: + case c.masterRole(): if spec.EnableMasterLoadBalancer != nil { return *spec.EnableMasterLoadBalancer @@ -1980,7 +1988,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) // no selector for master, see https://github.com/zalando/postgres-operator/issues/340 // if kubernetes_use_configmaps is set master service needs a selector - if role == Replica || c.patroniKubernetesUseConfigMaps() { + if role == c.replicaRole() || c.patroniKubernetesUseConfigMaps() { serviceSpec.Selector = c.roleLabelsSet(false, role) } @@ -2047,9 +2055,9 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po maps.Copy(annotations, spec.ServiceAnnotations) switch role { - case Master: + case c.masterRole(): maps.Copy(annotations, spec.MasterServiceAnnotations) - case Replica: + case c.replicaRole(): maps.Copy(annotations, spec.ReplicaServiceAnnotations) } } @@ -2220,7 +2228,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { // define label selector and add the master role selector if enabled labels := c.labelsSet(false) if pdbMasterLabelSelector == nil || *c.OpConfig.PDBMasterLabelSelector { - 
labels[c.OpConfig.PodRoleLabel] = string(Master) + labels[c.OpConfig.PodRoleLabel] = string(c.masterRole()) } return &policyv1.PodDisruptionBudget{ diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 612e4525a..b9647cbf9 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -576,67 +576,67 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedSpiloWalPathCompat := []ExpectedValue{ { - envIndex: 12, + envIndex: 14, envVarConstant: "ENABLE_WAL_PATH_COMPAT", envVarValue: "true", }, } expectedValuesS3Bucket := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_S3_BUCKET", envVarValue: "global-s3-bucket", }, { - envIndex: 16, + envIndex: 18, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 17, + envIndex: 19, envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, } expectedValuesGCPCreds := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_GS_BUCKET", envVarValue: "global-gs-bucket", }, { - envIndex: 16, + envIndex: 18, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 17, + envIndex: 19, envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, { - envIndex: 18, + envIndex: 20, envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS", envVarValue: "some-path-to-credentials", }, } expectedS3BucketConfigMap := []ExpectedValue{ { - envIndex: 17, + envIndex: 19, envVarConstant: "wal_s3_bucket", envVarValue: "global-s3-bucket-configmap", }, } expectedCustomS3BucketSpec := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_S3_BUCKET", envVarValue: "custom-s3-bucket", }, } expectedCustomVariableSecret := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "custom_variable", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -650,72 +650,72 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedCustomVariableConfigMap := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "custom_variable", envVarValue: "configmap-test", }, } expectedCustomVariableSpec := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "CUSTOM_VARIABLE", envVarValue: "spec-env-test", }, } expectedCloneEnvSpec := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "CLONE_WALE_S3_PREFIX", envVarValue: "s3://another-bucket", }, { - envIndex: 19, + envIndex: 21, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, { - envIndex: 20, + envIndex: 22, envVarConstant: "CLONE_AWS_ENDPOINT", envVarValue: "s3.eu-central-1.amazonaws.com", }, } expectedCloneEnvSpecEnv := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX", envVarValue: "test-cluster", }, { - envIndex: 17, + envIndex: 19, envVarConstant: "CLONE_WALE_S3_PREFIX", envVarValue: "s3://another-bucket", }, { - envIndex: 21, + envIndex: 23, envVarConstant: "CLONE_AWS_ENDPOINT", envVarValue: "s3.eu-central-1.amazonaws.com", }, } expectedCloneEnvConfigMap := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "CLONE_WAL_S3_BUCKET", envVarValue: "global-s3-bucket", }, { - envIndex: 17, + envIndex: 19, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 21, + envIndex: 23, envVarConstant: "clone_aws_endpoint", envVarValue: "s3.eu-west-1.amazonaws.com", }, } expectedCloneEnvSecret := []ExpectedValue{ { - envIndex: 21, + envIndex: 23, 
envVarConstant: "clone_aws_access_key_id", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -729,12 +729,12 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedStandbyEnvSecret := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "STANDBY_WALE_GS_PREFIX", envVarValue: "gs://some/path/", }, { - envIndex: 20, + envIndex: 22, envVarConstant: "standby_google_application_credentials", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -2359,7 +2359,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { } if !masterLabelSelectorDisabled && !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) { + MatchLabels: map[string]string{"spilo-role": leaderLabelValue, "cluster-name": "myapp-database"}}) { return fmt.Errorf("MatchLabels incorrect.") } @@ -2389,7 +2389,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With multiple instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2406,7 +2406,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With zero instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2423,7 +2423,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With PodDisruptionBudget disabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2440,7 +2440,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, 
 				k8sutil.KubernetesClient{},
 				acidv1.Postgresql{
 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2457,7 +2457,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 		{
 			scenario: "With PDBMasterLabelSelector disabled",
 			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}},
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}},
 				k8sutil.KubernetesClient{},
 				acidv1.Postgresql{
 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2474,7 +2474,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 		{
 			scenario: "With OwnerReference enabled",
 			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue, EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
 				k8sutil.KubernetesClient{},
 				acidv1.Postgresql{
 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2550,6 +2550,7 @@ func TestGenerateService(t *testing.T) {
 				DefaultMemoryRequest: "0.7Gi",
 				MaxMemoryRequest:     "1.0Gi",
 				DefaultMemoryLimit:   "1.3Gi",
+				PodLeaderLabelValue:  leaderLabelValue,
 			},
 			SidecarImages: map[string]string{
 				"deprecated-global-sidecar": "image:123",
@@ -2576,10 +2577,10 @@ func TestGenerateService(t *testing.T) {
 			},
 		},
 	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
-	service := cluster.generateService(Master, &spec)
+	service := cluster.generateService(cluster.masterRole(), &spec)
 	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
 	cluster.OpConfig.ExternalTrafficPolicy = "Local"
-	service = cluster.generateService(Master, &spec)
+	service = cluster.generateService(cluster.masterRole(), &spec)
 	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
 }

@@ -2605,28 +2606,28 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
 	}{
 		{
 			subtest:  "new format, load balancer is enabled for replica",
-			role:     Replica,
+			role:     cluster.replicaRole(),
 			spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()},
 			opConfig: config.Config{},
 			result:   true,
 		},
 		{
 			subtest:  "new format, load balancer is disabled for replica",
-			role:     Replica,
+			role:     cluster.replicaRole(),
 			spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()},
 			opConfig: config.Config{},
 			result:   false,
 		},
 		{
 			subtest:  "new format, load balancer isn't specified for replica",
-			role:     Replica,
+			role:     cluster.replicaRole(),
 			spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
 			opConfig: config.Config{EnableReplicaLoadBalancer: true},
 			result:   true,
 		},
 		{
 			subtest:  "new format, load balancer isn't specified for replica",
-			role:     Replica,
+			role:     cluster.replicaRole(),
 			spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
 			opConfig: config.Config{EnableReplicaLoadBalancer: false},
 			result:   false,
@@ -2690,7 +2691,7 @@ func TestEnableLoadBalancers(t *testing.T) {
 	namespace := "default"
 	clusterNameLabel := "cluster-name"
 	roleLabel := "spilo-role"
-	roles := []PostgresRole{Master, Replica}
+	roles := []PostgresRole{cluster.masterRole(), cluster.replicaRole()}
 	sourceRanges := []string{"192.186.1.2/22"}
 	extTrafficPolicy := "Cluster"

diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go
index bd2172c18..9420d6164 100644
--- a/pkg/cluster/pod.go
+++ b/pkg/cluster/pod.go
@@ -44,7 +44,7 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
 		return nil, fmt.Errorf("could not get list of pods: %v", err)
 	}

-	if role == Master && len(pods.Items) > 1 {
+	if role == c.masterRole() && len(pods.Items) > 1 {
 		return nil, fmt.Errorf("too many masters")
 	}

@@ -234,7 +234,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
 		return nil
 	}

-	if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != Master {
+	if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != c.masterRole() {
 		c.logger.Warningf("no action needed: pod %q is not the master (anymore)", podName)
 		return nil
 	}
@@ -312,7 +312,7 @@ func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName st
 		return nil
 	}

-	if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != Replica {
+	if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != c.replicaRole() {
 		return fmt.Errorf("check failed: pod %q is not a replica", podName)
 	}

@@ -416,7 +416,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
 	for i, pod := range pods {
 		role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])

-		if role == Master {
+		if role == c.masterRole() {
 			masterPod = &pods[i]
 			continue
 		}
@@ -428,9 +428,9 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
 		}

 		newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
-		if newRole == Replica {
+		if newRole == c.replicaRole() {
 			replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
-		} else if newRole == Master {
+		} else if newRole == c.masterRole() {
 			newMasterPod = newPod
 		}
 	}
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 3f47328ee..b7716bbf9 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -132,7 +132,7 @@ func getPodIndex(podName string) (int32, error) {
 }

 func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
-	masterPod, err := c.getRolePods(Master)
+	masterPod, err := c.getRolePods(c.masterRole())
 	if err != nil {
 		return fmt.Errorf("could not get master pod: %v", err)
 	}
@@ -393,7 +393,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
 	result := make([]v1.EndpointSubset, 0)
 	pods, err := c.getRolePods(role)
 	if err != nil {
-		if role == Master {
+		if role == c.masterRole() {
 			c.logger.Warningf("could not obtain the address for %s pod: %v", role, err)
 		} else {
 			c.logger.Warningf("could not obtain the addresses for %s pods: %v", role, err)
@@ -410,7 +410,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
 			Addresses: endPointAddresses,
 			Ports:     []v1.EndpointPort{{Name: "postgresql", Port: 5432, Protocol: "TCP"}},
 		})
-	} else if role == Master {
+	} else if role == c.masterRole() {
 		c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses")
 	}

@@ -682,22 +682,22 @@ func (c *Cluster) deleteLogicalBackupJob() error {

 // GetServiceMaster returns cluster's kubernetes master Service
 func (c *Cluster) GetServiceMaster() *v1.Service {
-	return c.Services[Master]
+	return c.Services[c.masterRole()]
 }

 // GetServiceReplica returns cluster's kubernetes replica Service
 func (c *Cluster) GetServiceReplica() *v1.Service {
-	return c.Services[Replica]
+	return c.Services[c.replicaRole()]
 }

 // GetEndpointMaster returns cluster's kubernetes master Endpoint
 func (c *Cluster) GetEndpointMaster() *v1.Endpoints {
-	return c.Endpoints[Master]
+	return c.Endpoints[c.masterRole()]
 }

 // GetEndpointReplica returns cluster's kubernetes replica Endpoint
 func (c *Cluster) GetEndpointReplica() *v1.Endpoints {
-	return c.Endpoints[Replica]
+	return c.Endpoints[c.replicaRole()]
 }

 // GetStatefulSet returns cluster's kubernetes StatefulSet
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index d1a339001..6b1b322f3 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -340,7 +340,7 @@ func (c *Cluster) syncPatroniService() error {
 }

 func (c *Cluster) syncServices() error {
-	for _, role := range []PostgresRole{Master, Replica} {
+	for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {
 		c.logger.Debugf("syncing %s service", role)

 		if !c.patroniKubernetesUseConfigMaps() {
@@ -404,8 +404,8 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil {
 		desiredEp := c.generateEndpoint(role, ep.Subsets)
 		// if owner references differ we update which would also change annotations
-		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) {
-			c.logger.Infof("new %s endpoints's owner references do not match the current ones", role)
+		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) || !reflect.DeepEqual(ep.Labels, desiredEp.Labels) {
+			c.logger.Infof("new %s endpoints's owner references or labels do not match the current ones", role)
 			c.setProcessName("updating %v endpoint", role)
 			ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), desiredEp, metav1.UpdateOptions{})
 			if err != nil {
@@ -545,7 +545,7 @@ func (c *Cluster) syncStatefulSet() error {
 				podsToRecreate = append(podsToRecreate, pod)
 			} else {
 				role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
-				if role == Master {
+				if role == c.masterRole() {
 					continue
 				}
 				switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
@@ -616,7 +616,7 @@ func (c *Cluster) syncStatefulSet() error {
 				podsToRecreate = append(podsToRecreate, pod)
 			} else {
 				role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
-				if role == Master {
+				if role == c.masterRole() {
 					continue
 				}
 				switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
@@ -726,9 +726,9 @@ func (c *Cluster) restartInstances(pods []v1.Pod, restartWait uint32, restartPri
 	errors := make([]string, 0)
 	remainingPods := make([]*v1.Pod, 0)

-	skipRole := Master
+	skipRole := c.masterRole()
 	if restartPrimaryFirst {
-		skipRole = Replica
+		skipRole = c.replicaRole()
 	}

 	for i, pod := range pods {
@@ -1422,7 +1422,7 @@ func (c *Cluster) syncDatabases() error {
 	if len(createDatabases) > 0 {
 		// trigger creation of pooler objects in new database in syncConnectionPooler
 		if c.ConnectionPooler != nil {
-			for _, role := range [2]PostgresRole{Master, Replica} {
+			for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {
 				c.ConnectionPooler[role].LookupFunction = false
 			}
 		}
diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go
index d45a193cb..5e86c8a39 100644
--- a/pkg/cluster/sync_test.go
+++ b/pkg/cluster/sync_test.go
@@ -556,7 +556,7 @@ func TestSyncStandbyClusterConfiguration(t *testing.T) {
 	podLabels := map[string]string{
 		"cluster-name": clusterName,
 		"application":  applicationLabel,
-		"spilo-role":   "master",
+		"spilo-role":   leaderLabelValue,
 	}
 	mockPod.Labels = podLabels
 	client.PodsGetter.Pods(namespace).Create(context.TODO(), mockPod, metav1.CreateOptions{})
diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go
index 8e9263d49..845d350ef 100644
--- a/pkg/cluster/types.go
+++ b/pkg/cluster/types.go
@@ -15,8 +15,6 @@ type PostgresRole string

 const (
 	// spilo roles
-	Master  PostgresRole = "master"
-	Replica PostgresRole = "replica"
 	Patroni PostgresRole = "config"

 	// roles returned by Patroni cluster endpoint
diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go
index c570fcc3a..6afba2797 100644
--- a/pkg/cluster/util.go
+++ b/pkg/cluster/util.go
@@ -340,7 +340,7 @@ func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopCh chan struct{},
 			podRole := PostgresRole(podEvent.CurPod.Labels[c.OpConfig.PodRoleLabel])

 			if role == nil {
-				if podRole == Master || podRole == Replica {
+				if podRole == c.masterRole() || podRole == c.replicaRole() {
 					return podEvent.CurPod, nil
 				}
 			} else if *role == podRole {
@@ -399,12 +399,12 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
 	}
 	masterListOption := metav1.ListOptions{
 		LabelSelector: labels.Merge(ls, labels.Set{
-			c.OpConfig.PodRoleLabel: string(Master),
+			c.OpConfig.PodRoleLabel: string(c.masterRole()),
 		}).String(),
 	}
 	replicaListOption := metav1.ListOptions{
 		LabelSelector: labels.Merge(ls, labels.Set{
-			c.OpConfig.PodRoleLabel: string(Replica),
+			c.OpConfig.PodRoleLabel: string(c.replicaRole()),
 		}).String(),
 	}
 	podsNumber = 1
@@ -515,7 +515,7 @@ func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) la

 func (c *Cluster) dnsName(role PostgresRole) string {
 	var dnsString, oldDnsString string
-	if role == Master {
+	if role == c.masterRole() {
 		dnsString = c.masterDNSName(c.Name)
 	} else {
 		dnsString = c.replicaDNSName(c.Name)
@@ -524,7 +524,7 @@ func (c *Cluster) dnsName(role PostgresRole) string {
 	// if cluster name starts with teamID we might need to provide backwards compatibility
 	clusterNameWithoutTeamPrefix, _ := acidv1.ExtractClusterName(c.Name, c.Spec.TeamID)
 	if clusterNameWithoutTeamPrefix != "" {
-		if role == Master {
+		if role == c.masterRole() {
 			oldDnsString = c.oldMasterDNSName(clusterNameWithoutTeamPrefix)
 		} else {
 			oldDnsString = c.oldReplicaDNSName(clusterNameWithoutTeamPrefix)
diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go
index 2cb755c6c..2b7889de5 100644
--- a/pkg/cluster/util_test.go
+++ b/pkg/cluster/util_test.go
@@ -161,7 +161,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
 	}

 	checkPooler := func(annotations map[string]string) error {
-		for _, role := range []PostgresRole{Master, Replica} {
+		for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
 			deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
 			if err != nil {
 				return err
@@ -244,7 +244,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[

 func createPods(cluster *Cluster) []v1.Pod {
 	podsList := make([]v1.Pod, 0)
-	for i, role := range []PostgresRole{Master, Replica} {
+	for i, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
 		podsList = append(podsList, v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: fmt.Sprintf("%s-%d", clusterName, i),
@@ -325,7 +325,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 	if err != nil {
 		return nil, err
 	}
-	_, err = cluster.createService(Master)
+	_, err = cluster.createService(cluster.masterRole())
 	if err != nil {
 		return nil, err
 	}
@@ -365,7 +365,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 }

 func createPatroniResources(cluster *Cluster) error {
-	patroniService := cluster.generateService(Replica, &pg.Spec)
+	patroniService := cluster.generateService(cluster.replicaRole(), &pg.Spec)
 	patroniService.ObjectMeta.Name = cluster.serviceName(Patroni)
 	_, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{})
 	if err != nil {
@@ -479,7 +479,7 @@ func annotateResources(cluster *Cluster) error {
 		}
 	}

-	for _, role := range []PostgresRole{Master, Replica} {
+	for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
 		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
 		if err != nil {
 			return err
diff --git a/pkg/controller/node.go b/pkg/controller/node.go
index 2836b4f7f..6de1b492f 100644
--- a/pkg/controller/node.go
+++ b/pkg/controller/node.go
@@ -108,7 +108,7 @@ func (c *Controller) attemptToMoveMasterPodsOffNode(node *v1.Node) error {
 		podName := util.NameFromMeta(pod.ObjectMeta)

 		role, ok := pod.Labels[c.opConfig.PodRoleLabel]
-		if !ok || cluster.PostgresRole(role) != cluster.Master {
+		if !ok || cluster.PostgresRole(role) != cluster.PostgresRole(c.opConfig.PodLeaderLabelValue) {
 			if !ok {
 				c.logger.Warningf("could not move pod %q: pod has no role", podName)
 			}
diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go
index 5739f6314..3d8bc5670 100644
--- a/pkg/controller/operator_config.go
+++ b/pkg/controller/operator_config.go
@@ -110,6 +110,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	}

 	result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role")
+	result.PodLeaderLabelValue = util.Coalesce(fromCRD.Kubernetes.PodLeaderLabelValue, "master")
 	result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"})
 	result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels
 	result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations
diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go
index 30b967beb..0bd04bb53 100644
--- a/pkg/util/config/config.go
+++ b/pkg/util/config/config.go
@@ -48,6 +48,7 @@ type Resources struct {
 	DeleteAnnotationDateKey string            `name:"delete_annotation_date_key"`
 	DeleteAnnotationNameKey string            `name:"delete_annotation_name_key"`
 	PodRoleLabel            string            `name:"pod_role_label" default:"spilo-role"`
+	PodLeaderLabelValue     string            `name:"pod_leader_label_value" default:"master"`
 	PodToleration           map[string]string `name:"toleration" default:""`
 	DefaultCPURequest       string            `name:"default_cpu_request"`
 	DefaultMemoryRequest    string            `name:"default_memory_request"`