diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index e24c0aca7..fbb693fd1 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1395,8 +1395,10 @@ func addPGBackRestToInstancePodSpec( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec, ) { - pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, - instanceCertificates.Name) + if pgbackrest.RepoHostVolumeDefined(cluster) { + pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, + instanceCertificates.Name) + } pgbackrest.AddConfigToInstancePod(cluster, instancePod) } diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 2381b4cb5..f31b38624 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -565,104 +565,14 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { readOnly: true - name: other resources: {} -- command: - - pgbackrest - - server - livenessProbe: - exec: - command: - - pgbackrest - - server-ping - name: pgbackrest - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/pgbackrest/server - name: pgbackrest-server - readOnly: true - - mountPath: /pgdata - name: postgres-data - - mountPath: /pgwal - name: postgres-wal - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true -- command: - - bash - - -ceu - - -- - - |- - monitor() { - exec {fd}<> <(:||:) - until read -r -t 5 -u "${fd}"; do - if - [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && - pkill -HUP --exact --parent=0 pgbackrest - then - exec {fd}>&- && exec {fd}<> <(:||:) - stat --dereference --format='Loaded configuration dated %y' "${filename}" - elif - { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || - [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] - } && - pkill -HUP --exact --parent=0 pgbackrest - then - exec {fd}>&- && exec {fd}<> <(:||:) - stat --format='Loaded certificates dated %y' "${directory}" - fi - done - }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor - - pgbackrest-config - - /etc/pgbackrest/server - - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt - - /etc/pgbackrest/conf.d/~postgres-operator_server.conf - name: pgbackrest-config - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/pgbackrest/server - name: pgbackrest-server - readOnly: true - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true `)) - // Instance configuration files with certificates. + // Instance configuration files but no certificates. // Other volumes are ignored. 
assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: other - name: postgres-data - name: postgres-wal -- name: pgbackrest-server - projected: - sources: - - secret: - items: - - key: pgbackrest-server.crt - path: server-tls.crt - - key: pgbackrest-server.key - mode: 384 - path: server-tls.key - name: some-secret - name: pgbackrest-config projected: sources: @@ -672,19 +582,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash - - key: pgbackrest-server.conf - path: ~postgres-operator_server.conf name: hippo-pgbackrest-config - - secret: - items: - - key: pgbackrest.ca-roots - path: ~postgres-operator/tls-ca.crt - - key: pgbackrest-client.crt - path: ~postgres-operator/client-tls.crt - - key: pgbackrest-client.key - mode: 384 - path: ~postgres-operator/client-tls.key - name: hippo-pgbackrest `)) }) @@ -733,6 +631,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) } diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 667463edf..2d8d35572 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -320,8 +321,10 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } // If a dedicated repo host resource and a dedicated repo host is enabled, then // add to the slice and do not delete. - ownedNoDelete = append(ownedNoDelete, owned) - delete = false + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } case hasLabel(naming.LabelPGBackRestRepoVolume): if !backupsSpecFound { break @@ -680,30 +683,29 @@ func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster pgbackrest.AddServerToRepoPod(ctx, postgresCluster, &repo.Spec.Template.Spec) - if pgbackrest.RepoHostVolumeDefined(postgresCluster) { - // add the init container to make the pgBackRest repo volume log directory - pgBackRestLogPath := pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) + // add the init container to make the pgBackRest repo volume log directory + pgBackRestLogPath := pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) - containersToAdd := []string{naming.PGBackRestRepoContainerName} + containersToAdd := []string{naming.PGBackRestRepoContainerName} - // If OpenTelemetryLogs is enabled, we want to add the collector to the pod - // and also add the RepoVolumes to the container. - if collector.OpenTelemetryLogsEnabled(ctx, postgresCluster) { - collector.AddToPod(ctx, postgresCluster.Spec.Instrumentation, postgresCluster.Spec.ImagePullPolicy, - &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(postgresCluster)}, - &repo.Spec.Template, []corev1.VolumeMount{}, "", - []string{pgBackRestLogPath}, true, false) + // If OpenTelemetryLogs is enabled, we want to add the collector to the pod + // and also add the RepoVolumes to the container. 
+ if collector.OpenTelemetryLogsEnabled(ctx, postgresCluster) { + collector.AddToPod(ctx, postgresCluster.Spec.Instrumentation, postgresCluster.Spec.ImagePullPolicy, + &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(postgresCluster)}, + &repo.Spec.Template, []corev1.VolumeMount{}, "", + []string{pgBackRestLogPath}, true, false) - containersToAdd = append(containersToAdd, naming.ContainerCollector) - } + containersToAdd = append(containersToAdd, naming.ContainerCollector) + } - // add pgBackRest repo volumes to pod and to containers - if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, - getRepoPVCNames(postgresCluster, repoResources.pvcs), - containersToAdd...); err != nil { - return nil, errors.WithStack(err) - } + // add pgBackRest repo volumes to pod and to containers + if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, + getRepoPVCNames(postgresCluster, repoResources.pvcs), + containersToAdd...); err != nil { + return nil, errors.WithStack(err) } + // add configs to pod pgbackrest.AddConfigToRepoPod(postgresCluster, &repo.Spec.Template.Spec) @@ -772,7 +774,12 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, serviceAccountName string, - labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { + labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) { + + selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repo) + if err != nil { + return nil, errors.WithStack(err) + } repoIndex := regexRepoIndex.FindString(repo.Name) cmdOpts := []string{ @@ -792,9 +799,9 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P {Name: "COMMAND", Value: "backup"}, {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, {Name: "COMPARE_HASH", Value: "true"}, - {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, + {Name: "CONTAINER", Value: containerName}, {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, - {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, + {Name: "SELECTOR", Value: selector.String()}, }, Image: config.PGBackRestContainerImage(postgresCluster), ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, @@ -855,9 +862,13 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) + if containerName == naming.PGBackRestRepoContainerName { + pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) + } else { + pgbackrest.AddConfigToInstancePod(postgresCluster, &jobSpec.Template.Spec) + } - return jobSpec + return jobSpec, nil } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} @@ -1406,14 +1417,19 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, var repoHost *appsv1.StatefulSet var repoHostName string - // reconcile the pgbackrest repository host - repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances, repoHostSA.GetName()) - if err != nil { - log.Error(err, "unable to reconcile pgBackRest repo host") - result.Requeue = true - return 
result, nil + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + // reconcile the pgbackrest repository host + repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances, repoHostSA.GetName()) + if err != nil { + log.Error(err, "unable to reconcile pgBackRest repo host") + result.Requeue = true + return result, nil + } + repoHostName = repoHost.GetName() + } else { + // remove the dedicated repo host status if a dedicated host is not enabled + meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, ConditionRepoHostReady) } - repoHostName = repoHost.GetName() if err := r.reconcilePGBackRestSecret(ctx, postgresCluster, repoHost, rootCA); err != nil { log.Error(err, "unable to reconcile pgBackRest secret") @@ -2017,6 +2033,8 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { + log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig") + backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) if err != nil { @@ -2030,6 +2048,12 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, return errors.WithStack(err) } + repoHostConfigured := pgbackrest.RepoHostVolumeDefined(postgresCluster) + if !repoHostConfigured { + log.V(1).Info("skipping SSH reconciliation, no repo hosts configured") + return nil + } + return nil } @@ -2343,11 +2367,13 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } - // determine if the dedicated repository host is ready using the repo host ready + // determine if the dedicated repository host is ready (if enabled) using the repo host ready // condition, and return if not - repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) - if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { - return nil + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) + if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { + return nil + } } // Determine if the replica create backup is complete and return if not. This allows for proper @@ -2429,8 +2455,11 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob.Labels = labels backupJob.Annotations = annotations - spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, + spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) 
+ if err != nil { + return errors.WithStack(err) + } backupJob.Spec = *spec @@ -2518,6 +2547,13 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } + // get pod name and container name as needed to exec into the proper pod and create + // the pgBackRest backup + _, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepo) + if err != nil { + return errors.WithStack(err) + } + // determine if the dedicated repository host is ready using the repo host ready status var dedicatedRepoReady bool condition = meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) @@ -2544,10 +2580,14 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // - The job has failed. The Job will be deleted and recreated to try again. // - The replica creation repo has changed since the Job was created. Delete and recreate // with the Job with the proper repo configured. + // - The "config" annotation has changed, indicating there is a new primary. Delete and + // recreate the Job with the proper config mounted (applicable when a dedicated repo + // host is not enabled). // - The "config hash" annotation has changed, indicating a configuration change has been // made in the spec (specifically a change to the config for an external repo). Delete // and recreate the Job with proper hash per the current config. if failed || replicaCreateRepoChanged || + (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { if err := r.Client.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { @@ -2563,9 +2603,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, } } + dedicatedEnabled := pgbackrest.RepoHostVolumeDefined(postgresCluster) // return if no job has been created and the replica repo or the dedicated // repo host is not ready - if job == nil && (!dedicatedRepoReady || !replicaRepoReady) { + if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) { return nil } @@ -2584,13 +2625,17 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), map[string]string{ - naming.PGBackRestConfigHash: configHash, + naming.PGBackRestCurrentConfig: containerName, + naming.PGBackRestConfigHash: configHash, }) backupJob.Labels = labels backupJob.Annotations = annotations - spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, + spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) + if err != nil { + return errors.WithStack(err) + } backupJob.Spec = *spec @@ -2772,6 +2817,27 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return false, nil } +// getPGBackRestExecSelector returns a selector and container name that allows the proper +// Pod (along with a specific container within it) to be found within the Kubernetes +// cluster as needed to exec into the container and run a pgBackRest command. 
+func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster, + repo v1beta1.PGBackRestRepo) (labels.Selector, string, error) { + + var err error + var podSelector labels.Selector + var containerName string + + if repo.Volume != nil { + podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName()) + containerName = naming.PGBackRestRepoContainerName + } else { + podSelector, err = naming.AsSelector(naming.ClusterPrimary(postgresCluster.GetName())) + containerName = naming.ContainerDatabase + } + + return podSelector, containerName, err +} + // getRepoHostStatus is responsible for returning the pgBackRest status for the // provided pgBackRest repository host func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { @@ -3016,8 +3082,11 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, + jobSpec, err := generateBackupJobSpecIntent(ctx, cluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) + if err != nil { + return errors.WithStack(err) + } // Suspend cronjobs when shutdown or read-only. Any jobs that have already // started will continue. @@ -3050,7 +3119,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set metadata pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) - err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) + err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) if err == nil { err = r.apply(ctx, pgBackRestCronJob) diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index b1083ade3..1bb08a846 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -887,6 +887,52 @@ func TestReconcileStanzaCreate(t *testing.T) { } } +func TestGetPGBackRestExecSelector(t *testing.T) { + + testCases := []struct { + cluster *v1beta1.PostgresCluster + repo v1beta1.PGBackRestRepo + desc string + expectedSelector string + expectedContainer string + }{{ + desc: "volume repo defined dedicated repo host enabled", + cluster: &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, + }, + repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + + "postgres-operator.crunchydata.com/pgbackrest=," + + "postgres-operator.crunchydata.com/pgbackrest-dedicated=", + expectedContainer: "pgbackrest", + }, { + desc: "cloud repo defined no repo host enabled", + cluster: &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, + }, + repo: v1beta1.PGBackRestRepo{ + Name: "repo1", + S3: &v1beta1.RepoS3{}, + }, + expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + + "postgres-operator.crunchydata.com/instance," + + "postgres-operator.crunchydata.com/role=master", + expectedContainer: "database", + }} + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + selector, container, err := getPGBackRestExecSelector(tc.cluster, tc.repo) + assert.NilError(t, err) + assert.Assert(t, selector.String() == tc.expectedSelector) + assert.Assert(t, container == tc.expectedContainer) + }) + } +} + func TestReconcileReplicaCreateBackup(t *testing.T) { // Garbage collector cleans up test resources before the test 
completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { @@ -971,13 +1017,17 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } assert.Assert(t, foundOwnershipRef) - var foundHashAnnotation bool + var foundConfigAnnotation, foundHashAnnotation bool // verify annotations for k, v := range backupJob.GetAnnotations() { + if k == naming.PGBackRestCurrentConfig && v == naming.PGBackRestRepoContainerName { + foundConfigAnnotation = true + } if k == naming.PGBackRestConfigHash && v == configHash { foundHashAnnotation = true } } + assert.Assert(t, foundConfigAnnotation) assert.Assert(t, foundHashAnnotation) // verify container & env vars @@ -1698,11 +1748,11 @@ func TestGetPGBackRestResources(t *testing.T) { jobCount: 0, pvcCount: 0, hostCount: 1, }, }, { - desc: "no dedicated repo host defined, dedicated sts not deleted", + desc: "no dedicated repo host defined delete dedicated sts", createResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "keep-dedicated-two", + Name: "delete-dedicated", Namespace: namespace, Labels: naming.PGBackRestDedicatedLabels(clusterName), }, @@ -1731,8 +1781,43 @@ func TestGetPGBackRestResources(t *testing.T) { }, }, result: testResult{ - // Host count is 2 due to previous repo host sts not being deleted. - jobCount: 0, pvcCount: 0, hostCount: 2, + jobCount: 0, pvcCount: 0, hostCount: 0, + }, + }, { + desc: "no repo host defined delete dedicated sts", + createResources: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "delete-dedicated-no-repo-host", + Namespace: namespace, + Labels: naming.PGBackRestDedicatedLabels(clusterName), + }, + Spec: appsv1.StatefulSetSpec{ + Selector: metav1.SetAsLabelSelector( + naming.PGBackRestDedicatedLabels(clusterName)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: naming.PGBackRestDedicatedLabels(clusterName), + }, + Spec: corev1.PodSpec{}, + }, + }, + }, + }, + cluster: &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + UID: types.UID(clusterUID), + }, + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + }, + }, + }, + result: testResult{ + jobCount: 0, pvcCount: 0, hostCount: 0, }, }} @@ -2564,11 +2649,12 @@ func TestCopyConfigurationResources(t *testing.T) { func TestGenerateBackupJobIntent(t *testing.T) { ctx := context.Background() t.Run("empty", func(t *testing.T) { - spec := generateBackupJobSpecIntent(ctx, + spec, err := generateBackupJobSpecIntent(ctx, &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: @@ -2581,10 +2667,10 @@ containers: - name: COMPARE_HASH value: "true" - name: CONTAINER - value: pgbackrest + value: database - name: NAMESPACE - name: SELECTOR - value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= + value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/instance,postgres-operator.crunchydata.com/role=master name: pgbackrest resources: {} securityContext: @@ -2611,23 +2697,11 @@ volumes: sources: - configMap: items: - - key: pgbackrest_repo.conf - path: pgbackrest_repo.conf + - key: pgbackrest_instance.conf + path: pgbackrest_instance.conf - key: config-hash path: config-hash - - key: pgbackrest-server.conf - path: 
~postgres-operator_server.conf name: -pgbackrest-config - - secret: - items: - - key: pgbackrest.ca-roots - path: ~postgres-operator/tls-ca.crt - - key: pgbackrest-client.crt - path: ~postgres-operator/client-tls.crt - - key: pgbackrest-client.key - mode: 384 - path: ~postgres-operator/client-tls.key - name: -pgbackrest `)) }) @@ -2637,11 +2711,12 @@ volumes: ImagePullPolicy: corev1.PullAlways, }, } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Containers[0].ImagePullPolicy, corev1.PullAlways) }) @@ -2652,11 +2727,12 @@ volumes: cluster.Spec.Backups = v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{}, } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{}) }) @@ -2669,11 +2745,12 @@ volumes: }, }, } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -2708,11 +2785,12 @@ volumes: }, }, } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Affinity, affinity) }) @@ -2721,11 +2799,12 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ PriorityClassName: initialize.String("some-priority-class"), } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Equal(t, job.Template.Spec.PriorityClassName, "some-priority-class") }) @@ -2739,11 +2818,12 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ Tolerations: tolerations, } - job := generateBackupJobSpecIntent(ctx, + job, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Tolerations, tolerations) }) @@ -2753,16 +2833,18 @@ volumes: t.Run("Undefined", func(t *testing.T) { cluster.Spec.Backups.PGBackRest.Jobs = nil - spec := generateBackupJobSpecIntent(ctx, + spec, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} - spec = generateBackupJobSpecIntent(ctx, + spec, err = generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) }) @@ -2771,9 +2853,10 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(0), } - spec := generateBackupJobSpecIntent(ctx, + spec, err := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) + assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(0)) } @@ -2784,9 +2867,10 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(100), } - spec := generateBackupJobSpecIntent(ctx, + spec, err := generateBackupJobSpecIntent(ctx, cluster, 
v1beta1.PGBackRestRepo{}, "", nil, nil,
 			)
+			assert.NilError(t, err)
 			if assert.Check(t, spec.TTLSecondsAfterFinished != nil) {
 				assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(100))
 			}
diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go
index a2fedb574..38d30926d 100644
--- a/internal/naming/annotations.go
+++ b/internal/naming/annotations.go
@@ -32,6 +32,14 @@ const (
 	// (and therefore must be recreated)
 	PGBackRestConfigHash = annotationPrefix + "pgbackrest-hash"
 
+	// PGBackRestCurrentConfig is an annotation used to indicate the name of the pgBackRest
+	// configuration associated with a specific Job as determined by either the current primary
+	// (if no dedicated repository host is enabled), or the dedicated repository host. This helps
+	// in detecting pgBackRest backup Jobs that no longer mount the proper pgBackRest
+	// configuration, e.g. because a failover has occurred, or because a dedicated repo host has
+	// been enabled or disabled.
+	PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config"
+
 	// PGBackRestRestore is the annotation that is added to a PostgresCluster to initiate an in-place
 	// restore. The value of the annotation will be a unique identifier for a restore Job (e.g. a
 	// timestamp), which will be stored in the PostgresCluster status to properly track completion
diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go
index f64004557..593d00098 100644
--- a/internal/naming/annotations_test.go
+++ b/internal/naming/annotations_test.go
@@ -20,6 +20,7 @@ func TestAnnotationsValid(t *testing.T) {
 	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup))
 	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackupJobCompletion))
 	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestConfigHash))
+	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCurrentConfig))
 	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestIPVersion))
 	assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore))
 	assert.Assert(t, nil == validation.IsQualifiedName(PostgresExporterCollectorsAnnotation))
diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go
index a7b105de4..c51f2d026 100644
--- a/internal/naming/selectors.go
+++ b/internal/naming/selectors.go
@@ -152,6 +152,13 @@ func ClusterPostgresUsers(cluster string) metav1.LabelSelector {
 	}
 }
 
+// ClusterPrimary selects things for the Primary PostgreSQL instance.
+func ClusterPrimary(cluster string) metav1.LabelSelector {
+	s := ClusterInstances(cluster)
+	s.MatchLabels[LabelRole] = RolePatroniLeader
+	return s
+}
+
 // CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster
 // PostgreSQL roles in cluster.
func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index a9d2ce987..c8617bcb7 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -148,6 +148,16 @@ func TestClusterPostgresUsers(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } +func TestClusterPrimary(t *testing.T) { + s, err := AsSelector(ClusterPrimary("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/instance", + "postgres-operator.crunchydata.com/role=master", + }, ",")) +} + func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) assert.NilError(t, err) diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index c99e952af..f4b66fad7 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -96,6 +96,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet // create an empty map for the config data initialize.Map(&cm.Data) + addDedicatedHost := RepoHostVolumeDefined(postgresCluster) pgdataDir := postgres.DataDirectory(postgresCluster) // Port will always be populated, since the API will set a default of 5432 if not provided pgPort := *postgresCluster.Spec.Port @@ -108,14 +109,13 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet postgresCluster.Spec.Backups.PGBackRest.Global, ).String() + // As the cluster transitions from having a repository host to having none, // PostgreSQL instances that have not rolled out expect to mount a server // config file. Always populate that file so those volumes stay valid and - // Kubernetes propagates their contents to those pods. The repo host name - // given below should always be set, but this guards for cases when it might - // not be. + // Kubernetes propagates their contents to those pods. cm.Data[serverConfigMapKey] = "" - if repoHostName != "" { + if addDedicatedHost && repoHostName != "" { cm.Data[serverConfigMapKey] = iniGeneratedWarning + serverConfig(postgresCluster).String() diff --git a/internal/pgbackrest/config.md b/internal/pgbackrest/config.md index dd1127643..f19c9ac1e 100644 --- a/internal/pgbackrest/config.md +++ b/internal/pgbackrest/config.md @@ -6,17 +6,17 @@ # pgBackRest Configuration Overview -The initial pgBackRest configuration for the Postgres Clusters is designed to stand up a +The initial pgBackRest configuration for the Postgres Clusters is designed to stand up a minimal configuration for use by the various pgBackRest functions needed by the Postgres cluster. These settings are meant to be the minimally required settings, with other settings supported through the use of custom configurations. -During initial cluster creation, four pgBackRest use cases are involved. +During initial cluster creation, four pgBackRest use cases are involved. -These settings are configured in either the [global] or [stanza] sections of the +These settings are configured in either the [global] or [stanza] sections of the pgBackRest configuration based on their designation in the pgBackRest code. 
For more information on the above, and other settings, please see
-https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c
+<https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c>
 
 As shown, the settings with the `cfgSectionGlobal` designation are
@@ -24,18 +24,17 @@ As shown, the settings with the `cfgSectionGlobal` designation are
 
 `log-level-file`: Level for file logging. Set to 'off' when the repo host has no volume.
 
-`repo-path`: Path where backups and archive are stored. 
+`repo-path`: Path where backups and archive are stored.
 	The repository is where pgBackRest stores backups and archives WAL segments.
 
 `repo-host`: Repository host when operating remotely via TLS.
 
-
 The settings with the `cfgSectionStanza` designation are
 
 `pg-host`: PostgreSQL host for operating remotely via TLS.
 
 `pg-path`: The path of the PostgreSQL data directory.
-	This should be the same as the data_directory setting in postgresql.conf. 
+	This should be the same as the data_directory setting in postgresql.conf.
 
 `pg-port`: The port that PostgreSQL is running on.
 
@@ -44,14 +43,13 @@ The settings with the `cfgSectionStanza` designation are
 For more information on these and other configuration settings, please see
 `https://pgbackrest.org/configuration.html`.
 
-# Configuration Per Function
+## Configuration Per Function
 
-Below, each of the four configuration sets is outlined by use case. Please note that certain 
-settings have acceptable defaults for the cluster's usage (such as for `repo1-type` which 
+Below, each of the four configuration sets is outlined by use case. Please note that certain
+settings have acceptable defaults for the cluster's usage (such as for `repo1-type` which
 defaults to `posix`), so those settings are not included.
 
-
-1. Primary Database Pod
+1. Primary Database Pod
 
 [global]
 log-path
@@ -86,28 +84,26 @@ log-path
 
 [global]
 log-path
 
-
-# Initial pgBackRest Configuration
+## Initial pgBackRest Configuration
 
 In order to be used by the Postgres cluster, these default configurations are stored in
-a configmap. This configmap is named with the following convention `<clustername>-pgbackrest-config`, 
+a configmap. This configmap is named with the following convention `<clustername>-pgbackrest-config`,
 such that a cluster named 'mycluster' would have a configuration configmap named
 `mycluster-pgbackrest-config`.
 
-As noted above, there are three distinct default configurations, each of which is referenced 
+As noted above, there are three distinct default configurations, each of which is referenced
 by a key value in the configmap's data section.
 
 For the primary database pod, the key is `pgbackrest_primary.conf`.
 
 For the pgBackRest repo pod, the key is `pgbackrest_repo.conf`.
 
 Finally, for the pgBackRest stanza job pod and the initial pgBackRest backup job pod, the
 key is `pgbackrest_job.conf`.
- 
-For each pod, the relevant configuration file is mounted as a projected volume named 
+
+For each pod, the relevant configuration file is mounted as a projected volume named
 `pgbackrest-config-vol`. The configuration file will be found in the `/etc/pgbackrest` directory
-of the relevant container and is named `pgbackrest.conf`, matching the default pgBackRest location.
+For more information, please see `https://pgbackrest.org/configuration.html#introduction` - -# Custom Configuration Support +## Custom Configuration Support TODO(tjmoore4): Document custom configuration solution once implemented @@ -116,7 +112,7 @@ flag with the desired pgBackRest command. This should point to the directory pat where the `*.conf` file with the custom configuration is located. This file will be added as a projected volume and must be formatted in the standard -pgBackRest INI convention. Please note that any of the configuration settings listed +pgBackRest INI convention. Please note that any of the configuration settings listed above MUST BE CONFIGURED VIA THE POSTGRESCLUSTER SPEC so as to avoid errors. For more information, please see @@ -140,7 +136,7 @@ command-line or top-to-bottom in INI files. The remaining options must be set exactly once. `pgbackrest` exits non-zero when the option occurs twice on the command-line or twice in a file: -``` +```text ERROR: [031]: option 'io-timeout' cannot be set multiple times ``` diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index a314ad310..cdbaa725a 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -46,6 +46,54 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") }) + t.Run("NoVolumeRepoCloudRepoPresent", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Global = map[string]string{ + "repo1-test": "something", + } + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{Bucket: "g-bucket"}, + }, + } + + configmap, err := CreatePGBackRestConfigMapIntent(context.Background(), cluster, + "", "anumber", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-gcs-bucket = g-bucket +repo1-path = /pgbackrest/repo1 +repo1-test = something +repo1-type = gcs +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + }) + t.Run("DedicatedRepoHost", func(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Backups.PGBackRest.Global = map[string]string{ diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index 4e789d137..907012ac1 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -105,15 +105,22 @@ func AddConfigToInstancePod( {Key: ConfigHashKey, Path: ConfigHashKey}, } + // As the cluster transitions from having a repository host to having none, + // PostgreSQL instances that have not rolled out expect to mount client + // certificates. 
Specify those files are optional so the configuration + // volumes stay valid and Kubernetes propagates their contents to those pods. secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Optional = initialize.Bool(true) - configmap.ConfigMap.Items = append( - configmap.ConfigMap.Items, corev1.KeyToPath{ - Key: serverConfigMapKey, - Path: serverConfigProjectionPath, - }) - secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + if RepoHostVolumeDefined(cluster) { + configmap.ConfigMap.Items = append( + configmap.ConfigMap.Items, corev1.KeyToPath{ + Key: serverConfigMapKey, + Path: serverConfigProjectionPath, + }) + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + } // Start with a copy of projections specified in the cluster. Items later in // the list take precedence over earlier items (that is, last write wins). @@ -406,13 +413,15 @@ func InstanceCertificates(ctx context.Context, ) error { var err error - initialize.Map(&outInstanceCertificates.Data) + if RepoHostVolumeDefined(inCluster) { + initialize.Map(&outInstanceCertificates.Data) - if err == nil { - outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) - } - if err == nil { - outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) + if err == nil { + outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) + } + if err == nil { + outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) + } } return err diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index ebd73bc4c..530541706 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -231,19 +231,7 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash - - key: pgbackrest-server.conf - path: ~postgres-operator_server.conf name: hippo-pgbackrest-config - - secret: - items: - - key: pgbackrest.ca-roots - path: ~postgres-operator/tls-ca.crt - - key: pgbackrest-client.crt - path: ~postgres-operator/client-tls.crt - - key: pgbackrest-client.key - mode: 384 - path: ~postgres-operator/client-tls.key - name: hippo-pgbackrest `)) }) @@ -266,19 +254,7 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash - - key: pgbackrest-server.conf - path: ~postgres-operator_server.conf name: hippo-pgbackrest-config - - secret: - items: - - key: pgbackrest.ca-roots - path: ~postgres-operator/tls-ca.crt - - key: pgbackrest-client.crt - path: ~postgres-operator/client-tls.crt - - key: pgbackrest-client.key - mode: 384 - path: ~postgres-operator/client-tls.key - name: hippo-pgbackrest `)) }) @@ -319,6 +295,7 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) }) } diff --git a/internal/pgbackrest/tls-server.md b/internal/pgbackrest/tls-server.md index 7c8f191c3..56af386d5 100644 --- a/internal/pgbackrest/tls-server.md +++ b/internal/pgbackrest/tls-server.md @@ -12,10 +12,8 @@ on different pods: - [dedicated repository host](https://pgbackrest.org/user-guide.html#repo-host) - [backup from standby](https://pgbackrest.org/user-guide.html#standby-backup) -When a PostgresCluster is configured to store backups on a PVC, the dedicated -repository host is used to make that PVC 
available to all PostgreSQL instances
-in the cluster. Regardless of whether the repo host has a defined PVC, it
-functions as the server for the pgBackRest clients that run on the Instances.
+When a PostgresCluster is configured to store backups on a PVC, we start a dedicated
+repository host to make that PVC available to all PostgreSQL instances in the cluster.
 
 The repository host runs a `pgbackrest` server that is secured through TLS
 and [certificates][]. When performing backups, it connects to `pgbackrest` servers
@@ -26,32 +24,30 @@ to the repository host to [send and receive WAL files][archiving].
 
 [archiving]: https://www.postgresql.org/docs/current/continuous-archiving.html
 [certificates]: certificates.md
 
-
 The `pgbackrest` command acts as a TLS client and connects to a pgBackRest TLS
 server when `pg-host-type=tls` and/or `repo-host-type=tls`. The default for
 these is `ssh`:
 
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L3771
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L6137
-
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L3771>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L6137>
 
 The pgBackRest TLS server is configured through the `tls-server-*`
 [options](config.md). In pgBackRest 2.38, changing any of these options or
 changing certificate contents requires a reload of the server, as shown in
 the "Setup TLS Server" section of the documentation, with the command
 configured as
 
-```
+```text
 ExecReload=kill -HUP $MAINPID
 ```
 
-- https://pgbackrest.org/user-guide-rhel.html#repo-host/setup-tls
+- <https://pgbackrest.org/user-guide-rhel.html#repo-host/setup-tls>
 
 - `tls-server-address`, `tls-server-port`
The network address and port on which to listen. pgBackRest 2.38 listens on
   the *first* address returned by `getaddrinfo()`. There is no way to listen
   on all interfaces.
 
-  - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/server.c#L172
-  - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/common.c#L87
+  - <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/server.c#L172>
+  - <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/common.c#L87>
 
 - `tls-server-cert-file`, `tls-server-key-file`
The [certificate chain][certificates] and private key pair used to encrypt connections.
@@ -65,12 +61,11 @@ ExecReload=kill -HUP $MAINPID
   to interact with.
   [Required](https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L8751).
 
-
 In pgBackRest 2.38, as mentioned above, sending SIGHUP causes a configuration reload.
 
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L178
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L178>
 
-```
+```text
 P00 DETAIL: configuration reload begin
 P00 INFO: server command begin 2.38...
 P00 DETAIL: configuration reload end
@@ -78,20 +73,18 @@ P00 DETAIL: configuration reload end
 
 Sending SIGINT to the TLS server causes it to exit with code 63, TermError.
 
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L73-L75
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L62
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/error.auto.c#L48
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L73-L75>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L62>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/error.auto.c#L48>
-
-```
+```text
 P00 INFO: server command end: terminated on signal [SIGINT]
 ```
 
 Sending SIGTERM exits the signal loop and leads to command termination.
 
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L194
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L194>
-
-```
+```text
 P00 INFO: server command end: completed successfully
 ```

diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go
index fe8585d42..c2796ffe5 100644
--- a/internal/upgradecheck/http.go
+++ b/internal/upgradecheck/http.go
@@ -70,7 +70,7 @@ func checkForUpgrades(ctx context.Context, url, versionString string, backoff wa
 	var headerPayloadStruct *clientUpgradeData
 
 	// Prep request
-	req, err := http.NewRequest("GET", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 	if err == nil {
 		// generateHeader always returns some sort of struct, using defaults/nil values
 		// in case some of the checks return errors
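---

Reviewer note (not part of the patch): the exec-target logic above hinges on the new `naming.ClusterPrimary`, which narrows `ClusterInstances` to the Patroni leader so backup Jobs can exec into the `database` container when no repo host volume is defined. Below is a minimal, self-contained sketch of the selector it produces, rebuilt with plain `apimachinery` calls and the label keys asserted verbatim in `TestClusterPrimary`; the `clusterPrimary` helper name and the `hippo` value are illustrative only.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// clusterPrimary mirrors what naming.ClusterPrimary builds in the patch:
// instance pods of the cluster that currently hold the Patroni leader role.
func clusterPrimary(cluster string) *metav1.LabelSelector {
	return &metav1.LabelSelector{
		MatchLabels: map[string]string{
			"postgres-operator.crunchydata.com/cluster": cluster,
			"postgres-operator.crunchydata.com/role":    "master",
		},
		// ClusterInstances additionally requires the instance label to
		// exist, with any value.
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "postgres-operator.crunchydata.com/instance",
			Operator: metav1.LabelSelectorOpExists,
		}},
	}
}

func main() {
	s, err := metav1.LabelSelectorAsSelector(clusterPrimary("hippo"))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.String())
	// Output:
	// postgres-operator.crunchydata.com/cluster=hippo,postgres-operator.crunchydata.com/instance,postgres-operator.crunchydata.com/role=master
}
```

Because the selector implementation sorts requirements by key, the rendered string is deterministic. That is what lets the tests compare against a fixed string and keeps the backup Job's `SELECTOR` environment variable stable across reconciles.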