Skip to content

Commit 9dc09fc

Browse files
committed
Add validation for kubevirt-velero-plugin and UT
This update introduces validation logic to ensure that the kubevirt-velero-plugin is properly enabled and configured within the Velero deployment for DR protection of VirtualMachines using the internal recipe `vm-recipe`. The plugin is essential for enabling backup and restore operations for KubeVirt-managed VirtualMachines (VMs), DataVolumes (DVs), PersistentVolumeClaims (PVCs), and associated resources. Includes unit tests to verify the correctness of the validation logic.

Signed-off-by: pruthvitd <[email protected]>
1 parent aa98b96 commit 9dc09fc

File tree

6 files changed

+531
-91
lines changed

6 files changed

+531
-91
lines changed

Makefile

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -181,6 +181,9 @@ test-vrg-vs: generate manifests envtest ## Run VolumeReplicationGroupVolSync tes
181181
test-vrg-recipe: generate manifests envtest ## Run VolumeReplicationGroupRecipe tests.
182182
go test ./internal/controller -coverprofile cover.out -ginkgo.focus VolumeReplicationGroupRecipe
183183

184+
test-vrg-vmrecipe: generate manifests envtest
185+
go test ./internal/controller -coverprofile cover.out -ginkgo.focus VolumeReplicationGroupVMRecipe
186+
184187
test-vrg-kubeobjects: generate manifests envtest ## Run VolumeReplicationGroupKubeObjects tests.
185188
go test ./internal/controller -coverprofile cover.out -ginkgo.focus VRG_KubeObjectProtection
186189

internal/controller/drplacementcontrol_controller.go

Lines changed: 1 addition & 83 deletions
Original file line number | Diff line number | Diff line change
@@ -1404,9 +1404,6 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
14041404
ProtectedPVCs: extractProtectedPVCNames(vrg),
14051405
}
14061406

1407-
drpc.Status.ResourceConditions.Conditions = assignConditionsWithConflictCheck(
1408-
vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
1409-
14101407
if vrg.Status.PVCGroups != nil {
14111408
drpc.Status.ResourceConditions.ResourceMeta.PVCGroups = vrg.Status.PVCGroups
14121409
}
@@ -1421,7 +1418,7 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
14211418
drpc.Status.LastKubeObjectProtectionTime = &vrg.Status.KubeObjectProtection.CaptureToRecoverFrom.EndTime
14221419
}
14231420

1424-
updateDRPCProtectedCondition(drpc, vrg, clusterName)
1421+
updateDRPCProtectedCondition(drpc, vrg, clusterName, vrgs)
14251422
}
14261423

14271424
// getVRG retrieves a VRG either from the provided map or fetches it from the managed cluster/S3 store.
@@ -1482,85 +1479,6 @@ func extractProtectedPVCNames(vrg *rmn.VolumeReplicationGroup) []string {
14821479
return protectedPVCs
14831480
}
14841481

1485-
// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
1486-
func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup, conflictType string) *metav1.Condition {
1487-
var selectedCondition *metav1.Condition
1488-
1489-
for _, vrg := range vrgs {
1490-
condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
1491-
if condition != nil && condition.Status == metav1.ConditionFalse {
1492-
// Prioritize primary VRG's condition if available
1493-
if isVRGPrimary(vrg) {
1494-
return condition // Exit early if primary VRG condition is found
1495-
}
1496-
1497-
// Assign the first non-primary VRG's condition if no primary found yet
1498-
if selectedCondition == nil {
1499-
selectedCondition = condition
1500-
}
1501-
}
1502-
}
1503-
1504-
return selectedCondition
1505-
}
1506-
1507-
// assignConditionsWithConflictCheck assigns conditions from a given VRG while prioritizing conflict conditions.
1508-
func assignConditionsWithConflictCheck(vrgs map[string]*rmn.VolumeReplicationGroup,
1509-
vrg *rmn.VolumeReplicationGroup, conflictType string,
1510-
) []metav1.Condition {
1511-
conditions := &vrg.Status.Conditions
1512-
conflictCondition := findConflictCondition(vrgs, conflictType)
1513-
1514-
// Ensure the conflict condition is present in the conditions list
1515-
if conflictCondition != nil {
1516-
setConflictStatusCondition(conditions, *conflictCondition)
1517-
}
1518-
1519-
return *conditions
1520-
}
1521-
1522-
func setConflictStatusCondition(existingConditions *[]metav1.Condition,
1523-
newCondition metav1.Condition,
1524-
) metav1.Condition {
1525-
if existingConditions == nil {
1526-
existingConditions = &[]metav1.Condition{}
1527-
}
1528-
1529-
existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
1530-
if existingCondition == nil {
1531-
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
1532-
*existingConditions = append(*existingConditions, newCondition)
1533-
1534-
return newCondition
1535-
}
1536-
1537-
if existingCondition.Status != newCondition.Status ||
1538-
existingCondition.Reason != newCondition.Reason {
1539-
existingCondition.Status = newCondition.Status
1540-
existingCondition.Reason = newCondition.Reason
1541-
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
1542-
}
1543-
1544-
defaultValue := "none"
1545-
if newCondition.Reason == "" {
1546-
newCondition.Reason = defaultValue
1547-
}
1548-
1549-
if newCondition.Message == "" {
1550-
newCondition.Message = defaultValue
1551-
}
1552-
1553-
existingCondition.Reason = newCondition.Reason
1554-
existingCondition.Message = newCondition.Message
1555-
// TODO: Why not update lastTranTime if the above change?
1556-
1557-
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
1558-
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
1559-
}
1560-
1561-
return *existingCondition
1562-
}
1563-
15641482
// clusterForVRGStatus determines which cluster's VRG should be inspected for status updates to DRPC
15651483
func (r *DRPlacementControlReconciler) clusterForVRGStatus(
15661484
drpc *rmn.DRPlacementControl, userPlacement client.Object, log logr.Logger,

internal/controller/protected_condition.go

Lines changed: 103 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -5,11 +5,13 @@ package controllers
55

66
import (
77
"fmt"
8+
"time"
89

910
"k8s.io/apimachinery/pkg/api/meta"
1011
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1112

1213
rmn "github.com/ramendr/ramen/api/v1alpha1"
14+
rmnutil "github.com/ramendr/ramen/internal/controller/util"
1315
)
1416

1517
func updateProtectedConditionUnknown(drpc *rmn.DRPlacementControl, clusterName string) {
@@ -29,16 +31,17 @@ func updateDRPCProtectedCondition(
2931
drpc *rmn.DRPlacementControl,
3032
vrg *rmn.VolumeReplicationGroup,
3133
clusterName string,
34+
vrgs map[string]*rmn.VolumeReplicationGroup,
3235
) {
3336
if updateVRGClusterDataReady(drpc, vrg, clusterName) {
3437
return
3538
}
3639

37-
if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName) {
40+
if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName, vrgs) {
3841
return
3942
}
4043

41-
if updateVRGNoClusterDataConflict(drpc, vrg, clusterName) {
44+
if updateVRGNoClusterDataConflict(drpc, vrg, vrgs) {
4245
return
4346
}
4447

@@ -65,13 +68,19 @@ func updateDRPCProtectedForReplicationState(
6568
drpc *rmn.DRPlacementControl,
6669
vrg *rmn.VolumeReplicationGroup,
6770
clusterName string,
71+
vrgs map[string]*rmn.VolumeReplicationGroup,
6872
) bool {
73+
var fromCluster string
74+
75+
drpc.Status.ResourceConditions.Conditions, fromCluster = mergeVRGsConditions(
76+
vrgs, vrg, VRGConditionTypeDataReady)
77+
6978
switch vrg.Spec.ReplicationState {
7079
case rmn.Primary:
71-
return updateVRGDataReadyAsPrimary(drpc, vrg, clusterName) ||
80+
return updateVRGDataReadyAsPrimary(drpc, vrg, fromCluster) ||
7281
updateVRGDataProtectedAsPrimary(drpc, vrg, clusterName)
7382
case rmn.Secondary:
74-
return updateVRGDataReadyAsSecondary(drpc, vrg, clusterName) ||
83+
return updateVRGDataReadyAsSecondary(drpc, vrg, fromCluster) ||
7584
updateVRGDataProtectedAsSecondary(drpc, vrg, clusterName)
7685
}
7786

@@ -186,8 +195,13 @@ func updateVRGDataProtectedAsPrimary(drpc *rmn.DRPlacementControl,
186195
// - Returns a bool that is true if status was updated, and false otherwise
187196
func updateVRGNoClusterDataConflict(drpc *rmn.DRPlacementControl,
188197
vrg *rmn.VolumeReplicationGroup,
189-
clusterName string,
198+
vrgs map[string]*rmn.VolumeReplicationGroup,
190199
) bool {
200+
var clusterName string
201+
202+
drpc.Status.ResourceConditions.Conditions, clusterName = mergeVRGsConditions(
203+
vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
204+
191205
return genericUpdateProtectedForCondition(drpc, vrg, clusterName, VRGConditionTypeNoClusterDataConflict,
192206
"workload data protection", "checking for workload data conflict", "conflicting workload data")
193207
}
@@ -306,3 +320,87 @@ func updateMiscVRGStatus(drpc *rmn.DRPlacementControl,
306320

307321
return !updated
308322
}
323+
324+
// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
325+
func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup,
326+
conflictType string,
327+
) (*metav1.Condition, string) {
328+
var selectedCondition *metav1.Condition
329+
330+
var clusterName string
331+
332+
for _, vrg := range vrgs {
333+
condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
334+
if condition != nil && condition.Status == metav1.ConditionFalse {
335+
// Prioritize primary VRG's condition if available
336+
clusterName = vrg.GetAnnotations()[DestinationClusterAnnotationKey]
337+
if isVRGPrimary(vrg) {
338+
return condition, clusterName // Exit early if primary VRG condition is found
339+
}
340+
341+
// Assign the first non-primary VRG's condition if no primary found yet
342+
if selectedCondition == nil {
343+
selectedCondition = condition
344+
}
345+
}
346+
}
347+
348+
return selectedCondition, clusterName
349+
}
350+
351+
// mergeVRGsConditions assigns conditions from a given VRG while prioritizing conflict conditions.
352+
func mergeVRGsConditions(vrgs map[string]*rmn.VolumeReplicationGroup,
353+
vrg *rmn.VolumeReplicationGroup, conflictType string,
354+
) ([]metav1.Condition, string) {
355+
conditions := &vrg.Status.Conditions
356+
conflictCondition, clusterName := findConflictCondition(vrgs, conflictType)
357+
358+
// Ensure the conflict condition is present in the conditions list
359+
if conflictCondition != nil {
360+
setConflictStatusCondition(conditions, *conflictCondition)
361+
}
362+
363+
return *conditions, clusterName
364+
}
365+
366+
func setConflictStatusCondition(existingConditions *[]metav1.Condition,
367+
newCondition metav1.Condition,
368+
) metav1.Condition {
369+
if existingConditions == nil {
370+
existingConditions = &[]metav1.Condition{}
371+
}
372+
373+
existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
374+
if existingCondition == nil {
375+
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
376+
*existingConditions = append(*existingConditions, newCondition)
377+
378+
return newCondition
379+
}
380+
381+
if existingCondition.Status != newCondition.Status ||
382+
existingCondition.Reason != newCondition.Reason {
383+
existingCondition.Status = newCondition.Status
384+
existingCondition.Reason = newCondition.Reason
385+
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
386+
}
387+
388+
defaultValue := "none"
389+
if newCondition.Reason == "" {
390+
newCondition.Reason = defaultValue
391+
}
392+
393+
if newCondition.Message == "" {
394+
newCondition.Message = defaultValue
395+
}
396+
397+
existingCondition.Reason = newCondition.Reason
398+
existingCondition.Message = newCondition.Message
399+
// TODO: Why not update lastTranTime if the above change?
400+
401+
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
402+
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
403+
}
404+
405+
return *existingCondition
406+
}

internal/controller/volumereplicationgroup_controller.go

Lines changed: 10 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -579,6 +579,16 @@ func (v *VRGInstance) processVRG() ctrl.Result {
579579

580580
v.log.Info("Recipe", "elements", v.recipeElements)
581581

582+
if err := recipeVMBackupValidate(v.ctx, v.reconciler.Client, v.recipeElements,
583+
*v.instance, *v.ramenConfig, v.log); err != nil {
584+
return v.invalid(
585+
err,
586+
"Velero misconfiguration detected; disaster recovery for the VM workload will not succeed. "+
587+
"Verify Velero configuration and enable the required plugins",
588+
false,
589+
)
590+
}
591+
582592
if err := v.updatePVCList(); err != nil {
583593
return v.invalid(err, "Failed to process list of PVCs to protect", true)
584594
}

0 commit comments

Comments (0)