
Commit 6c8ac7a

Validate kubevirt plugin
1 parent 80962b0 commit 6c8ac7a

File tree

3 files changed: +192 -88 lines changed

internal/controller/drplacementcontrol_controller.go

Lines changed: 1 addition & 83 deletions
@@ -1368,9 +1368,6 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
 		ProtectedPVCs: extractProtectedPVCNames(vrg),
 	}
 
-	drpc.Status.ResourceConditions.Conditions = assignConditionsWithConflictCheck(
-		vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
-
 	if vrg.Status.PVCGroups != nil {
 		drpc.Status.ResourceConditions.ResourceMeta.PVCGroups = vrg.Status.PVCGroups
 	}
@@ -1385,7 +1382,7 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
 		drpc.Status.LastKubeObjectProtectionTime = &vrg.Status.KubeObjectProtection.CaptureToRecoverFrom.EndTime
 	}
 
-	updateDRPCProtectedCondition(drpc, vrg, clusterName)
+	updateDRPCProtectedCondition(drpc, vrg, clusterName, vrgs)
 }
 
 // getVRG retrieves a VRG either from the provided map or fetches it from the managed cluster/S3 store.
@@ -1446,85 +1443,6 @@ func extractProtectedPVCNames(vrg *rmn.VolumeReplicationGroup) []string {
 	return protectedPVCs
 }
 
-// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
-func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup, conflictType string) *metav1.Condition {
-	var selectedCondition *metav1.Condition
-
-	for _, vrg := range vrgs {
-		condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
-		if condition != nil && condition.Status == metav1.ConditionFalse {
-			// Prioritize primary VRG's condition if available
-			if isVRGPrimary(vrg) {
-				return condition // Exit early if primary VRG condition is found
-			}
-
-			// Assign the first non-primary VRG's condition if no primary found yet
-			if selectedCondition == nil {
-				selectedCondition = condition
-			}
-		}
-	}
-
-	return selectedCondition
-}
-
-// assignConditionsWithConflictCheck assigns conditions from a given VRG while prioritizing conflict conditions.
-func assignConditionsWithConflictCheck(vrgs map[string]*rmn.VolumeReplicationGroup,
-	vrg *rmn.VolumeReplicationGroup, conflictType string,
-) []metav1.Condition {
-	conditions := &vrg.Status.Conditions
-	conflictCondition := findConflictCondition(vrgs, conflictType)
-
-	// Ensure the conflict condition is present in the conditions list
-	if conflictCondition != nil {
-		setConflictStatusCondition(conditions, *conflictCondition)
-	}
-
-	return *conditions
-}
-
-func setConflictStatusCondition(existingConditions *[]metav1.Condition,
-	newCondition metav1.Condition,
-) metav1.Condition {
-	if existingConditions == nil {
-		existingConditions = &[]metav1.Condition{}
-	}
-
-	existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
-	if existingCondition == nil {
-		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
-		*existingConditions = append(*existingConditions, newCondition)
-
-		return newCondition
-	}
-
-	if existingCondition.Status != newCondition.Status ||
-		existingCondition.Reason != newCondition.Reason {
-		existingCondition.Status = newCondition.Status
-		existingCondition.Reason = newCondition.Reason
-		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
-	}
-
-	defaultValue := "none"
-	if newCondition.Reason == "" {
-		newCondition.Reason = defaultValue
-	}
-
-	if newCondition.Message == "" {
-		newCondition.Message = defaultValue
-	}
-
-	existingCondition.Reason = newCondition.Reason
-	existingCondition.Message = newCondition.Message
-	// TODO: Why not update lastTranTime if the above change?
-
-	if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
-		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
-	}
-
-	return *existingCondition
-}
-
 // clusterForVRGStatus determines which cluster's VRG should be inspected for status updates to DRPC
 func (r *DRPlacementControlReconciler) clusterForVRGStatus(
 	drpc *rmn.DRPlacementControl, userPlacement client.Object, log logr.Logger,

internal/controller/protected_condition.go

Lines changed: 103 additions & 5 deletions
@@ -5,8 +5,10 @@ package controllers
 
 import (
 	"fmt"
+	"time"
 
 	rmn "github.com/ramendr/ramen/api/v1alpha1"
+	rmnutil "github.com/ramendr/ramen/internal/controller/util"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -28,16 +30,17 @@ func updateDRPCProtectedCondition(
 	drpc *rmn.DRPlacementControl,
 	vrg *rmn.VolumeReplicationGroup,
 	clusterName string,
+	vrgs map[string]*rmn.VolumeReplicationGroup,
 ) {
 	if updateVRGClusterDataReady(drpc, vrg, clusterName) {
 		return
 	}
 
-	if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName) {
+	if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName, vrgs) {
 		return
 	}
 
-	if updateVRGNoClusterDataConflict(drpc, vrg, clusterName) {
+	if updateVRGNoClusterDataConflict(drpc, vrg, vrgs) {
 		return
 	}
 
@@ -64,13 +67,19 @@ func updateDRPCProtectedForReplicationState(
 	drpc *rmn.DRPlacementControl,
 	vrg *rmn.VolumeReplicationGroup,
 	clusterName string,
+	vrgs map[string]*rmn.VolumeReplicationGroup,
 ) bool {
+	var fromCluster string
+
+	drpc.Status.ResourceConditions.Conditions, fromCluster = mergeVRGsConditions(
+		vrgs, vrg, VRGConditionTypeDataReady)
+
 	switch vrg.Spec.ReplicationState {
 	case rmn.Primary:
-		return updateVRGDataReadyAsPrimary(drpc, vrg, clusterName) ||
+		return updateVRGDataReadyAsPrimary(drpc, vrg, fromCluster) ||
 			updateVRGDataProtectedAsPrimary(drpc, vrg, clusterName)
 	case rmn.Secondary:
-		return updateVRGDataReadyAsSecondary(drpc, vrg, clusterName) ||
+		return updateVRGDataReadyAsSecondary(drpc, vrg, fromCluster) ||
 			updateVRGDataProtectedAsSecondary(drpc, vrg, clusterName)
 	}
 
@@ -185,8 +194,13 @@ func updateVRGDataProtectedAsPrimary(drpc *rmn.DRPlacementControl,
 // - Returns a bool that is true if status was updated, and false otherwise
 func updateVRGNoClusterDataConflict(drpc *rmn.DRPlacementControl,
 	vrg *rmn.VolumeReplicationGroup,
-	clusterName string,
+	vrgs map[string]*rmn.VolumeReplicationGroup,
 ) bool {
+	var clusterName string
+
+	drpc.Status.ResourceConditions.Conditions, clusterName = mergeVRGsConditions(
+		vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
+
 	return genericUpdateProtectedForCondition(drpc, vrg, clusterName, VRGConditionTypeNoClusterDataConflict,
 		"workload data protection", "checking for workload data conflict", "conflicting workload data")
 }
@@ -305,3 +319,87 @@ func updateMiscVRGStatus(drpc *rmn.DRPlacementControl,
 
 	return !updated
 }
+
+// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
+func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup,
+	conflictType string,
+) (*metav1.Condition, string) {
+	var selectedCondition *metav1.Condition
+
+	var clusterName string
+
+	for _, vrg := range vrgs {
+		condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
+		if condition != nil && condition.Status == metav1.ConditionFalse {
+			// Prioritize primary VRG's condition if available
+			clusterName = vrg.GetAnnotations()[DestinationClusterAnnotationKey]
+			if isVRGPrimary(vrg) {
+				return condition, clusterName // Exit early if primary VRG condition is found
+			}
+
+			// Assign the first non-primary VRG's condition if no primary found yet
+			if selectedCondition == nil {
+				selectedCondition = condition
+			}
+		}
+	}
+
+	return selectedCondition, clusterName
+}
+
+// mergeVRGsConditions assigns conditions from a given VRG while prioritizing conflict conditions.
+func mergeVRGsConditions(vrgs map[string]*rmn.VolumeReplicationGroup,
+	vrg *rmn.VolumeReplicationGroup, conflictType string,
+) ([]metav1.Condition, string) {
+	conditions := &vrg.Status.Conditions
+	conflictCondition, clusterName := findConflictCondition(vrgs, conflictType)
+
+	// Ensure the conflict condition is present in the conditions list
+	if conflictCondition != nil {
+		setConflictStatusCondition(conditions, *conflictCondition)
+	}
+
+	return *conditions, clusterName
+}
+
+func setConflictStatusCondition(existingConditions *[]metav1.Condition,
+	newCondition metav1.Condition,
+) metav1.Condition {
+	if existingConditions == nil {
+		existingConditions = &[]metav1.Condition{}
+	}
+
+	existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*existingConditions = append(*existingConditions, newCondition)
+
+		return newCondition
+	}
+
+	if existingCondition.Status != newCondition.Status ||
+		existingCondition.Reason != newCondition.Reason {
+		existingCondition.Status = newCondition.Status
+		existingCondition.Reason = newCondition.Reason
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	defaultValue := "none"
+	if newCondition.Reason == "" {
+		newCondition.Reason = defaultValue
+	}
+
+	if newCondition.Message == "" {
+		newCondition.Message = defaultValue
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+	// TODO: Why not update lastTranTime if the above change?
+
+	if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	return *existingCondition
+}
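Note: the conflict-condition helpers moved into this file keep the selection rule they had in drplacementcontrol_controller.go, and now also return the cluster name read from the matching VRG's DestinationClusterAnnotationKey annotation: a False condition reported by the primary VRG wins, otherwise the first False condition from a secondary is used. The in-package sketch below is not part of this commit; the rmn field and constant names follow the diff above, the cluster names are made up, and it assumes isVRGPrimary() reports true for a VRG whose Spec.ReplicationState is rmn.Primary.

// protected_condition_sketch_test.go -- a minimal sketch, not part of this commit.
package controllers

import (
	"testing"

	rmn "github.com/ramendr/ramen/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// vrgReportingConflict builds a VRG whose NoClusterDataConflict condition is False,
// annotated with the cluster name findConflictCondition is expected to return.
func vrgReportingConflict(state rmn.ReplicationState, cluster string) *rmn.VolumeReplicationGroup {
	return &rmn.VolumeReplicationGroup{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{DestinationClusterAnnotationKey: cluster},
		},
		Spec: rmn.VolumeReplicationGroupSpec{ReplicationState: state},
		Status: rmn.VolumeReplicationGroupStatus{
			Conditions: []metav1.Condition{{
				Type:   VRGConditionTypeNoClusterDataConflict,
				Status: metav1.ConditionFalse,
				Reason: "Conflict",
			}},
		},
	}
}

// The primary VRG's False condition (and its cluster annotation) should win
// even when a secondary VRG reports the same conflict.
func TestFindConflictConditionPrefersPrimary(t *testing.T) {
	vrgs := map[string]*rmn.VolumeReplicationGroup{
		"dr1": vrgReportingConflict(rmn.Primary, "dr1"),
		"dr2": vrgReportingConflict(rmn.Secondary, "dr2"),
	}

	condition, cluster := findConflictCondition(vrgs, VRGConditionTypeNoClusterDataConflict)
	if condition == nil || cluster != "dr1" {
		t.Fatalf("expected the primary VRG's condition from cluster dr1, got %q", cluster)
	}
}

Because all of these helpers are unexported, a sketch like this would have to live in the same controllers package.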

internal/controller/vrg_recipe.go

Lines changed: 88 additions & 0 deletions
@@ -17,6 +17,7 @@ import (
 	"github.com/ramendr/ramen/internal/controller/kubeobjects"
 	"github.com/ramendr/ramen/internal/controller/util"
 	recipev1 "github.com/ramendr/recipe/api/v1alpha1"
+	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -165,6 +166,10 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu
 		return recipeElements, fmt.Errorf("recipe %v namespaces validation error: %w", recipeNamespacedName.String(), err)
 	}
 
+	if err := recipeVMBackupValidate(ctx, reader, recipeElements, vrg, ramenConfig, log); err != nil {
+		return recipeElements, fmt.Errorf("recipe %v VM backup validation error: %w", recipeNamespacedName.String(), err)
+	}
+
 	return recipeElements, nil
 }
 
@@ -329,6 +334,89 @@ func recipeNamespacesValidate(recipeElements util.RecipeElements, vrg ramen.Volu
 	return nil
 }
 
+func recipeVMBackupValidate(ctx context.Context, reader client.Reader, recipeElements util.RecipeElements,
+	vrg ramen.VolumeReplicationGroup,
+	ramenConfig ramen.RamenConfig, log logr.Logger,
+) error {
+	vmProtection := isProtectingVirtualMachine(recipeElements, vrg, log)
+
+	if !vmProtection {
+		return nil
+	}
+
+	if ramenConfig.KubeObjectProtection.VeleroNamespaceName != "" ||
+		len(ramenConfig.KubeObjectProtection.VeleroNamespaceName) > 0 {
+		if isKubeVirtEnabled(ctx, reader, ramenConfig, log) {
+			return nil
+		}
+
+		return fmt.Errorf("kubevirt plugin is disabled, hence VM resources backup/restore might not succeed")
+	}
+
+	log.Info("Velero is not used for discovered app DR")
+
+	return nil
+}
+
+func isProtectingVirtualMachine(recipeElements util.RecipeElements, vrg ramen.VolumeReplicationGroup,
+	log logr.Logger,
+) bool {
+	if vrg.Spec.KubeObjectProtection.RecipeRef.Name == recipecore.VMRecipeName {
+		log.Info("Recipe protects VM resources")
+
+		return true
+	}
+
+	// TODO: Add validation for recipes protecting VMs without using the inbuilt VM-recipe
+	for _, group := range recipeElements.RecipeWithParams.Spec.Groups {
+		if len(group.IncludedResourceTypes) > 0 {
+			for _, resourceTypes := range group.IncludedResourceTypes {
+				if strings.Contains(strings.ToLower(resourceTypes), "virtualmachine") {
+					log.Info("Recipe protects VM resources")
+
+					return true
+				}
+			}
+		}
+	}
+
+	log.Info("Recipe is not protecting VM resource")
+
+	return false
+}
+
+func isKubeVirtEnabled(ctx context.Context, reader client.Reader,
+	ramenConfig ramen.RamenConfig, log logr.Logger,
+) bool {
+	log.Info("Checking if kubevirt is enabled")
+	// Read Velero deployment and check if kubevirt plugin is enabled
+	var veleroDeploy appsv1.Deployment
+
+	veleroLookUpKey := types.NamespacedName{
+		Name:      "velero",
+		Namespace: ramenConfig.KubeObjectProtection.VeleroNamespaceName,
+	}
+	// Fetch the Velero deployment
+	if err := reader.Get(ctx, veleroLookUpKey, &veleroDeploy); err != nil {
+		log.Error(err, "failed to get Velero deployment")
+
+		return false
+	}
+
+	// Go through InitContainers to check if kubevirt plugin is enabled
+	for _, ic := range veleroDeploy.Spec.Template.Spec.InitContainers {
+		if strings.Contains(ic.Name, "kubevirt-velero-plugin") {
+			log.Info("kubevirt is enabled")
+
+			return true
+		}
+	}
+
+	log.Info("kubevirt is not enabled")
+
+	return false
+}
+
 func recipeNamespaceNames(recipeElements util.RecipeElements) sets.Set[string] {
 	namespaceNames := make(sets.Set[string], 0)
 
