Skip to content

Commit 7b0121e

Browse files
committed
Validate kubevirt plugin
This commit introduces validation logic to ensure that the kubevirt-velero-plugin is enabled and properly configured in the Velero deployment. The plugin is essential for enabling backup and restore operations for KubeVirt-managed VirtualMachines (VMs), DataVolumes (DVs), PersistentVolumeClaims (PVCs), and associated resources.

Signed-off-by: pruthvitd <[email protected]>
1 parent 2a1a55c commit 7b0121e

File tree

3 files changed

+197
-91
lines changed

3 files changed

+197
-91
lines changed

internal/controller/drplacementcontrol_controller.go

Lines changed: 1 addition & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1368,9 +1368,6 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
13681368
ProtectedPVCs: extractProtectedPVCNames(vrg),
13691369
}
13701370

1371-
drpc.Status.ResourceConditions.Conditions = assignConditionsWithConflictCheck(
1372-
vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
1373-
13741371
if vrg.Status.PVCGroups != nil {
13751372
drpc.Status.ResourceConditions.ResourceMeta.PVCGroups = vrg.Status.PVCGroups
13761373
}
@@ -1385,7 +1382,7 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
13851382
drpc.Status.LastKubeObjectProtectionTime = &vrg.Status.KubeObjectProtection.CaptureToRecoverFrom.EndTime
13861383
}
13871384

1388-
updateDRPCProtectedCondition(drpc, vrg, clusterName)
1385+
updateDRPCProtectedCondition(drpc, vrg, clusterName, vrgs)
13891386
}
13901387

13911388
// getVRG retrieves a VRG either from the provided map or fetches it from the managed cluster/S3 store.
@@ -1446,85 +1443,6 @@ func extractProtectedPVCNames(vrg *rmn.VolumeReplicationGroup) []string {
14461443
return protectedPVCs
14471444
}
14481445

1449-
// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
1450-
func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup, conflictType string) *metav1.Condition {
1451-
var selectedCondition *metav1.Condition
1452-
1453-
for _, vrg := range vrgs {
1454-
condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
1455-
if condition != nil && condition.Status == metav1.ConditionFalse {
1456-
// Prioritize primary VRG's condition if available
1457-
if isVRGPrimary(vrg) {
1458-
return condition // Exit early if primary VRG condition is found
1459-
}
1460-
1461-
// Assign the first non-primary VRG's condition if no primary found yet
1462-
if selectedCondition == nil {
1463-
selectedCondition = condition
1464-
}
1465-
}
1466-
}
1467-
1468-
return selectedCondition
1469-
}
1470-
1471-
// assignConditionsWithConflictCheck assigns conditions from a given VRG while prioritizing conflict conditions.
1472-
func assignConditionsWithConflictCheck(vrgs map[string]*rmn.VolumeReplicationGroup,
1473-
vrg *rmn.VolumeReplicationGroup, conflictType string,
1474-
) []metav1.Condition {
1475-
conditions := &vrg.Status.Conditions
1476-
conflictCondition := findConflictCondition(vrgs, conflictType)
1477-
1478-
// Ensure the conflict condition is present in the conditions list
1479-
if conflictCondition != nil {
1480-
setConflictStatusCondition(conditions, *conflictCondition)
1481-
}
1482-
1483-
return *conditions
1484-
}
1485-
1486-
func setConflictStatusCondition(existingConditions *[]metav1.Condition,
1487-
newCondition metav1.Condition,
1488-
) metav1.Condition {
1489-
if existingConditions == nil {
1490-
existingConditions = &[]metav1.Condition{}
1491-
}
1492-
1493-
existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
1494-
if existingCondition == nil {
1495-
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
1496-
*existingConditions = append(*existingConditions, newCondition)
1497-
1498-
return newCondition
1499-
}
1500-
1501-
if existingCondition.Status != newCondition.Status ||
1502-
existingCondition.Reason != newCondition.Reason {
1503-
existingCondition.Status = newCondition.Status
1504-
existingCondition.Reason = newCondition.Reason
1505-
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
1506-
}
1507-
1508-
defaultValue := "none"
1509-
if newCondition.Reason == "" {
1510-
newCondition.Reason = defaultValue
1511-
}
1512-
1513-
if newCondition.Message == "" {
1514-
newCondition.Message = defaultValue
1515-
}
1516-
1517-
existingCondition.Reason = newCondition.Reason
1518-
existingCondition.Message = newCondition.Message
1519-
// TODO: Why not update lastTranTime if the above change?
1520-
1521-
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
1522-
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
1523-
}
1524-
1525-
return *existingCondition
1526-
}
1527-
15281446
// clusterForVRGStatus determines which cluster's VRG should be inspected for status updates to DRPC
15291447
func (r *DRPlacementControlReconciler) clusterForVRGStatus(
15301448
drpc *rmn.DRPlacementControl, userPlacement client.Object, log logr.Logger,

internal/controller/protected_condition.go

Lines changed: 103 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,13 @@ package controllers
55

66
import (
77
"fmt"
8+
"time"
89

910
"k8s.io/apimachinery/pkg/api/meta"
1011
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1112

1213
rmn "github.com/ramendr/ramen/api/v1alpha1"
14+
rmnutil "github.com/ramendr/ramen/internal/controller/util"
1315
)
1416

1517
func updateProtectedConditionUnknown(drpc *rmn.DRPlacementControl, clusterName string) {
@@ -29,16 +31,17 @@ func updateDRPCProtectedCondition(
2931
drpc *rmn.DRPlacementControl,
3032
vrg *rmn.VolumeReplicationGroup,
3133
clusterName string,
34+
vrgs map[string]*rmn.VolumeReplicationGroup,
3235
) {
3336
if updateVRGClusterDataReady(drpc, vrg, clusterName) {
3437
return
3538
}
3639

37-
if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName) {
40+
if updateDRPCProtectedForReplicationState(drpc, vrg, clusterName, vrgs) {
3841
return
3942
}
4043

41-
if updateVRGNoClusterDataConflict(drpc, vrg, clusterName) {
44+
if updateVRGNoClusterDataConflict(drpc, vrg, vrgs) {
4245
return
4346
}
4447

@@ -65,13 +68,19 @@ func updateDRPCProtectedForReplicationState(
6568
drpc *rmn.DRPlacementControl,
6669
vrg *rmn.VolumeReplicationGroup,
6770
clusterName string,
71+
vrgs map[string]*rmn.VolumeReplicationGroup,
6872
) bool {
73+
var fromCluster string
74+
75+
drpc.Status.ResourceConditions.Conditions, fromCluster = mergeVRGsConditions(
76+
vrgs, vrg, VRGConditionTypeDataReady)
77+
6978
switch vrg.Spec.ReplicationState {
7079
case rmn.Primary:
71-
return updateVRGDataReadyAsPrimary(drpc, vrg, clusterName) ||
80+
return updateVRGDataReadyAsPrimary(drpc, vrg, fromCluster) ||
7281
updateVRGDataProtectedAsPrimary(drpc, vrg, clusterName)
7382
case rmn.Secondary:
74-
return updateVRGDataReadyAsSecondary(drpc, vrg, clusterName) ||
83+
return updateVRGDataReadyAsSecondary(drpc, vrg, fromCluster) ||
7584
updateVRGDataProtectedAsSecondary(drpc, vrg, clusterName)
7685
}
7786

@@ -186,8 +195,13 @@ func updateVRGDataProtectedAsPrimary(drpc *rmn.DRPlacementControl,
186195
// - Returns a bool that is true if status was updated, and false otherwise
187196
func updateVRGNoClusterDataConflict(drpc *rmn.DRPlacementControl,
188197
vrg *rmn.VolumeReplicationGroup,
189-
clusterName string,
198+
vrgs map[string]*rmn.VolumeReplicationGroup,
190199
) bool {
200+
var clusterName string
201+
202+
drpc.Status.ResourceConditions.Conditions, clusterName = mergeVRGsConditions(
203+
vrgs, vrg, VRGConditionTypeNoClusterDataConflict)
204+
191205
return genericUpdateProtectedForCondition(drpc, vrg, clusterName, VRGConditionTypeNoClusterDataConflict,
192206
"workload data protection", "checking for workload data conflict", "conflicting workload data")
193207
}
@@ -306,3 +320,87 @@ func updateMiscVRGStatus(drpc *rmn.DRPlacementControl,
306320

307321
return !updated
308322
}
323+
324+
// findConflictCondition selects the appropriate condition from VRGs based on the conflict type.
325+
func findConflictCondition(vrgs map[string]*rmn.VolumeReplicationGroup,
326+
conflictType string,
327+
) (*metav1.Condition, string) {
328+
var selectedCondition *metav1.Condition
329+
330+
var clusterName string
331+
332+
for _, vrg := range vrgs {
333+
condition := meta.FindStatusCondition(vrg.Status.Conditions, conflictType)
334+
if condition != nil && condition.Status == metav1.ConditionFalse {
335+
// Prioritize primary VRG's condition if available
336+
clusterName = vrg.GetAnnotations()[DestinationClusterAnnotationKey]
337+
if isVRGPrimary(vrg) {
338+
return condition, clusterName // Exit early if primary VRG condition is found
339+
}
340+
341+
// Assign the first non-primary VRG's condition if no primary found yet
342+
if selectedCondition == nil {
343+
selectedCondition = condition
344+
}
345+
}
346+
}
347+
348+
return selectedCondition, clusterName
349+
}
350+
351+
// mergeVRGsConditions assigns conditions from a given VRG while prioritizing conflict conditions.
352+
func mergeVRGsConditions(vrgs map[string]*rmn.VolumeReplicationGroup,
353+
vrg *rmn.VolumeReplicationGroup, conflictType string,
354+
) ([]metav1.Condition, string) {
355+
conditions := &vrg.Status.Conditions
356+
conflictCondition, clusterName := findConflictCondition(vrgs, conflictType)
357+
358+
// Ensure the conflict condition is present in the conditions list
359+
if conflictCondition != nil {
360+
setConflictStatusCondition(conditions, *conflictCondition)
361+
}
362+
363+
return *conditions, clusterName
364+
}
365+
366+
func setConflictStatusCondition(existingConditions *[]metav1.Condition,
367+
newCondition metav1.Condition,
368+
) metav1.Condition {
369+
if existingConditions == nil {
370+
existingConditions = &[]metav1.Condition{}
371+
}
372+
373+
existingCondition := rmnutil.FindCondition(*existingConditions, newCondition.Type)
374+
if existingCondition == nil {
375+
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
376+
*existingConditions = append(*existingConditions, newCondition)
377+
378+
return newCondition
379+
}
380+
381+
if existingCondition.Status != newCondition.Status ||
382+
existingCondition.Reason != newCondition.Reason {
383+
existingCondition.Status = newCondition.Status
384+
existingCondition.Reason = newCondition.Reason
385+
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
386+
}
387+
388+
defaultValue := "none"
389+
if newCondition.Reason == "" {
390+
newCondition.Reason = defaultValue
391+
}
392+
393+
if newCondition.Message == "" {
394+
newCondition.Message = defaultValue
395+
}
396+
397+
existingCondition.Reason = newCondition.Reason
398+
existingCondition.Message = newCondition.Message
399+
// TODO: Why not update lastTranTime if the above change?
400+
401+
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
402+
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
403+
}
404+
405+
return *existingCondition
406+
}

internal/controller/vrg_recipe.go

Lines changed: 93 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ import (
1313

1414
"github.com/go-logr/logr"
1515
recipev1 "github.com/ramendr/recipe/api/v1alpha1"
16+
appsv1 "k8s.io/api/apps/v1"
17+
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
1618
"k8s.io/apimachinery/pkg/types"
1719
"k8s.io/apimachinery/pkg/util/sets"
1820
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -28,9 +30,11 @@ import (
2830
)
2931

3032
const (
31-
WorkflowAnyError = "any-error"
32-
WorkflowEssentialError = "essential-error"
33-
WorkflowFullError = "full-error"
33+
WorkflowAnyError = "any-error"
34+
WorkflowEssentialError = "essential-error"
35+
WorkflowFullError = "full-error"
36+
veleroDeployment = "velero"
37+
kubevirtVeleroPluginName = "kubevirt-velero-plugin"
3438
)
3539

3640
func captureWorkflowDefault(vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig) []kubeobjects.CaptureSpec {
@@ -166,6 +170,10 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu
166170
return recipeElements, fmt.Errorf("recipe %v namespaces validation error: %w", recipeNamespacedName.String(), err)
167171
}
168172

173+
if err := recipeVMBackupValidate(ctx, reader, ramenConfig, log); err != nil {
174+
return recipeElements, fmt.Errorf("recipe %v VM backup validation error: %w", recipeNamespacedName.String(), err)
175+
}
176+
169177
return recipeElements, nil
170178
}
171179

@@ -191,6 +199,7 @@ func isRecipeReconcileToStop(parameters map[string][]string) bool {
191199

192200
func getRecipeParameters(vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig) map[string][]string {
193201
parameters := vrg.Spec.KubeObjectProtection.RecipeParameters
202+
194203
if vrg.Spec.KubeObjectProtection.RecipeRef.Namespace == RamenOperandsNamespace(ramenConfig) &&
195204
vrg.Spec.KubeObjectProtection.RecipeRef.Name == recipecore.VMRecipeName {
196205
parameters["VM_NAMESPACE"] = append(parameters["VM_NAMESPACE"], *vrg.Spec.ProtectedNamespaces...)
@@ -330,6 +339,87 @@ func recipeNamespacesValidate(recipeElements util.RecipeElements, vrg ramen.Volu
330339
return nil
331340
}
332341

342+
// If VM protection is requested, Velero and the kubevirt plugin must be present,
343+
// unless an alternate supported solution is configured.
344+
func recipeVMBackupValidate(ctx context.Context, reader client.Reader,
345+
ramenConfig ramen.RamenConfig, log logr.Logger,
346+
) error {
347+
if !isVirtualMachineCRDInstalled(ctx, reader, log) {
348+
return nil
349+
}
350+
351+
if ramenConfig.KubeObjectProtection.VeleroNamespaceName != "" ||
352+
len(ramenConfig.KubeObjectProtection.VeleroNamespaceName) > 0 {
353+
if isKubeVirtEnabled(ctx, reader, ramenConfig, log) {
354+
return nil
355+
}
356+
357+
return fmt.Errorf("kubevirt plugin is disabled, hence VM resources backup/restore might not succeed")
358+
}
359+
360+
log.Info("Skipping kubevirt-plugin validation as Velero is not configured;" +
361+
"VM protection requires Velero and the kubevirt plugin unless an alternate supported solution is in place.")
362+
363+
return nil
364+
}
365+
366+
// isVirtualMachineCRDInstalled checks if the KubeVirt VirtualMachine CRD exists
367+
func isVirtualMachineCRDInstalled(ctx context.Context, reader client.Reader, log logr.Logger) bool {
368+
var vmCRD apiextensionsv1.CustomResourceDefinition
369+
370+
crdName := "virtualmachines.kubevirt.io"
371+
372+
err := reader.Get(ctx, client.ObjectKey{Name: crdName}, &vmCRD)
373+
if err != nil {
374+
log.Error(err, "VirtualMachine CRD not found; skipping kubevirt-velero-plugin validation")
375+
376+
return false
377+
}
378+
379+
log.Info(fmt.Sprintf("VirtualMachine CRD found: %s", vmCRD.Name))
380+
381+
return true
382+
}
383+
384+
func isKubeVirtEnabled(ctx context.Context, reader client.Reader,
385+
ramenConfig ramen.RamenConfig, log logr.Logger,
386+
) bool {
387+
// Read Velero deployment and check if kubevirt plugin is enabled
388+
var veleroDeploy appsv1.Deployment
389+
390+
veleroLookUpKey := types.NamespacedName{
391+
Name: veleroDeployment,
392+
Namespace: ramenConfig.KubeObjectProtection.VeleroNamespaceName,
393+
}
394+
// Fetch the Velero deployment
395+
if err := reader.Get(ctx, veleroLookUpKey, &veleroDeploy); err != nil {
396+
log.Error(err, "failed to get Velero deployment")
397+
398+
return false
399+
}
400+
401+
// Go through InitContainers to check if kubevirt plugin is enabled
402+
initContainers := veleroDeploy.Spec.Template.Spec.InitContainers
403+
404+
initContainerNames := make([]string, 0, len(initContainers))
405+
406+
for i := range initContainers {
407+
if strings.Contains(initContainers[i].Name, kubevirtVeleroPluginName) {
408+
log.Info("kubevirt is enabled")
409+
410+
return true
411+
}
412+
413+
initContainerNames = append(initContainerNames, initContainers[i].Name)
414+
}
415+
416+
log.Info(fmt.Sprintf("InitContainers were [%v]; '%s' init-container not found in the list",
417+
initContainerNames, kubevirtVeleroPluginName))
418+
log.Info("kubevirt is not enabled")
419+
420+
return false
421+
}
422+
333423
func recipeNamespaceNames(recipeElements util.RecipeElements) sets.Set[string] {
334424
namespaceNames := make(sets.Set[string], 0)
335425

0 commit comments

Comments
 (0)