diff --git a/images/virt-artifact/patches/024-auto-migrate-if-nodeplacement-changed.patch b/images/virt-artifact/patches/024-auto-migrate-if-nodeplacement-changed.patch
new file mode 100644
index 000000000..89ba17a68
--- /dev/null
+++ b/images/virt-artifact/patches/024-auto-migrate-if-nodeplacement-changed.patch
@@ -0,0 +1,96 @@
+diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
+index 0c4bfca389..9130fc940c 100644
+--- a/pkg/virt-controller/watch/vmi.go
++++ b/pkg/virt-controller/watch/vmi.go
+@@ -691,6 +691,10 @@ func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8
+ 			c.syncVolumesUpdate(vmiCopy)
+ 		}
+ 
++		if err := c.updateNodePlacementCondition(vmiCopy, pod); err != nil {
++			return fmt.Errorf("failed to update condition %s", virtv1.VirtualMachineInstanceNodePlacementChange)
++		}
++
+ 	case vmi.IsScheduled():
+ 		if !vmiPodExists {
+ 			vmiCopy.Status.Phase = virtv1.Failed
+@@ -2416,6 +2420,41 @@ func (c *VMIController) syncVolumesUpdate(vmi *virtv1.VirtualMachineInstance) {
+ 	vmiConditions.UpdateCondition(vmi, &condition)
+ }
+ 
++func (c *VMIController) updateNodePlacementCondition(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
++	status := k8sv1.ConditionFalse
++	changed, err := c.isChangedNodePlacement(vmi, pod)
++	if err != nil {
++		return fmt.Errorf("could not verify if NodePlacement update is required: %w", err)
++	}
++	if changed {
++		status = k8sv1.ConditionTrue
++	}
++	c.syncNodePlacementUpdate(vmi, status)
++	return nil
++}
++
++func (c *VMIController) isChangedNodePlacement(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) (bool, error) {
++	if vmi == nil || pod == nil {
++		return false, nil
++	}
++	templatePod, err := c.templateService.RenderLaunchManifest(vmi)
++	if err != nil {
++		return false, err
++	}
++
++	return !equality.Semantic.DeepEqual(pod.Spec.NodeSelector, templatePod.Spec.NodeSelector) ||
++		!equality.Semantic.DeepEqual(pod.Spec.Affinity, templatePod.Spec.Affinity), nil
++}
++
++func (c *VMIController) syncNodePlacementUpdate(vmi *virtv1.VirtualMachineInstance, status k8sv1.ConditionStatus) {
++	vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
++	condition := virtv1.VirtualMachineInstanceCondition{
++		Type:   virtv1.VirtualMachineInstanceNodePlacementChange,
++		Status: status,
++	}
++	vmiConditions.UpdateCondition(vmi, &condition)
++}
++
+ func (c *VMIController) aggregateDataVolumesConditions(vmiCopy *virtv1.VirtualMachineInstance, dvs []*cdiv1.DataVolume) {
+ 	if len(dvs) == 0 {
+ 		return
+diff --git a/pkg/virt-controller/watch/workload-updater/workload-updater.go b/pkg/virt-controller/watch/workload-updater/workload-updater.go
+index a7d0f76e24..0482b732fe 100644
+--- a/pkg/virt-controller/watch/workload-updater/workload-updater.go
++++ b/pkg/virt-controller/watch/workload-updater/workload-updater.go
+@@ -214,7 +214,7 @@ func (c *WorkloadUpdateController) updateVmi(_, obj interface{}) {
+ 		return
+ 	}
+ 
+-	if !(isHotplugInProgress(vmi) || isVolumesUpdateInProgress(vmi)) ||
++	if !(isHotplugInProgress(vmi) || isVolumesUpdateInProgress(vmi) || isNodePlacementInProgress(vmi)) ||
+ 		migrationutils.IsMigrating(vmi) {
+ 		return
+ 	}
+@@ -324,6 +324,11 @@ func isVolumesUpdateInProgress(vmi *virtv1.VirtualMachineInstance) bool {
+ 		virtv1.VirtualMachineInstanceVolumesChange, k8sv1.ConditionTrue)
+ }
+ 
++func isNodePlacementInProgress(vmi *virtv1.VirtualMachineInstance) bool {
++	return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi,
++		virtv1.VirtualMachineInstanceNodePlacementChange, k8sv1.ConditionTrue)
++}
++
+ func (c *WorkloadUpdateController) doesRequireMigration(vmi *virtv1.VirtualMachineInstance) bool {
+ 	if vmi.IsFinal() || migrationutils.IsMigrating(vmi) {
+ 		return false
+diff --git a/staging/src/kubevirt.io/api/core/v1/types.go b/staging/src/kubevirt.io/api/core/v1/types.go
+index 7aa814d8f1..b7e5792a71 100644
+--- a/staging/src/kubevirt.io/api/core/v1/types.go
++++ b/staging/src/kubevirt.io/api/core/v1/types.go
+@@ -568,6 +568,9 @@ const (
+ 
+ 	// Summarizes that all the DataVolumes attached to the VMI are Ready or not
+ 	VirtualMachineInstanceDataVolumesReady VirtualMachineInstanceConditionType = "DataVolumesReady"
++
++	// Indicates that the VMI has affinity or nodeSelector changes
++	VirtualMachineInstanceNodePlacementChange VirtualMachineInstanceConditionType = "NodePlacementChange"
+ )
+ 
+ // These are valid reasons for VMI conditions.
diff --git a/images/virt-artifact/patches/README.md b/images/virt-artifact/patches/README.md
index 4ae99e60c..1f02e2646 100644
--- a/images/virt-artifact/patches/README.md
+++ b/images/virt-artifact/patches/README.md
@@ -83,3 +83,7 @@ Unsuccessful migrations may leave a lot of Pods. These huge lists reduce perform
 
 Replace the expressions for the ValidatingAdmissionPolicy kubevirt-node-restriction-policy.
 This is necessary because of the kube-api-rewriter that changes the labels.
+
+#### `024-auto-migrate-if-nodeplacement-changed.patch`
+
+Start the migration if the nodeSelector or affinity has changed.