diff --git a/api/core/v1alpha2/vdcondition/condition.go b/api/core/v1alpha2/vdcondition/condition.go index d17e6ab0d..d51db6978 100644 --- a/api/core/v1alpha2/vdcondition/condition.go +++ b/api/core/v1alpha2/vdcondition/condition.go @@ -34,7 +34,7 @@ const ( SnapshottingType Type = "Snapshotting" // StorageClassReadyType indicates whether the storage class is ready. StorageClassReadyType Type = "StorageClassReady" - // InUseType indicates whether the VirtualDisk is attached to a running VirtualMachine or is being used in a process of a VirtualImage creation. + // InUseType indicates whether the VirtualDisk is attached to a running VirtualMachine or is being used in the process of image creation. InUseType Type = "InUse" ) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go index 384c0372e..3a5ba7082 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go @@ -228,7 +228,9 @@ func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *virtv2.Cluster } inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status == metav1.ConditionTrue && inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { + if inUseCondition.Status == metav1.ConditionTrue && + inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() && + inUseCondition.ObservedGeneration == vd.Status.ObservedGeneration { return nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go index 050895c3c..c9265ee2e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go @@ -22,7 +22,6 @@ 
import ( "slices" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -71,7 +70,7 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon for _, vm := range vms.Items { if h.isVDAttachedToVM(vd.GetName(), vm) { if vm.Status.Phase != virtv2.MachineStopped { - allowUseForVM = isVMReady(vm.Status.Conditions) + allowUseForVM = isVMCanStart(vm.Status.Conditions) if allowUseForVM { break @@ -92,7 +91,7 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon Namespace: vm.GetNamespace(), LabelSelector: labels.SelectorFromSet(map[string]string{virtv1.VirtualMachineNameLabel: vm.GetName()}), }) - if err != nil && !k8serrors.IsNotFound(err) { + if err != nil { return reconcile.Result{}, fmt.Errorf("unable to list virt-launcher Pod for VM %q: %w", vm.GetName(), err) } @@ -117,7 +116,11 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon allowedPhases := []virtv2.ImagePhase{virtv2.ImageProvisioning, virtv2.ImagePending} for _, vi := range vis.Items { - if slices.Contains(allowedPhases, vi.Status.Phase) && vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind { + if slices.Contains(allowedPhases, vi.Status.Phase) && + vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + vi.Spec.DataSource.ObjectRef != nil && + vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + vi.Spec.DataSource.ObjectRef.Name == vd.Name { allowUseForImage = true break } @@ -129,13 +132,16 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon return reconcile.Result{}, fmt.Errorf("error getting cluster virtual images: %w", err) } for _, cvi := range cvis.Items { - if slices.Contains(allowedPhases, cvi.Status.Phase) && 
cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind { + if slices.Contains(allowedPhases, cvi.Status.Phase) && + cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + cvi.Spec.DataSource.ObjectRef != nil && + cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + cvi.Spec.DataSource.ObjectRef.Name == vd.Name { allowUseForImage = true } } cb := conditions.NewConditionBuilder(vdcondition.InUseType) - switch { case allowUseForVM && inUseCondition.Status == metav1.ConditionUnknown: if inUseCondition.Reason != vdcondition.AllowedForVirtualMachineUsage.String() { @@ -156,19 +162,20 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon conditions.SetCondition(cb, &vd.Status.Conditions) } default: - needChange := false + setUnknown := false if inUseCondition.Reason == vdcondition.AllowedForVirtualMachineUsage.String() && !allowUseForVM { - needChange = true + setUnknown = true } if inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() && !allowUseForImage { - needChange = true + setUnknown = true } - if needChange { + if setUnknown { cb.Generation(vd.Generation).Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown).Message("") conditions.SetCondition(cb, &vd.Status.Conditions) + return reconcile.Result{Requeue: true}, nil } } @@ -185,7 +192,7 @@ func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) return false } -func isVMReady(conditions []metav1.Condition) bool { +func isVMCanStart(conditions []metav1.Condition) bool { critConditions := []string{ vmcondition.TypeIPAddressReady.String(), vmcondition.TypeClassReady.String(), diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go index 98d102532..1a44d1d8e 100644 --- 
a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go @@ -19,8 +19,8 @@ package internal import ( "context" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -34,24 +34,24 @@ import ( "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) -var _ = ginkgo.Describe("InUseHandler", func() { +var _ = Describe("InUseHandler", func() { var ( scheme *runtime.Scheme ctx context.Context handler *InUseHandler ) - ginkgo.BeforeEach(func() { + BeforeEach(func() { scheme = runtime.NewScheme() - gomega.Expect(clientgoscheme.AddToScheme(scheme)).To(gomega.Succeed()) - gomega.Expect(virtv2.AddToScheme(scheme)).To(gomega.Succeed()) - gomega.Expect(virtv1.AddToScheme(scheme)).To(gomega.Succeed()) + Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) + Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() }) - ginkgo.Context("when VirtualDisk is not in use", func() { - ginkgo.It("must set status Unknown and reason Unknown", func() { + Context("when VirtualDisk is not in use", func() { + It("must set status Unknown and reason Unknown", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -66,17 +66,17 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - 
gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) }) }) - ginkgo.Context("when VirtualDisk is used by running VirtualMachine", func() { - ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + Context("when VirtualDisk is used by running VirtualMachine", func() { + It("must set status True and reason AllowedForVirtualMachineUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -115,18 +115,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForVirtualMachineUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is used by not ready VirtualMachine", func() { - ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + Context("when VirtualDisk is used by not ready VirtualMachine", func() { + It("it sets Unknown", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -166,17 +166,17 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + 
Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) }) }) - ginkgo.Context("when VirtualDisk is used by VirtualImage", func() { - ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + Context("when VirtualDisk is used by VirtualImage", func() { + It("must set status True and reason AllowedForImageUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -211,18 +211,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForImageUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is used by ClusterVirtualImage", func() { - ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + Context("when VirtualDisk is used by ClusterVirtualImage", func() { + It("must set status True and reason AllowedForImageUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -258,18 +258,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForImageUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is used by VirtualImage and VirtualMachine", func() { - ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + Context("when VirtualDisk is used by VirtualImage and VirtualMachine", func() { + It("must set status True and reason AllowedForVirtualMachineUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -319,18 +319,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForVirtualMachineUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is used by VirtualMachine after create image", func() { - ginkgo.It("must set status True and reason 
AllowedForVirtualMachineUsage", func() { + Context("when VirtualDisk is used by VirtualMachine after create image", func() { + It("must set status True and reason AllowedForVirtualMachineUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -366,18 +366,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForVirtualMachineUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is used by VirtualImage after running VM", func() { - ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + Context("when VirtualDisk is used by VirtualImage after running VM", func() { + It("must set status True and reason AllowedForImageUsage", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -418,18 +418,18 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) - 
gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + Expect(cond.Reason).To(Equal(vdcondition.AllowedForImageUsage.String())) }) }) - ginkgo.Context("when VirtualDisk is not in use after create image", func() { - ginkgo.It("must set status Unknown and reason Unknown", func() { + Context("when VirtualDisk is not in use after image creation", func() { + It("must set status Unknown and reason Unknown", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -450,17 +450,17 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) }) }) - ginkgo.Context("when VirtualDisk is not in use after running VM", func() { - ginkgo.It("must set status Unknown and reason Unknown", func() { + Context("when VirtualDisk is not in use after VM deletion", func() { + It("must set status Unknown and reason Unknown", func() { vd := &virtv2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", @@ -481,12 +481,12 @@ var _ = ginkgo.Describe("InUseHandler", func() { handler = NewInUseHandler(k8sClient) result, err := handler.Handle(ctx, vd) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - 
gomega.Expect(cond).ToNot(gomega.BeNil()) - gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + Expect(cond).ToNot(BeNil()) + Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) }) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go index 55ed3aee0..9e857fa4e 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go @@ -380,7 +380,9 @@ func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *virtv2.VirtualI } inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status == metav1.ConditionTrue && inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { + if inUseCondition.Status == metav1.ConditionTrue && + inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() && + inUseCondition.ObservedGeneration == vd.Status.ObservedGeneration { return nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go index 700c618fa..13dc5fc6c 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go @@ -169,10 +169,10 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS return reconcile.Result{}, err } - if !h.checkAllowVirtualDisks(vds) { + if !h.areVirtualDisksAllowedToUse(vds) { mgr.Update(cb.Status(metav1.ConditionFalse). Reason(vmcondition.ReasonBlockDevicesNotReady). 
- Message("virtual disks are not allowed for use in the virtual machine, it may be used to create an images").Condition()) + Message("Virtual disks are not allowed to be used in the virtual machine, check where else they are used.").Condition()) changed.Status.Conditions = mgr.Generate() return reconcile.Result{}, nil } @@ -184,10 +184,12 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS return reconcile.Result{}, nil } -func (h *BlockDeviceHandler) checkAllowVirtualDisks(vds map[string]*virtv2.VirtualDisk) bool { +func (h *BlockDeviceHandler) areVirtualDisksAllowedToUse(vds map[string]*virtv2.VirtualDisk) bool { for _, vd := range vds { inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status != metav1.ConditionTrue || inUseCondition.Reason != vdcondition.AllowedForVirtualMachineUsage.String() { + if inUseCondition.Status != metav1.ConditionTrue || + inUseCondition.Reason != vdcondition.AllowedForVirtualMachineUsage.String() || + inUseCondition.ObservedGeneration != vd.Status.ObservedGeneration { return false } } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go index f8144d245..30aa9e40b 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go @@ -35,6 +35,113 @@ func TestBlockDeviceHandler(t *testing.T) { RunSpecs(t, "BlockDeviceHandler Suite") } +var _ = Describe("func areVirtualDisksAllowedToUse", func() { + var h *BlockDeviceHandler + var vdFoo *virtv2.VirtualDisk + var vdBar *virtv2.VirtualDisk + + BeforeEach(func() { + h = NewBlockDeviceHandler(nil, &EventRecorderMock{ + EventFunc: func(_ runtime.Object, _, _, _ string) {}, + }) + vdFoo = &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "vd-foo"}, + Status: 
virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForVirtualMachineUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + vdBar = &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "vd-bar"}, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForVirtualMachineUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + }) + + Context("VirtualDisk is not allowed", func() { + It("returns false", func() { + anyVd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: conditions.ReasonUnknown.String(), + Status: metav1.ConditionUnknown, + }, + }, + }, + } + + vds := map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + anyVd.Name: anyVd, + } + + allowed := h.areVirtualDisksAllowedToUse(vds) + Expect(allowed).To(BeFalse()) + }) + }) + + Context("VirtualDisk is used to create image", func() { + It("returns false", func() { + anyVd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, + Status: virtv2.VirtualDiskStatus{ + Phase: virtv2.DiskReady, + Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, + Conditions: []metav1.Condition{ + { + Type: vdcondition.ReadyType.String(), + Reason: vdcondition.Ready.String(), + Status: metav1.ConditionTrue, + }, + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForImageUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + vds := map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + anyVd.Name: anyVd, + } + + allowed := h.areVirtualDisksAllowedToUse(vds) + Expect(allowed).To(BeFalse()) + }) + }) + + Context("VirtualDisk is allowed", func() { + It("returns true", func() { + vds := 
map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + } + + allowed := h.areVirtualDisksAllowedToUse(vds) + Expect(allowed).To(BeTrue()) + }) + }) +}) + var _ = Describe("BlockDeviceHandler", func() { var h *BlockDeviceHandler var logger *slog.Logger @@ -134,84 +241,6 @@ var _ = Describe("BlockDeviceHandler", func() { }) }) - Context("BlockDevices are not ready, because VirtualDisk is not allowed", func() { - It("return false", func() { - anyVd := &virtv2.VirtualDisk{ - ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, - Conditions: []metav1.Condition{ - { - Type: vdcondition.ReadyType.String(), - Reason: vdcondition.Ready.String(), - Status: metav1.ConditionTrue, - }, - { - Type: vdcondition.InUseType.String(), - Reason: conditions.ReasonUnknown.String(), - Status: metav1.ConditionUnknown, - }, - }, - }, - } - - vds := map[string]*virtv2.VirtualDisk{ - vdFoo.Name: vdFoo, - vdBar.Name: vdBar, - anyVd.Name: anyVd, - } - - allowed := h.checkAllowVirtualDisks(vds) - Expect(allowed).To(BeFalse()) - }) - }) - - Context("BlockDevices are not ready, because VirtualDisk using for create image", func() { - It("return false", func() { - anyVd := &virtv2.VirtualDisk{ - ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, - Conditions: []metav1.Condition{ - { - Type: vdcondition.ReadyType.String(), - Reason: vdcondition.Ready.String(), - Status: metav1.ConditionTrue, - }, - { - Type: vdcondition.InUseType.String(), - Reason: vdcondition.AllowedForImageUsage.String(), - Status: metav1.ConditionTrue, - }, - }, - }, - } - - vds := map[string]*virtv2.VirtualDisk{ - vdFoo.Name: vdFoo, - vdBar.Name: vdBar, - anyVd.Name: anyVd, - } - - allowed := h.checkAllowVirtualDisks(vds) - Expect(allowed).To(BeFalse()) - }) - }) - - 
Context("BlockDevices are ready, because VirtualDisk is allowed", func() { - It("return false", func() { - vds := map[string]*virtv2.VirtualDisk{ - vdFoo.Name: vdFoo, - vdBar.Name: vdBar, - } - - allowed := h.checkAllowVirtualDisks(vds) - Expect(allowed).To(BeTrue()) - }) - }) - Context("Image is not ready", func() { It("VirtualImage not ready: cannot start, no warnings", func() { vi.Status.Phase = virtv2.ImagePending diff --git a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go index bda8a2859..9572c7dc3 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go +++ b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/deckhouse/pkg/log" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -51,8 +52,45 @@ func (w VirtualDiskRequestEnqueuer) GetEnqueueFrom() client.Object { return w.enqueueFromObj } -func (w VirtualDiskRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { - // Enqueue CVI or VI specified by the object ref. 
+func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context, obj client.Object) (requests []reconcile.Request) { + var vds virtv2.VirtualDiskList + err := w.client.List(ctx, &vds) + if err != nil { + w.logger.Error(fmt.Sprintf("failed to list vd: %s", err)) + return + } + + for _, vd := range vds.Items { + dsReady, _ := conditions.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) + if dsReady.Status == metav1.ConditionTrue { + continue + } + + if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + continue + } + + ref := vd.Spec.DataSource.ObjectRef + + if ref == nil || ref.Kind != w.enqueueFromKind { + continue + } + + if ref.Name == obj.GetName() { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: vd.Namespace, + Name: vd.Name, + }, + }) + } + } + + return + +} + +func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVIs(obj client.Object) (requests []reconcile.Request) { if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindVirtualImage { vi, ok := obj.(*virtv2.VirtualImage) if !ok { @@ -68,7 +106,12 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequests(ctx context.Context, obj cli }, }) } - } else if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindClusterVirtualImage { + } + return +} + +func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromCVIs(obj client.Object) (requests []reconcile.Request) { + if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindClusterVirtualImage { cvi, ok := obj.(*virtv2.ClusterVirtualImage) if !ok { w.logger.Error(fmt.Sprintf("expected a ClusterVirtualImage but got a %T", obj)) @@ -84,39 +127,30 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequests(ctx context.Context, obj cli }) } } + return +} - var vds virtv2.VirtualDiskList - err := w.client.List(ctx, &vds) - if err != nil { - w.logger.Error(fmt.Sprintf("failed to list vd: %s", err)) - return - } - - for _, vd := range vds.Items { - dsReady, _ 
:= conditions.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) - if dsReady.Status == metav1.ConditionTrue { - continue - } - - if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { - continue - } +func (w VirtualDiskRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { + vds := w.EnqueueRequestsFromVDs(ctx, obj) + vdsFromVIs := w.EnqueueRequestsFromVIs(obj) + vdsFromCVIs := w.EnqueueRequestsFromCVIs(obj) - ref := vd.Spec.DataSource.ObjectRef + uniqueRequests := make(map[reconcile.Request]struct{}) - if ref == nil || ref.Kind != w.enqueueFromKind { - continue - } + for _, req := range vds { + uniqueRequests[req] = struct{}{} + } + for _, req := range vdsFromVIs { + uniqueRequests[req] = struct{}{} + } + for _, req := range vdsFromCVIs { + uniqueRequests[req] = struct{}{} + } - if ref.Name == obj.GetName() { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: vd.Namespace, - Name: vd.Name, - }, - }) - } + var aggregatedResults []reconcile.Request + for req := range uniqueRequests { + aggregatedResults = append(aggregatedResults, req) } - return + return aggregatedResults }