fix(vmbda): write to condition message if disk is already attached to vm spec

Signed-off-by: Isteb4k <[email protected]>
Isteb4k committed Aug 9, 2024
1 parent 363283d commit cfae49c
Showing 8 changed files with 372 additions and 232 deletions.
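For orientation, here is a minimal sketch of the kind of condition this fix is meant to leave on the VirtualMachineBlockDeviceAttachment when the disk is already listed in the VM spec. The condition type string, import paths, and message wording are illustrative assumptions, not taken from this commit:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
)

func main() {
	// Hypothetical illustration of the resulting VMBDA condition.
	cond := metav1.Condition{
		Type:    "Attached", // assumed condition type name
		Status:  metav1.ConditionFalse,
		Reason:  string(vmbdacondition.Conflict),
		Message: "virtual disk is already attached to the virtual machine spec",
	}
	fmt.Printf("%s=%s reason=%s: %s\n", cond.Type, cond.Status, cond.Reason, cond.Message)
}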
4 changes: 3 additions & 1 deletion api/core/v1alpha2/vmbdacondition/condition.go
@@ -54,7 +54,9 @@ const (
NotAttached AttachedReason = "NotAttached"
// AttachmentRequestSent signifies that the attachment request has been sent and the hot-plug process has started.
AttachmentRequestSent AttachedReason = "AttachmentRequestSent"
// Conflict indicates that there is another `VirtualMachineBlockDeviceAttachment` with the same virtual machine and virtual disk to be hot-plugged.
// Conflict indicates that the virtual disk is already attached to the virtual machine:
// either there is another `VirtualMachineBlockDeviceAttachment` with the same virtual machine and virtual disk to be hot-plugged,
// or the virtual disk is already attached to the virtual machine spec.
// Only the one that was created or started sooner can be processed.
Conflict AttachedReason = "Conflict"
)
@@ -46,7 +46,12 @@ func NewAttachmentService(client Client, controllerNamespace string) *Attachment
}
}

var ErrVolumeStatusNotReady = errors.New("hotplug is not ready")
var (
ErrVolumeStatusNotReady = errors.New("hotplug is not ready")
ErrDiskIsSpecAttached = errors.New("virtual disk is already attached to the virtual machine spec")
ErrHotPlugRequestAlreadySent = errors.New("attachment request is already sent")
ErrVirtualMachineWaitsForRestartApproval = errors.New("virtual machine waits for restart approval")
)

func (s AttachmentService) IsHotPlugged(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) {
if vd == nil {
@@ -74,15 +79,27 @@ func (s AttachmentService) IsHotPlugged(vd *virtv2.VirtualDisk, vm *virtv2.Virtu
return false, nil
}

func (s AttachmentService) IsHotPlugRequestSent(vd *virtv2.VirtualDisk, kvvm *virtv1.VirtualMachine) (bool, error) {
name := kvbuilder.GenerateVMDDiskName(vd.Name)
func (s AttachmentService) CanHotPlug(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) {
if vd == nil {
return false, errors.New("cannot hot plug a nil VirtualDisk")
}

for _, vr := range kvvm.Status.VolumeRequests {
if vr.AddVolumeOptions.Name == name {
return true, nil
if vm == nil {
return false, errors.New("cannot hot plug a disk into a nil VirtualMachine")
}

if kvvm == nil {
return false, errors.New("cannot hot plug a disk into a nil KVVM")
}

for _, bdr := range vm.Spec.BlockDeviceRefs {
if bdr.Kind == virtv2.DiskDevice && bdr.Name == vd.Name {
return false, fmt.Errorf("%w: virtual machine has a virtual disk reference, but it is not a hot-plugged volume", ErrDiskIsSpecAttached)
}
}

name := kvbuilder.GenerateVMDDiskName(vd.Name)

if kvvm.Spec.Template != nil {
for _, vs := range kvvm.Spec.Template.Spec.Volumes {
if vs.Name == name {
@@ -91,21 +108,26 @@ func (s AttachmentService) IsHotPlugRequestSent(vd *virtv2.VirtualDisk, kvvm *vi
}

if !vs.PersistentVolumeClaim.Hotpluggable {
return false, fmt.Errorf("kvvm %s/%s spec volume %s has a pvc reference, but it is not a hot-plugged volume", kvvm.Namespace, kvvm.Name, vs.Name)
return false, fmt.Errorf("%w: virtual machine has a virtual disk reference, but it is not a hot-plugged volume", ErrDiskIsSpecAttached)
}

return true, nil
return false, ErrHotPlugRequestAlreadySent
}
}
}

return false, nil
}
for _, vr := range kvvm.Status.VolumeRequests {
if vr.AddVolumeOptions.Name == name {
return false, ErrHotPlugRequestAlreadySent
}
}

var (
ErrVirtualDiskIsAlreadyAttached = errors.New("virtual disk is already attached to virtual machine")
ErrVirtualMachineWaitsForRestartApproval = errors.New("virtual machine waits for restart approval")
)
if len(vm.Status.RestartAwaitingChanges) > 0 {
return false, ErrVirtualMachineWaitsForRestartApproval
}

return true, nil
}

func (s AttachmentService) HotPlugDisk(ctx context.Context, vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error {
if vd == nil {
@@ -116,14 +138,8 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, vd *virtv2.VirtualDi
return errors.New("cannot hot plug a disk into a nil VirtualMachine")
}

for _, bdr := range vm.Spec.BlockDeviceRefs {
if bdr.Kind == virtv2.DiskDevice && bdr.Name == vd.Name {
return ErrVirtualDiskIsAlreadyAttached
}
}

if len(vm.Status.RestartAwaitingChanges) > 0 {
return ErrVirtualMachineWaitsForRestartApproval
if kvvm == nil {
return errors.New("cannot hot plug a disk into a nil KVVM")
}

name := kvbuilder.GenerateVMDDiskName(vd.Name)
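The new CanHotPlug above folds the old IsHotPlugRequestSent check together with the spec-attachment and restart-approval checks and reports each failure as a distinct sentinel error. A minimal sketch, not part of this diff, of how a caller could branch on those errors and surface the conflict in the condition message; the package name and the service import path are assumptions:

package vmbda

import (
	"errors"

	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"

	// Assumed import path for the attachment service package shown above.
	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
)

// attachmentFailureReason maps a CanHotPlug error to a condition reason and message.
// errors.Is matches because CanHotPlug returns the sentinel errors directly or wraps them with %w.
func attachmentFailureReason(err error) (vmbdacondition.AttachedReason, string, bool) {
	switch {
	case errors.Is(err, service.ErrDiskIsSpecAttached):
		// The disk is referenced in vm.spec.blockDeviceRefs: report a Conflict and reuse the error text as the message.
		return vmbdacondition.Conflict, err.Error(), true
	case errors.Is(err, service.ErrHotPlugRequestAlreadySent):
		return vmbdacondition.AttachmentRequestSent, err.Error(), true
	default:
		return "", "", false
	}
}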
@@ -79,17 +79,6 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS
cb := conditions.NewConditionBuilder2(vmcondition.TypeBlockDevicesReady).
Generation(current.GetGeneration())

disksMessage := h.checkBlockDevicesSanity(current)
if !isDeletion(current) && disksMessage != "" {
log.Error(fmt.Sprintf("invalid disks: %s", disksMessage))
mgr.Update(cb.
Status(metav1.ConditionFalse).
Reason2(vmcondition.ReasonBlockDevicesNotReady).
Message(disksMessage).Condition())
changed.Status.Conditions = mgr.Generate()
return reconcile.Result{Requeue: true}, nil
}

bdState := NewBlockDeviceState(s)
err := bdState.Reload(ctx)
if err != nil {
@@ -99,65 +88,30 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS
if isDeletion(current) {
return reconcile.Result{}, h.removeFinalizersOnBlockDevices(ctx, changed, bdState)
}

if err = h.setFinalizersOnBlockDevices(ctx, changed, bdState); err != nil {
return reconcile.Result{}, fmt.Errorf("unable to add block devices finalizers: %w", err)
}

// Fill BlockDeviceRefs every time without knowledge of previously kept BlockDeviceRefs.
changed.Status.BlockDeviceRefs = nil

// Set BlockDeviceRef from spec to the status.
for _, bdSpecRef := range changed.Spec.BlockDeviceRefs {
bdStatusRef := h.getDiskStatusRef(bdSpecRef.Kind, bdSpecRef.Name)
bdStatusRef.Size = h.getBlockDeviceSize(&bdStatusRef, bdState)

changed.Status.BlockDeviceRefs = append(
changed.Status.BlockDeviceRefs,
bdStatusRef,
)
}

vmbdas, err := s.VMBDAList(ctx)
// Get hot plugged BlockDeviceRefs from vmbdas.
vmbdaStatusRefs, err := h.getBlockDeviceStatusRefsFromVMBDA(ctx, s)
if err != nil {
return reconcile.Result{}, err
return reconcile.Result{}, fmt.Errorf("failed to get hotplugged block devices: %w", err)
}

// Set hot plugged BlockDeviceRef to the status.
for _, vmbda := range vmbdas {
switch vmbda.Status.Phase {
case virtv2.BlockDeviceAttachmentPhaseInProgress,
virtv2.BlockDeviceAttachmentPhaseAttached:
default:
continue
}

var vd *virtv2.VirtualDisk
vd, err = s.VirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name)
if err != nil {
return reconcile.Result{}, err
}

if vd == nil {
continue
}

bdStatusRef := h.getDiskStatusRef(virtv2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
bdStatusRef.Size = vd.Status.Capacity
bdStatusRef.Hotplugged = true
bdStatusRef.VirtualMachineBlockDeviceAttachmentName = vmbda.Name
// Get BlockDeviceRefs from spec.
bdStatusRefs, conflictWarning := h.getBlockDeviceStatusRefsFromSpec(current.Spec.BlockDeviceRefs, bdState, vmbdaStatusRefs)

changed.Status.BlockDeviceRefs = append(
changed.Status.BlockDeviceRefs,
bdStatusRef,
)
}
// Fill BlockDeviceRefs every time without knowledge of previously kept BlockDeviceRefs.
changed.Status.BlockDeviceRefs = bdStatusRefs
changed.Status.BlockDeviceRefs = append(changed.Status.BlockDeviceRefs, vmbdaStatusRefs...)

kvvmi, err := s.KVVMI(ctx)
if err != nil {
return reconcile.Result{}, err
}

// Sync BlockDeviceRef in the status with KVVMI volumes.
// Sync BlockDeviceRefs in the status with KVVMI volumes.
if kvvmi != nil {
for i, bdStatusRef := range changed.Status.BlockDeviceRefs {
vs := h.findVolumeStatus(GenerateDiskName(bdStatusRef.Kind, bdStatusRef.Name), kvvmi)
@@ -170,7 +124,18 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS
}
}

// Update the BlockDevicesReady condition.
// Update the BlockDevicesReady condition if there are conflicting virtual disks.
if conflictWarning != "" {
log.Info(fmt.Sprintf("Conflicting virtual disks: %s", conflictWarning))

mgr.Update(cb.Status(metav1.ConditionFalse).
Reason2(vmcondition.ReasonBlockDevicesNotReady).
Message(conflictWarning).Condition())
changed.Status.Conditions = mgr.Generate()
return reconcile.Result{Requeue: true}, nil
}

// Update the BlockDevicesReady condition if there are any block devices that are not ready.
if readyCount, wffc := h.countReadyBlockDevices(current, bdState, log); len(current.Spec.BlockDeviceRefs) != readyCount {
var reason vmcondition.Reason
var msg string
@@ -185,8 +150,7 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS
log.Info(msg, "actualReady", readyCount, "expectedReady", len(current.Spec.BlockDeviceRefs))

h.recorder.Event(changed, corev1.EventTypeNormal, reason.String(), msg)
mgr.Update(cb.
Status(metav1.ConditionFalse).
mgr.Update(cb.Status(metav1.ConditionFalse).
Reason(reason.String()).
Message(msg).
Condition())
Expand All @@ -205,35 +169,74 @@ func (h *BlockDeviceHandler) Name() string {
return nameBlockDeviceHandler
}

// checkBlockDevicesSanity compares spec.blockDevices and status.blockDevicesAttached.
// It returns a non-empty message if the same disk is present in both arrays.
// It is a precaution to not apply changes in spec.blockDevices if disk is already
// hotplugged using the VMBDA resource. The reverse check is done by the vmbda-controller.
func (h *BlockDeviceHandler) checkBlockDevicesSanity(vm *virtv2.VirtualMachine) string {
if vm == nil {
return ""
func (h *BlockDeviceHandler) getBlockDeviceStatusRefsFromSpec(bdSpecRefs []virtv2.BlockDeviceSpecRef, bdState BlockDevicesState, hotplugs []virtv2.BlockDeviceStatusRef) ([]virtv2.BlockDeviceStatusRef, string) {
hotplugsByName := make(map[string]struct{}, len(hotplugs))
for _, hotplug := range hotplugs {
hotplugsByName[hotplug.Name] = struct{}{}
}
disks := make([]string, 0)
hotplugged := make(map[string]struct{})

for _, bda := range vm.Status.BlockDeviceRefs {
if bda.Hotplugged {
hotplugged[bda.Name] = struct{}{}
}
}
var conflictedRefs []string
var refs []virtv2.BlockDeviceStatusRef

for _, bd := range vm.Spec.BlockDeviceRefs {
if bd.Kind == virtv2.DiskDevice {
if _, ok := hotplugged[bd.Name]; ok {
disks = append(disks, bd.Name)
for _, bdSpecRef := range bdSpecRefs {
// It is a precaution to not apply changes in spec.blockDeviceRefs if the disk is already
// hotplugged using the VMBDA resource. The reverse check is done by the vmbda-controller.
if bdSpecRef.Kind == virtv2.DiskDevice {
if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
continue
}
}

bdStatusRef := h.getDiskStatusRef(bdSpecRef.Kind, bdSpecRef.Name)
bdStatusRef.Size = h.getBlockDeviceSize(&bdStatusRef, bdState)

refs = append(refs, bdStatusRef)
}

if len(disks) == 0 {
return ""
var warning string
if len(conflictedRefs) > 0 {
warning = fmt.Sprintf("spec.blockDeviceRefs field contains hotplugged disks (%s): unplug or remove them from spec to continue.", strings.Join(conflictedRefs, ", "))
}
return fmt.Sprintf("spec.blockDeviceRefs contain hotplugged disks: %s. Unplug or remove them from spec to continue.", strings.Join(disks, ", "))

return refs, warning
}

func (h *BlockDeviceHandler) getBlockDeviceStatusRefsFromVMBDA(ctx context.Context, s state.VirtualMachineState) ([]virtv2.BlockDeviceStatusRef, error) {
vmbdas, err := s.VMBDAList(ctx)
if err != nil {
return nil, err
}

var refs []virtv2.BlockDeviceStatusRef

for _, vmbda := range vmbdas {
switch vmbda.Status.Phase {
case virtv2.BlockDeviceAttachmentPhaseInProgress,
virtv2.BlockDeviceAttachmentPhaseAttached:
default:
continue
}

var vd *virtv2.VirtualDisk
vd, err = s.VirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name)
if err != nil {
return nil, err
}

if vd == nil {
continue
}

bdStatusRef := h.getDiskStatusRef(virtv2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
bdStatusRef.Size = vd.Status.Capacity
bdStatusRef.Hotplugged = true
bdStatusRef.VirtualMachineBlockDeviceAttachmentName = vmbda.Name

refs = append(refs, bdStatusRef)
}

return refs, nil
}

// countReadyBlockDevices checks if all attached images and disks are ready to be used by the VM.
@@ -0,0 +1,54 @@
/*
Copyright 2024 Flant JSC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validators

import (
"context"
"fmt"

"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

type BlockDeviceSpecRefsValidator struct{}

func NewBlockDeviceSpecRefsValidator() *BlockDeviceSpecRefsValidator {
return &BlockDeviceSpecRefsValidator{}
}

func (v *BlockDeviceSpecRefsValidator) ValidateCreate(_ context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) {
return nil, v.noDoubles(vm)
}

func (v *BlockDeviceSpecRefsValidator) ValidateUpdate(_ context.Context, _, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) {
return nil, v.noDoubles(newVM)
}

func (v *BlockDeviceSpecRefsValidator) noDoubles(vm *v1alpha2.VirtualMachine) error {
blockDevicesByRef := make(map[v1alpha2.BlockDeviceSpecRef]struct{}, len(vm.Spec.BlockDeviceRefs))

for _, bdRef := range vm.Spec.BlockDeviceRefs {
if _, ok := blockDevicesByRef[bdRef]; ok {
return fmt.Errorf("cannot specify the same block device reference more than once: %s with name %s has has a duplicate reference", bdRef.Kind, bdRef.Name)
}

blockDevicesByRef[bdRef] = struct{}{}
}

return nil
}
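A usage sketch, not part of this diff: a package-internal test that exercises the duplicate check above; the test name and the literal block device references are assumptions:

package validators

import (
	"testing"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// Hypothetical test: two identical block device references in the spec must be rejected.
func TestBlockDeviceSpecRefsValidatorRejectsDuplicates(t *testing.T) {
	vm := &v1alpha2.VirtualMachine{}
	vm.Spec.BlockDeviceRefs = []v1alpha2.BlockDeviceSpecRef{
		{Kind: v1alpha2.DiskDevice, Name: "data"},
		{Kind: v1alpha2.DiskDevice, Name: "data"},
	}

	if err := NewBlockDeviceSpecRefsValidator().noDoubles(vm); err == nil {
		t.Fatal("expected an error for a duplicated block device reference")
	}
}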