diff --git a/images/cdi-artifact/patches/019-add-provisioner-tolerations-anno.patch b/images/cdi-artifact/patches/019-add-provisioner-tolerations-anno.patch
new file mode 100644
index 000000000..afd414acf
--- /dev/null
+++ b/images/cdi-artifact/patches/019-add-provisioner-tolerations-anno.patch
@@ -0,0 +1,137 @@
+diff --git a/pkg/controller/clone-controller.go b/pkg/controller/clone-controller.go
+index 59ee5fd3f..046d1f916 100644
+--- a/pkg/controller/clone-controller.go
++++ b/pkg/controller/clone-controller.go
+@@ -500,6 +500,11 @@ func (r *CloneReconciler) CreateCloneSourcePod(image, pullPolicy string, pvc *co
+ 		return nil, err
+ 	}
+ 
++	workloadNodePlacement, err = cc.AdjustWorkloadNodePlacement(context.TODO(), r.client, workloadNodePlacement, pvc)
++	if err != nil {
++		return nil, fmt.Errorf("failed to adjust workload node placement: %w", err)
++	}
++
+ 	sourcePvc, err := r.getCloneRequestSourcePVC(pvc)
+ 	if err != nil {
+ 		return nil, err
+diff --git a/pkg/controller/common/util.go b/pkg/controller/common/util.go
+index 48c73628d..a2f3eb7e2 100644
+--- a/pkg/controller/common/util.go
++++ b/pkg/controller/common/util.go
+@@ -21,6 +21,7 @@ import (
+ 	"crypto/rand"
+ 	"crypto/rsa"
+ 	"crypto/tls"
++	"encoding/json"
+ 	"fmt"
+ 	"io"
+ 	"math"
+@@ -95,6 +96,9 @@ const (
+ 	// AnnExternalPopulation annotation marks a PVC as "externally populated", allowing the import-controller to skip it
+ 	AnnExternalPopulation = AnnAPIGroup + "/externalPopulation"
+ 
++	// AnnProvisionerTolerations annotation specifies tolerations to use for provisioners.
++	AnnProvisionerTolerations = "virt.deckhouse.io/provisioner-tolerations"
++
+ 	// AnnDeleteAfterCompletion is PVC annotation for deleting DV after completion
+ 	AnnDeleteAfterCompletion = AnnAPIGroup + "/storage.deleteAfterCompletion"
+ 	// AnnPodRetainAfterCompletion is PVC annotation for retaining transfer pods after completion
+@@ -780,6 +784,50 @@ func GetWorkloadNodePlacement(ctx context.Context, c client.Client) (*sdkapi.Nod
+ 	return &cr.Spec.Workloads, nil
+ }
+ 
++// AdjustWorkloadNodePlacement appends the tolerations from the annotation on the target PVC that owns the given prime PVC.
++func AdjustWorkloadNodePlacement(ctx context.Context, c client.Client, nodePlacement *sdkapi.NodePlacement, primePVC *corev1.PersistentVolumeClaim) (*sdkapi.NodePlacement, error) {
++	targetPVCKey := types.NamespacedName{
++		Namespace: primePVC.Namespace,
++	}
++
++	for _, ref := range primePVC.OwnerReferences {
++		if ref.Kind == "PersistentVolumeClaim" {
++			targetPVCKey.Name = ref.Name
++		}
++	}
++
++	var targetPVC corev1.PersistentVolumeClaim
++	err := c.Get(ctx, targetPVCKey, &targetPVC)
++	if err != nil {
++		return nil, fmt.Errorf("failed to get target pvc %s: %w", targetPVCKey, err)
++	}
++
++	provisionerTolerations, err := ExtractProvisionerTolerations(&targetPVC)
++	if err != nil {
++		return nil, fmt.Errorf("failed to extract provisioner tolerations: %w", err)
++	}
++
++	nodePlacement.Tolerations = append(nodePlacement.Tolerations, provisionerTolerations...)
++
++	return nodePlacement, nil
++}
++
++func ExtractProvisionerTolerations(obj client.Object) ([]corev1.Toleration, error) {
++	rawTolerations := obj.GetAnnotations()[AnnProvisionerTolerations]
++
++	if rawTolerations == "" {
++		return nil, nil
++	}
++
++	var tolerations []corev1.Toleration
++	err := json.Unmarshal([]byte(rawTolerations), &tolerations)
++	if err != nil {
++		return nil, fmt.Errorf("failed to unmarshal provisioner tolerations %s: %w", rawTolerations, err)
++	}
++
++	return tolerations, nil
++}
++
+ // GetActiveCDI returns the active CDI CR
+ func GetActiveCDI(ctx context.Context, c client.Client) (*cdiv1.CDI, error) {
+ 	crList := &cdiv1.CDIList{}
+diff --git a/pkg/controller/datavolume/controller-base.go b/pkg/controller/datavolume/controller-base.go
+index b8c9f893e..99f8501be 100644
+--- a/pkg/controller/datavolume/controller-base.go
++++ b/pkg/controller/datavolume/controller-base.go
+@@ -1145,6 +1145,11 @@ func (r *ReconcilerBase) newPersistentVolumeClaim(dataVolume *cdiv1.DataVolume,
+ 		annotations[k] = v
+ 	}
+ 	annotations[cc.AnnPodRestarts] = "0"
++
++	if dataVolume.Annotations[cc.AnnProvisionerTolerations] != "" {
++		annotations[cc.AnnProvisionerTolerations] = dataVolume.Annotations[cc.AnnProvisionerTolerations]
++	}
++
+ 	annotations[cc.AnnContentType] = string(cc.GetContentType(dataVolume.Spec.ContentType))
+ 	if dataVolume.Spec.PriorityClassName != "" {
+ 		annotations[cc.AnnPriorityClassName] = dataVolume.Spec.PriorityClassName
+diff --git a/pkg/controller/import-controller.go b/pkg/controller/import-controller.go
+index 49f1ff898..ba9fcb531 100644
+--- a/pkg/controller/import-controller.go
++++ b/pkg/controller/import-controller.go
+@@ -859,6 +859,11 @@ func createImporterPod(ctx context.Context, log logr.Logger, client client.Clien
+ 		return nil, err
+ 	}
+ 
++	args.workloadNodePlacement, err = cc.AdjustWorkloadNodePlacement(context.TODO(), client, args.workloadNodePlacement, args.pvc)
++	if err != nil {
++		return nil, fmt.Errorf("failed to adjust workload node placement: %w", err)
++	}
++
+ 	if isRegistryNodeImport(args) {
+ 		args.importImage, err = getRegistryImportImage(args.pvc)
+ 		if err != nil {
+diff --git a/pkg/controller/upload-controller.go b/pkg/controller/upload-controller.go
+index f251cae5d..ed4420fb9 100644
+--- a/pkg/controller/upload-controller.go
++++ b/pkg/controller/upload-controller.go
+@@ -623,6 +623,11 @@ func (r *UploadReconciler) createUploadPod(args UploadPodArgs) (*corev1.Pod, err
+ 		return nil, err
+ 	}
+ 
++	workloadNodePlacement, err = cc.AdjustWorkloadNodePlacement(context.TODO(), r.client, workloadNodePlacement, args.PVC)
++	if err != nil {
++		return nil, fmt.Errorf("failed to adjust workload node placement: %w", err)
++	}
++
+ 	pod := r.makeUploadPodSpec(args, podResourceRequirements, imagePullSecrets, workloadNodePlacement)
+ 	util.SetRecommendedLabels(pod, r.installerLabels, "cdi-controller")
+ 
diff --git a/images/cdi-artifact/patches/README.md b/images/cdi-artifact/patches/README.md
index 2a115ccb4..1ea32c62c 100644
--- a/images/cdi-artifact/patches/README.md
+++ b/images/cdi-artifact/patches/README.md
@@ -76,3 +76,7 @@ This is necessary for ensuring that the metrics can be accessed only by Promethe
 Currently covered metrics:
 - cdi-controller
 - cdi-deployment
+
+#### `019-add-provisioner-tolerations-anno.patch`
+
+Add an annotation that passes custom tolerations to CDI provisioner pods, so provisioning does not get stuck with unschedulable pods on tainted nodes.
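The flow in this patch is DataVolume → PVC → provisioner pod: the annotation is copied from the DataVolume to the PVC, and `AdjustWorkloadNodePlacement` appends the decoded tolerations to the provisioner's node placement. A minimal sketch of what a client is expected to set (the toleration values are hypothetical; the annotation key and JSON encoding are the ones defined above):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Tolerations the CDI provisioner pods (importer, uploader, clone
	// source) should inherit. The annotation value is a plain JSON array.
	tolerations := []corev1.Toleration{{
		Key:      "dedicated", // hypothetical taint key
		Operator: corev1.TolerationOpEqual,
		Value:    "virtualization",
		Effect:   corev1.TaintEffectNoSchedule,
	}}

	raw, err := json.Marshal(tolerations)
	if err != nil {
		panic(err)
	}

	// Set on the DataVolume; newPersistentVolumeClaim copies the annotation
	// to the PVC, and ExtractProvisionerTolerations decodes it back into
	// []corev1.Toleration for the provisioner pod.
	fmt.Printf("virt.deckhouse.io/provisioner-tolerations: %s\n", raw)
}
```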
diff --git a/images/virtualization-artifact/pkg/controller/common/util.go b/images/virtualization-artifact/pkg/controller/common/util.go
index 6e3b73426..f3f8a4378 100644
--- a/images/virtualization-artifact/pkg/controller/common/util.go
+++ b/images/virtualization-artifact/pkg/controller/common/util.go
@@ -40,15 +40,21 @@ const (
 	// AnnAPIGroup is the APIGroup for virtualization-controller.
 	AnnAPIGroup = "virt.deckhouse.io"
 
-	// AnnCreatedBy is a pod annotation indicating if the pod was created by the PVC
+	// AnnCreatedBy is a pod annotation indicating if the pod was created by the PVC.
 	AnnCreatedBy = AnnAPIGroup + "/storage.createdByController"
 
 	// AnnPodRetainAfterCompletion is PVC annotation for retaining transfer pods after completion
 	AnnPodRetainAfterCompletion = AnnAPIGroup + "/storage.pod.retainAfterCompletion"
 
-	// AnnUploadURL provides a const for CVMI/VMI/VMD uploadURL annotation
+	// AnnUploadURL provides a const for CVMI/VMI/VMD uploadURL annotation.
 	AnnUploadURL = AnnAPIGroup + "/upload.url"
 
+	// AnnTolerationsHash provides a const for annotation with hash of applied tolerations.
+	AnnTolerationsHash = AnnAPIGroup + "/tolerations-hash"
+
+	// AnnProvisionerTolerations provides a const for tolerations to use for provisioners.
+	AnnProvisionerTolerations = AnnAPIGroup + "/provisioner-tolerations"
+
 	// AnnDefaultStorageClass is the annotation indicating that a storage class is the default one.
 	AnnDefaultStorageClass = "storageclass.kubernetes.io/is-default-class"
diff --git a/images/virtualization-artifact/pkg/controller/conditions/getter.go b/images/virtualization-artifact/pkg/controller/conditions/getter.go
new file mode 100644
index 000000000..1d334e004
--- /dev/null
+++ b/images/virtualization-artifact/pkg/controller/conditions/getter.go
@@ -0,0 +1,13 @@
+package conditions
+
+import corev1 "k8s.io/api/core/v1"
+
+func GetPodCondition(condType corev1.PodConditionType, conds []corev1.PodCondition) (corev1.PodCondition, bool) {
+	for _, cond := range conds {
+		if cond.Type == condType {
+			return cond, true
+		}
+	}
+
+	return corev1.PodCondition{}, false
+}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go
index fbdc608f4..c8fa792ae 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go
@@ -35,7 +35,7 @@ import (
 //go:generate moq -rm -out mock.go . Importer Uploader Stat
 
 type Importer interface {
-	Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error
+	Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error
 	StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error
 	CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error)
 	CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error)
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go
index 19f0b6e27..b20b847e4 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go
@@ -46,7 +46,7 @@ var _ Importer = &ImporterMock{}
 //			ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error {
 //				panic("mock out the Protect method")
 //			},
-//			StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error {
+//			StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error {
 //				panic("mock out the Start method")
 //			},
 //			StartWithPodSettingFunc: func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error {
@@ -81,7 +81,7 @@ type ImporterMock struct {
 	ProtectFunc func(ctx context.Context, pod *corev1.Pod) error
 
 	// StartFunc mocks the Start method.
-	StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error
+	StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error
 
 	// StartWithPodSettingFunc mocks the StartWithPodSetting method.
 	StartWithPodSettingFunc func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error
@@ -151,6 +151,8 @@ type ImporterMock struct {
 			Sup *supplements.Generator
 			// CaBundle is the caBundle argument value.
 			CaBundle *datasource.CABundle
+			// Opts is the opts argument value.
+			Opts []service.Option
 		}
 		// StartWithPodSetting holds details about calls to the StartWithPodSetting method.
 		StartWithPodSetting []struct {
@@ -413,7 +415,7 @@ func (mock *ImporterMock) ProtectCalls() []struct {
 }
 
 // Start calls StartFunc.
-func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error {
+func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error {
 	if mock.StartFunc == nil {
 		panic("ImporterMock.StartFunc: method is nil but Importer.Start was just called")
 	}
@@ -423,17 +425,19 @@ func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings
 		Obj      service.ObjectKind
 		Sup      *supplements.Generator
 		CaBundle *datasource.CABundle
+		Opts     []service.Option
 	}{
 		Ctx:      ctx,
 		Settings: settings,
 		Obj:      obj,
 		Sup:      sup,
 		CaBundle: caBundle,
+		Opts:     opts,
 	}
 	mock.lockStart.Lock()
 	mock.calls.Start = append(mock.calls.Start, callInfo)
 	mock.lockStart.Unlock()
-	return mock.StartFunc(ctx, settings, obj, sup, caBundle)
+	return mock.StartFunc(ctx, settings, obj, sup, caBundle, opts...)
 }
 
 // StartCalls gets all the calls that were made to Start.
@@ -446,6 +450,7 @@ func (mock *ImporterMock) StartCalls() []struct {
 	Obj      service.ObjectKind
 	Sup      *supplements.Generator
 	CaBundle *datasource.CABundle
+	Opts     []service.Option
 } {
 	var calls []struct {
 		Ctx      context.Context
@@ -453,6 +458,7 @@ func (mock *ImporterMock) StartCalls() []struct {
 		Obj      service.ObjectKind
 		Sup      *supplements.Generator
 		CaBundle *datasource.CABundle
+		Opts     []service.Option
 	}
 	mock.lockStart.RLock()
 	calls = mock.calls.Start
diff --git a/images/virtualization-artifact/pkg/controller/importer/importer_pod.go b/images/virtualization-artifact/pkg/controller/importer/importer_pod.go
index 635b6ec68..9368f44df 100644
--- a/images/virtualization-artifact/pkg/controller/importer/importer_pod.go
+++ b/images/virtualization-artifact/pkg/controller/importer/importer_pod.go
@@ -18,6 +18,8 @@ package importer
 
 import (
 	"context"
+	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"path"
 	"strconv"
@@ -30,6 +32,7 @@ import (
 	common "github.com/deckhouse/virtualization-controller/pkg/common"
 	podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod"
 	cc "github.com/deckhouse/virtualization-controller/pkg/controller/common"
+	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/sdk/framework/helper"
 )
 
@@ -89,51 +92,29 @@ type PodSettings struct {
 	ImagePullSecrets  []corev1.LocalObjectReference
 	PriorityClassName string
 	PVCName           string
-	// workloadNodePlacement *sdkapi.NodePlacement
+	NodePlacement *NodePlacement
 }
 
 // CreatePod creates and returns a pointer to a pod which is created based on the passed-in endpoint, secret
 // name, etc. A nil secret means the endpoint credentials are not passed to the
 // importer pod.
 func (imp *Importer) CreatePod(ctx context.Context, client client.Client) (*corev1.Pod, error) {
-	var err error
-	// args.ResourceRequirements, err = cc.GetDefaultPodResourceRequirements(client)
-	// if err != nil {
-	//	return nil, err
-	// }
-
-	// args.ImagePullSecrets, err = cc.GetImagePullSecrets(client)
-	// if err != nil {
-	//	return nil, err
-	// }
-
-	// args.workloadNodePlacement, err = cc.GetWorkloadNodePlacement(ctx, client)
-	// if err != nil {
-	//	return nil, err
-	// }
-
-	pod := imp.makeImporterPodSpec()
-
-	if err = client.Create(ctx, pod); err != nil {
+	pod, err := imp.makeImporterPodSpec()
+	if err != nil {
 		return nil, err
 	}
 
-	return pod, nil
-}
-
-// CleanupPod deletes importer Pod.
-// No need to delete CABundle configmap and auth Secret. They have ownerRef and will be gc'ed.
-func CleanupPod(ctx context.Context, client client.Client, pod *corev1.Pod) error {
-	if pod == nil {
-		return nil
+	err = client.Create(ctx, pod)
+	if err != nil {
+		return nil, err
 	}
 
-	return helper.CleanupObject(ctx, client, pod)
+	return pod, nil
 }
 
 // makeImporterPodSpec creates and return the importer pod spec based on the passed-in endpoint, secret and pvc.
-func (imp *Importer) makeImporterPodSpec() *corev1.Pod {
-	pod := &corev1.Pod{
+func (imp *Importer) makeImporterPodSpec() (*corev1.Pod, error) {
+	pod := corev1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Pod",
 			APIVersion: "v1",
@@ -144,36 +125,39 @@ func (imp *Importer) makeImporterPodSpec() *corev1.Pod {
 			Annotations: map[string]string{
 				cc.AnnCreatedBy: "yes",
 			},
-			// Labels: map[string]string{
-			//	common.CDILabelKey:       common.CDILabelValue,
-			//	common.CDIComponentLabel: common.ImporterPodNamePrefix,
-			//	common.PrometheusLabelKey: common.PrometheusLabelValue,
-			// },
 			OwnerReferences: []metav1.OwnerReference{
 				imp.PodSettings.OwnerReference,
 			},
 		},
 		Spec: corev1.PodSpec{
 			// Container and volumes will be added later.
-			Containers:    []corev1.Container{},
-			Volumes:       []corev1.Volume{},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
-			// NodeSelector: args.workloadNodePlacement.NodeSelector,
-			// Tolerations:  args.workloadNodePlacement.Tolerations,
-			// Affinity:     args.workloadNodePlacement.Affinity,
+			Containers:        []corev1.Container{},
+			Volumes:           []corev1.Volume{},
+			RestartPolicy:     corev1.RestartPolicyOnFailure,
 			PriorityClassName: imp.PodSettings.PriorityClassName,
 			ImagePullSecrets:  imp.PodSettings.ImagePullSecrets,
 		},
 	}
 
-	cc.SetRecommendedLabels(pod, imp.PodSettings.InstallerLabels, imp.PodSettings.ControllerName)
+	if imp.PodSettings.NodePlacement != nil && len(imp.PodSettings.NodePlacement.Tolerations) > 0 {
+		pod.Spec.Tolerations = imp.PodSettings.NodePlacement.Tolerations
+
+		JSON, err := json.Marshal(imp.PodSettings.NodePlacement.Tolerations)
+		if err != nil {
+			return nil, err
+		}
+
+		pod.Annotations[cc.AnnTolerationsHash] = base64.StdEncoding.EncodeToString(JSON)
+	}
+
+	cc.SetRecommendedLabels(&pod, imp.PodSettings.InstallerLabels, imp.PodSettings.ControllerName)
 	cc.SetRestrictedSecurityContext(&pod.Spec)
 
 	container := imp.makeImporterContainerSpec()
-	imp.addVolumes(pod, container)
+	imp.addVolumes(&pod, container)
 	pod.Spec.Containers = append(pod.Spec.Containers, *container)
 
-	return pod
+	return &pod, nil
 }
 
 func (imp *Importer) makeImporterContainerSpec() *corev1.Container {
@@ -414,3 +398,33 @@ type PodNamer interface {
 func FindPod(ctx context.Context, client client.Client, name PodNamer) (*corev1.Pod, error) {
 	return helper.FetchObject(ctx, name.ImporterPod(), client, &corev1.Pod{})
 }
+
+func IsNodePlacementChanged(nodePlacement *NodePlacement, pod *corev1.Pod) (bool, error) {
+	if pod == nil {
+		return false, nil
+	}
+
+	podScheduled, _ := conditions.GetPodCondition(corev1.PodScheduled, pod.Status.Conditions)
+	if podScheduled.Status == corev1.ConditionTrue {
+		return false, nil
+	}
+
+	oldHash, exists := pod.Annotations[cc.AnnTolerationsHash]
+
+	if nodePlacement == nil {
+		return exists, nil
+	}
+
+	if len(nodePlacement.Tolerations) == 0 && !exists {
+		return false, nil
+	}
+
+	JSON, err := json.Marshal(nodePlacement.Tolerations)
+	if err != nil {
+		return false, err
+	}
+
+	newHash := base64.StdEncoding.EncodeToString(JSON)
+
+	return oldHash != newHash, nil
+}
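The tolerations hash above is how the controller later detects drift without diffing pod specs: the desired tolerations are JSON-marshaled, base64-encoded, and stored in the `virt.deckhouse.io/tolerations-hash` annotation, and `IsNodePlacementChanged` recomputes and compares that value while the pod is still unscheduled. A minimal, standalone sketch of the round-trip (toleration values are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// tolerationsHash mirrors the encoding used by makeImporterPodSpec:
// JSON-marshal the tolerations, then base64-encode the bytes.
func tolerationsHash(tolerations []corev1.Toleration) (string, error) {
	raw, err := json.Marshal(tolerations)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	desired := []corev1.Toleration{{
		Key:      "dedicated", // hypothetical taint key
		Operator: corev1.TolerationOpExists,
		Effect:   corev1.TaintEffectNoSchedule,
	}}

	hash, err := tolerationsHash(desired)
	if err != nil {
		panic(err)
	}

	// makeImporterPodSpec stores this value on the importer pod; a later
	// reconcile recomputes it from the desired NodePlacement and treats a
	// mismatch as "recreate the provisioner".
	fmt.Println("virt.deckhouse.io/tolerations-hash:", hash)
}
```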
diff --git a/images/virtualization-artifact/pkg/controller/importer/importer_pod_test.go b/images/virtualization-artifact/pkg/controller/importer/importer_pod_test.go
index dfa4c4195..45d2c1713 100644
--- a/images/virtualization-artifact/pkg/controller/importer/importer_pod_test.go
+++ b/images/virtualization-artifact/pkg/controller/importer/importer_pod_test.go
@@ -19,6 +19,7 @@ package importer
 import (
 	"testing"
 
+	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
@@ -53,7 +54,8 @@ func Test_MakePodSpec(t *testing.T) {
 
 	imp := NewImporter(podSettings, settings)
 
-	pod := imp.makeImporterPodSpec()
+	pod, err := imp.makeImporterPodSpec()
+	require.NoError(t, err)
 
 	if pod.Namespace == "" {
 		t.Fatalf("pod.Namespace should not be empty!")
@@ -90,7 +92,8 @@ func Test_MakePodSpec_CABundle(t *testing.T) {
 
 	imp := NewImporter(podSettings, settings)
 
-	pod := imp.makeImporterPodSpec()
+	pod, err := imp.makeImporterPodSpec()
+	require.NoError(t, err)
 
 	hasCAVol := false
 	for _, vol := range pod.Spec.Volumes {
diff --git a/images/virtualization-artifact/pkg/controller/importer/node_placement.go b/images/virtualization-artifact/pkg/controller/importer/node_placement.go
index e9511d562..475aae8c7 100644
--- a/images/virtualization-artifact/pkg/controller/importer/node_placement.go
+++ b/images/virtualization-artifact/pkg/controller/importer/node_placement.go
@@ -16,32 +16,30 @@ limitations under the License.
 
 package importer
 
-import corev1 "k8s.io/api/core/v1"
+import (
+	"encoding/json"
 
-// NodePlacement describes node scheduling configuration.
-// +k8s:openapi-gen=true
-type NodePlacement struct {
-	// nodeSelector is the node selector applied to the relevant kind of pods
-	// It specifies a map of key-value pairs: for the pod to be eligible to run on a node,
-	// the node must have each of the indicated key-value pairs as labels
-	// (it can have additional labels as well).
-	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-	// +kubebuilder:validation:Optional
-	// +optional
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
-	// affinity enables pod affinity/anti-affinity placement expanding the types of constraints
-	// that can be expressed with nodeSelector.
-	// affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector
-	// See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
-	// +kubebuilder:validation:Optional
-	// +optional
-	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
 
+type NodePlacement struct {
 	// tolerations is a list of tolerations applied to the relevant kind of pods
 	// See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.
 	// These are additional tolerations other than default ones.
-	// +kubebuilder:validation:Optional
-	// +optional
 	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
 }
+
+func GetTolerationsPatch(tolerations []corev1.Toleration) (client.Patch, error) {
+	data, err := json.Marshal(map[string]interface{}{
+		"spec": map[string]interface{}{
+			"tolerations": tolerations,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return client.RawPatch(types.MergePatchType, data), nil
+}
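`GetTolerationsPatch` builds a JSON merge patch over `spec.tolerations`. A hedged usage sketch (the helper below is hypothetical, not part of this change; note the Kubernetes API only permits adding tolerations to an existing pod, never removing them):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/deckhouse/virtualization-controller/pkg/controller/importer"
)

// applyTolerations patches spec.tolerations of a running provisioner pod
// with the merge patch produced by GetTolerationsPatch.
func applyTolerations(ctx context.Context, c client.Client, pod *corev1.Pod, tolerations []corev1.Toleration) error {
	patch, err := importer.GetTolerationsPatch(tolerations)
	if err != nil {
		return err
	}
	return c.Patch(ctx, pod, patch)
}
```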
"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" @@ -72,11 +73,24 @@ func (s DiskService) Start( source *cdiv1.DataVolumeSource, obj ObjectKind, sup *supplements.Generator, + opts ...Option, ) error { dvBuilder := kvbuilder.NewDV(sup.DataVolume()) dvBuilder.SetDataSource(source) dvBuilder.SetOwnerRef(obj, obj.GroupVersionKind()) + for _, opt := range opts { + switch v := opt.(type) { + case *NodePlacementOption: + err := dvBuilder.SetNodePlacement(v.nodePlacement) + if err != nil { + return fmt.Errorf("set node placement: %w", err) + } + default: + return fmt.Errorf("unknown Start option") + } + } + sc, err := s.GetStorageClass(ctx, storageClass) if err != nil { return err @@ -482,12 +496,12 @@ func (s DiskService) CheckImportProcess(ctx context.Context, dv *cdiv1.DataVolum } if cdiImporterPrime != nil { - podInitializedCond, ok := GetPodCondition(corev1.PodInitialized, cdiImporterPrime.Status.Conditions) + podInitializedCond, ok := conditions.GetPodCondition(corev1.PodInitialized, cdiImporterPrime.Status.Conditions) if ok && podInitializedCond.Status == corev1.ConditionFalse && strings.Contains(podInitializedCond.Reason, "Error") { return fmt.Errorf("%w; %s error %s: %s", ErrDataVolumeNotRunning, key.String(), podInitializedCond.Reason, podInitializedCond.Message) } - podScheduledCond, ok := GetPodCondition(corev1.PodScheduled, cdiImporterPrime.Status.Conditions) + podScheduledCond, ok := conditions.GetPodCondition(corev1.PodScheduled, cdiImporterPrime.Status.Conditions) if ok && podScheduledCond.Status == corev1.ConditionFalse && strings.Contains(podScheduledCond.Reason, "Error") { return fmt.Errorf("%w; %s error %s: %s", ErrDataVolumeNotRunning, key.String(), podScheduledCond.Reason, podScheduledCond.Message) } diff --git a/images/virtualization-artifact/pkg/controller/service/importer_service.go b/images/virtualization-artifact/pkg/controller/service/importer_service.go index 854836ccd..78fd9daa6 100644 --- a/images/virtualization-artifact/pkg/controller/service/importer_service.go +++ b/images/virtualization-artifact/pkg/controller/service/importer_service.go @@ -65,11 +65,39 @@ func NewImporterService( } } -func (s ImporterService) Start(ctx context.Context, settings *importer.Settings, obj ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error { +type Option interface{} + +type NodePlacementOption struct { + nodePlacement *importer.NodePlacement +} + +func WithNodePlacement(nodePlacement *importer.NodePlacement) Option { + return &NodePlacementOption{nodePlacement: nodePlacement} +} + +func (s ImporterService) Start( + ctx context.Context, + settings *importer.Settings, + obj ObjectKind, + sup *supplements.Generator, + caBundle *datasource.CABundle, + opts ...Option, +) error { ownerRef := metav1.NewControllerRef(obj, obj.GroupVersionKind()) settings.Verbose = s.verbose - pod, err := importer.NewImporter(s.getPodSettings(ownerRef, sup), settings).CreatePod(ctx, s.client) + podSettings := s.getPodSettings(ownerRef, sup) + + for _, opt := range opts { + switch v := opt.(type) { + case *NodePlacementOption: + podSettings.NodePlacement = v.nodePlacement + default: + return fmt.Errorf("unknown Start option") + } + } + + pod, err := importer.NewImporter(podSettings, settings).CreatePod(ctx, s.client) if err != nil && !k8serrors.IsAlreadyExists(err) { return err } @@ -92,6 +120,10 @@ func (s ImporterService) 
 	return s.CleanUpSupplements(ctx, sup)
 }
 
+func (s ImporterService) IsNodePlacementChanged(nodePlacement *importer.NodePlacement, pod *corev1.Pod) (bool, error) {
+	return importer.IsNodePlacementChanged(nodePlacement, pod)
+}
+
 func (s ImporterService) DeletePod(ctx context.Context, obj ObjectKind, controllerName string) (bool, error) {
 	labelSelector := client.MatchingLabels{common.AppKubernetesManagedByLabel: controllerName}
 
diff --git a/images/virtualization-artifact/pkg/controller/service/stat_service.go b/images/virtualization-artifact/pkg/controller/service/stat_service.go
index 437c0affc..0201cd908 100644
--- a/images/virtualization-artifact/pkg/controller/service/stat_service.go
+++ b/images/virtualization-artifact/pkg/controller/service/stat_service.go
@@ -30,6 +30,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	cc "github.com/deckhouse/virtualization-controller/pkg/controller/common"
+	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/monitoring"
 	"github.com/deckhouse/virtualization-controller/pkg/imageformat"
 	"github.com/deckhouse/virtualization-controller/pkg/util"
@@ -106,12 +107,12 @@ func (s StatService) CheckPod(pod *corev1.Pod) error {
 		return errors.New("nil pod")
 	}
 
-	podInitializedCond, ok := GetPodCondition(corev1.PodInitialized, pod.Status.Conditions)
+	podInitializedCond, ok := conditions.GetPodCondition(corev1.PodInitialized, pod.Status.Conditions)
 	if ok && podInitializedCond.Status == corev1.ConditionFalse {
 		return fmt.Errorf("provisioning Pod %s/%s is %w: %s", pod.Namespace, pod.Name, ErrNotInitialized, podInitializedCond.Message)
 	}
 
-	podScheduledCond, ok := GetPodCondition(corev1.PodScheduled, pod.Status.Conditions)
+	podScheduledCond, ok := conditions.GetPodCondition(corev1.PodScheduled, pod.Status.Conditions)
 	if ok && podScheduledCond.Status == corev1.ConditionFalse {
 		return fmt.Errorf("provisioning Pod %s/%s is %w: %s", pod.Namespace, pod.Name, ErrNotScheduled, podScheduledCond.Message)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
index e0cbbf42e..709e8b507 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
@@ -132,7 +132,16 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
 		vd.Status.Progress = "0%"
 
 		envSettings := ds.getEnvSettings(vd, supgen)
-		err = ds.importerService.Start(ctx, envSettings, vd, supgen, datasource.NewCABundleForVMD(vd.GetNamespace(), vd.Spec.DataSource))
+
+		var nodePlacement importer.NodePlacement
+
+		nodePlacement.Tolerations, err = ds.getTolerations(ctx, vd)
+		if err != nil {
+			setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
+			return reconcile.Result{}, fmt.Errorf("failed to get importer tolerations: %w", err)
+		}
+
+		err = ds.importerService.Start(ctx, envSettings, vd, supgen, datasource.NewCABundleForVMD(vd.GetNamespace(), vd.Spec.DataSource), service.WithNodePlacement(&nodePlacement))
 		switch {
 		case err == nil:
 			// OK.
@@ -155,22 +164,63 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
 
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vd.Status.Phase = virtv2.DiskFailed
-
 			switch {
-			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
+			case errors.Is(err, service.ErrNotInitialized):
+				vd.Status.Phase = virtv2.DiskFailed
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdcondition.ProvisioningNotStarted).
-					Message(service.CapitalizeFirstLetter(err.Error() + "."))
+					Message(service.CapitalizeFirstLetter(err.Error()) + ".")
+				return reconcile.Result{}, nil
+			case errors.Is(err, service.ErrNotScheduled):
+				vd.Status.Phase = virtv2.DiskPending
+
+				var nodePlacement importer.NodePlacement
+
+				podErr := err
+
+				nodePlacement.Tolerations, err = ds.getTolerations(ctx, vd)
+				if err != nil {
+					setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
+					return reconcile.Result{}, fmt.Errorf("failed to get importer tolerations: %w", err)
+				}
+
+				var isChanged bool
+				isChanged, err = ds.importerService.IsNodePlacementChanged(&nodePlacement, pod)
+				if err != nil {
+					setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
+					return reconcile.Result{}, err
+				}
+
+				if isChanged {
+					log.Info("The node placement has changed for the importer pod: recreate the provisioner")
+					_, err = ds.importerService.CleanUp(ctx, supgen)
+					if err != nil {
+						setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
+						return reconcile.Result{}, err
+					}
+
+					cb.
+						Status(metav1.ConditionFalse).
+						Reason(vdcondition.ProvisioningNotStarted).
+						Message("Recreating the provisioner due to a change in the virtual machine tolerations.")
+				} else {
+					cb.
+						Status(metav1.ConditionFalse).
+						Reason(vdcondition.ProvisioningNotStarted).
+						Message(service.CapitalizeFirstLetter(podErr.Error()) + ".")
+				}
+
+				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
+				vd.Status.Phase = virtv2.DiskFailed
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdcondition.ProvisioningFailed).
-					Message(service.CapitalizeFirstLetter(err.Error() + "."))
+					Message(service.CapitalizeFirstLetter(err.Error()) + ".")
 				return reconcile.Result{}, nil
 			default:
+				vd.Status.Phase = virtv2.DiskFailed
 				return reconcile.Result{}, err
 			}
 		}
@@ -227,7 +277,15 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
 
 	source := ds.getSource(supgen, ds.statService.GetDVCRImageName(pod))
 
-	err = ds.diskService.Start(ctx, diskSize, sc, source, vd, supgen)
+	var nodePlacement importer.NodePlacement
+
+	nodePlacement.Tolerations, err = ds.getTolerations(ctx, vd)
+	if err != nil {
+		setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
+		return reconcile.Result{}, fmt.Errorf("failed to get importer tolerations: %w", err)
+	}
+
+	err = ds.diskService.Start(ctx, diskSize, sc, source, vd, supgen, service.WithNodePlacement(&nodePlacement))
 	if updated, err := setPhaseConditionFromStorageError(err, vd, cb); err != nil || updated {
 		return reconcile.Result{}, err
 	}
@@ -387,3 +445,36 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
 
 	return service.GetValidatedPVCSize(vd.Spec.PersistentVolumeClaim.Size, unpackedSize)
 }
+
+func (ds RegistryDataSource) getTolerations(ctx context.Context, vd *virtv2.VirtualDisk) ([]corev1.Toleration, error) {
+	if len(vd.Status.AttachedToVirtualMachines) != 1 {
+		return nil, nil
+	}
+
+	vmKey := types.NamespacedName{Name: vd.Status.AttachedToVirtualMachines[0].Name, Namespace: vd.Namespace}
+	vm, err := helper.FetchObject(ctx, vmKey, ds.client, &virtv2.VirtualMachine{})
+	if err != nil {
+		return nil, fmt.Errorf("unable to get the virtual machine %s: %w", vmKey, err)
+	}
+
+	if vm == nil {
+		return nil, nil
+	}
+
+	var tolerations []corev1.Toleration
+	tolerations = append(tolerations, vm.Spec.Tolerations...)
+
+	vmClassKey := types.NamespacedName{Name: vm.Spec.VirtualMachineClassName}
+	vmClass, err := helper.FetchObject(ctx, vmClassKey, ds.client, &virtv2.VirtualMachineClass{})
+	if err != nil {
+		return nil, fmt.Errorf("unable to get the virtual machine class %s: %w", vmClassKey, err)
+	}
+
+	if vmClass == nil {
+		return tolerations, nil
+	}
+
+	tolerations = append(tolerations, vmClass.Spec.Tolerations...)
+
+	return tolerations, nil
+}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pod_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pod_watcher.go
new file mode 100644
index 000000000..b5b20d0a5
--- /dev/null
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pod_watcher.go
@@ -0,0 +1,66 @@
+package watcher
+
+import (
+	"fmt"
+	log "log/slog"
+
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
+	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+)
+
+type PodWatcher struct {
+	logger *log.Logger
+	client client.Client
+}
+
+func NewPodWatcher(client client.Client) *PodWatcher {
+	return &PodWatcher{
+		logger: log.Default().With("watcher", "pod"),
+		client: client,
+	}
+}
+
+func (w PodWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
+	return ctr.Watch(
+		source.Kind(mgr.GetCache(), &corev1.Pod{}),
+		handler.EnqueueRequestForOwner(
+			mgr.GetScheme(),
+			mgr.GetRESTMapper(),
+			&virtv2.VirtualDisk{},
+			handler.OnlyControllerOwner(),
+		),
+		predicate.Funcs{
+			CreateFunc: func(e event.CreateEvent) bool { return false },
+			DeleteFunc: func(e event.DeleteEvent) bool { return true },
+			UpdateFunc: w.filterUpdateEvents,
+		},
+	)
+}
+
+func (w PodWatcher) filterUpdateEvents(e event.UpdateEvent) bool {
+	oldPod, ok := e.ObjectOld.(*corev1.Pod)
+	if !ok {
+		w.logger.Error(fmt.Sprintf("expected an old Pod but got a %T", e.ObjectOld))
+		return false
+	}
+
+	newPod, ok := e.ObjectNew.(*corev1.Pod)
+	if !ok {
+		w.logger.Error(fmt.Sprintf("expected a new Pod but got a %T", e.ObjectNew))
+		return false
+	}
+
+	oldPodScheduled, _ := conditions.GetPodCondition(corev1.PodScheduled, oldPod.Status.Conditions)
+	newPodScheduled, _ := conditions.GetPodCondition(corev1.PodScheduled, newPod.Status.Conditions)
+
+	return oldPodScheduled.LastTransitionTime != newPodScheduled.LastTransitionTime
+}
diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
index b5b181bf4..6203973c5 100644
--- a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
@@ -42,6 +42,10 @@ import (
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
+type Watcher interface {
+	Watch(mgr manager.Manager, ctr controller.Controller) error
+}
+
 type Handler interface {
 	Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error)
 }
@@ -232,14 +236,15 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 		return fmt.Errorf("error setting watch on CVIs: %w", err)
 	}
 
-	w := watcher.NewVirtualDiskSnapshotWatcher(mgr.GetClient())
-	if err := w.Watch(mgr, ctr); err != nil {
-		return fmt.Errorf("error setting watch on VDSnapshots: %w", err)
-	}
-
-	storageClassReadyWatcher := watcher.NewStorageClassWatcher(mgr.GetClient())
-	if err := storageClassReadyWatcher.Watch(mgr, ctr); err != nil {
-		return fmt.Errorf("error setting watch on StorageClass: %w", err)
+	for _, w := range []Watcher{
+		watcher.NewVirtualDiskSnapshotWatcher(mgr.GetClient()),
+		watcher.NewStorageClassWatcher(mgr.GetClient()),
+		watcher.NewPodWatcher(mgr.GetClient()),
+	} {
+		err := w.Watch(mgr, ctr)
+		if err != nil {
+			return fmt.Errorf("error setting watcher: %w", err)
+		}
 	}
 
 	return nil
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
index 046671c1a..fb8e41225 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
@@ -35,7 +35,7 @@ import (
 //go:generate moq -rm -out mock.go . Importer Uploader Stat Handler
 
 type Importer interface {
-	Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error
+	Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error
 	StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error
 	CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error)
 	CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
index 3e5d84578..586808b49 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
@@ -1,19 +1,3 @@
-/*
-Copyright 2024 Flant JSC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
 // Code generated by moq; DO NOT EDIT.
 // github.com/matryer/moq
 
@@ -60,7 +44,7 @@ var _ Importer = &ImporterMock{}
 //			ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error {
 //				panic("mock out the Protect method")
 //			},
-//			StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error {
+//			StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error {
 //				panic("mock out the Start method")
 //			},
 //			StartWithPodSettingFunc: func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error {
@@ -92,7 +76,7 @@ type ImporterMock struct {
 	ProtectFunc func(ctx context.Context, pod *corev1.Pod) error
 
 	// StartFunc mocks the Start method.
-	StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error
+	StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error
 
 	// StartWithPodSettingFunc mocks the StartWithPodSetting method.
 	StartWithPodSettingFunc func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error
@@ -153,6 +137,8 @@ type ImporterMock struct {
 			Sup *supplements.Generator
 			// CaBundle is the caBundle argument value.
 			CaBundle *datasource.CABundle
+			// Opts is the opts argument value.
+			Opts []service.Option
 		}
 		// StartWithPodSetting holds details about calls to the StartWithPodSetting method.
 		StartWithPodSetting []struct {
@@ -374,7 +360,7 @@ func (mock *ImporterMock) ProtectCalls() []struct {
 }
 
 // Start calls StartFunc.
-func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle) error {
+func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error {
 	if mock.StartFunc == nil {
 		panic("ImporterMock.StartFunc: method is nil but Importer.Start was just called")
 	}
@@ -384,17 +370,19 @@ func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings
 		Obj      service.ObjectKind
 		Sup      *supplements.Generator
 		CaBundle *datasource.CABundle
+		Opts     []service.Option
 	}{
 		Ctx:      ctx,
 		Settings: settings,
 		Obj:      obj,
 		Sup:      sup,
 		CaBundle: caBundle,
+		Opts:     opts,
 	}
 	mock.lockStart.Lock()
 	mock.calls.Start = append(mock.calls.Start, callInfo)
 	mock.lockStart.Unlock()
-	return mock.StartFunc(ctx, settings, obj, sup, caBundle)
+	return mock.StartFunc(ctx, settings, obj, sup, caBundle, opts...)
 }
 
 // StartCalls gets all the calls that were made to Start.
@@ -407,6 +395,7 @@ func (mock *ImporterMock) StartCalls() []struct {
 	Obj      service.ObjectKind
 	Sup      *supplements.Generator
 	CaBundle *datasource.CABundle
+	Opts     []service.Option
 } {
 	var calls []struct {
 		Ctx      context.Context
@@ -414,6 +403,7 @@ func (mock *ImporterMock) StartCalls() []struct {
 		Obj      service.ObjectKind
 		Sup      *supplements.Generator
 		CaBundle *datasource.CABundle
+		Opts     []service.Option
 	}
 	mock.lockStart.RLock()
 	calls = mock.calls.Start