diff --git a/images/webhooks/src/handlers/nscValidator.go b/images/webhooks/src/handlers/nscValidator.go
index eef659a..7ac2d46 100644
--- a/images/webhooks/src/handlers/nscValidator.go
+++ b/images/webhooks/src/handlers/nscValidator.go
@@ -7,7 +7,6 @@ import (
 	"github.com/slok/kubewebhook/v2/pkg/model"
 	kwhvalidating "github.com/slok/kubewebhook/v2/pkg/webhook/validating"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	mc "webhooks/api"
 )
@@ -38,24 +37,10 @@ func NSCValidate(ctx context.Context, arReview *model.AdmissionReview, obj metav
 		v3presents = true
 	}

-	for _, itemClass := range listClasses.Items {
-		if itemClass.Name == nsc.Name {
-			continue
-		}
-		if itemClass.Spec.Connection.NFSVersion == "3" {
-			v3presents = true
-		}
-	}
-
 	klog.Infof("NFSv3 NFSStorageClass exists: %t", v3presents)

 	nfsModuleConfig := &mc.ModuleConfig{}

-	err = cl.Get(ctx, types.NamespacedName{Name: csiNfsModuleName, Namespace: ""}, nfsModuleConfig)
-	if err != nil {
-		klog.Fatal(err)
-	}
-
 	if value, exists := nfsModuleConfig.Spec.Settings["v3support"]; exists && value == true {
 		v3enabled = true
 	} else {
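After this change, only the flags visible in the remaining context lines of the hunk above are computed inside `NSCValidate`: the incoming NFSStorageClass's own NFS version sets `v3presents`, and `v3enabled` is read from the ModuleConfig settings without the in-handler `cl.Get`. The sketch below is a minimal, self-contained distillation of that flag computation; the function name, parameters, and the `main` example are hypothetical and do not exist in the repository, and how the two flags feed into the final admission decision lies outside the visible hunk.

```go
package main

import "fmt"

// computeNFSv3Flags is a hypothetical distillation of the two flags that
// NSCValidate still computes after this patch: only the incoming class's own
// NFS version is inspected (the loop over already existing NFSStorageClasses
// is removed), and "v3support" is looked up in whatever settings map is
// available (the in-handler cl.Get of the csi-nfs ModuleConfig is removed too).
func computeNFSv3Flags(nfsVersion string, moduleSettings map[string]interface{}) (v3presents, v3enabled bool) {
	if nfsVersion == "3" {
		v3presents = true
	}
	if value, exists := moduleSettings["v3support"]; exists && value == true {
		v3enabled = true
	}
	return v3presents, v3enabled
}

func main() {
	// Example: an NFSv3 class while v3support is absent from the settings.
	v3presents, v3enabled := computeNFSv3Flags("3", map[string]interface{}{})
	fmt.Printf("v3presents=%t v3enabled=%t\n", v3presents, v3enabled) // v3presents=true v3enabled=false
}
```

Note that within the visible hunk nothing populates `nfsModuleConfig` between `&mc.ModuleConfig{}` and the `Spec.Settings["v3support"]` lookup, so `v3enabled` can only become true if the struct is filled elsewhere in the handler.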
diff --git a/monitoring/prometheus-rules/nfsv3-storage-class-without-nfsv3-support.yaml b/monitoring/prometheus-rules/nfsv3-storage-class-without-nfsv3-support.yaml
new file mode 100644
index 0000000..9669b38
--- /dev/null
+++ b/monitoring/prometheus-rules/nfsv3-storage-class-without-nfsv3-support.yaml
@@ -0,0 +1,40 @@
+- name: kubernetes.pv.settings_check
+  rules:
+    - alert: NotSupportedNFSv3StorageClass
+      expr: # count(kube_persistentvolume_labels{label_storage_deckhouse_io_linstor_settings_mismatch="true", label_storage_deckhouse_io_linstor_settings_mismatch_ignore!="true"}) > 0
+      for: 5m
+      labels:
+        severity_level: # "4"
+        tier: cluster
+      annotations:
+        plk_markup_format: # "markdown"
+        plk_protocol_version: "1"
+#        plk_create_group_if_not_exists__d8_drbd_device_health: "ReplicatedPVSettingsCheck,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes"
+#        plk_grouped_by__d8_drbd_device_health: "ReplicatedPVSettingsCheck,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes"
+        summary: Replicated PVs have incorrect settings
+        description: |
+#          There are persistent volumes in the cluster that were created before the migration to ReplicatedStorageClass.
+#          You can recreate them, or add the label storage.deckhouse.io/linstor-settings-mismatch-ignore=true to a PV to ignore it.
+#          Please note that in the future, when transitioning from LINSTOR to a new controller, the settings of all such PVs will be automatically modified to match the current StorageClass settings.
+#
+#          You can view all such PVs with the command
+#          `kubectl get pv -l storage.deckhouse.io/linstor-settings-mismatch=true,storage.deckhouse.io/linstor-settings-mismatch-ignore!=true`
+#
+#          Also, you can add the label to all incorrect PVs with the command
+#          `kubectl label pv -l storage.deckhouse.io/linstor-settings-mismatch=true storage.deckhouse.io/linstor-settings-mismatch-ignore=true`
+    - alert: #ReplicatedPVWithIncorrectQuorumMinimumRedundancy
+      expr: # count(kube_persistentvolume_labels{label_storage_deckhouse_io_unable_to_set_quorum_minimum_redundancy="true"}) > 0
+      for: 5m
+      labels:
+        severity_level: "3"
+        tier: cluster
+      annotations:
+#        plk_markup_format: "markdown"
+#        plk_protocol_version: "1"
+#        plk_create_group_if_not_exists__d8_drbd_device_health: "ReplicatedPVSettingsCheck,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes"
+#        plk_grouped_by__d8_drbd_device_health: "ReplicatedPVSettingsCheck,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes"
+        summary: # Replicated PVs have an incorrect quorum-minimum-redundancy setting
+        description: |
+#          There are persistent volumes in the cluster that have an incorrect quorum-minimum-redundancy setting.
+#
+#          Please contact tech support for assistance.
\ No newline at end of file