From feafbedda1824d01cb22b5bbbb849bce42dcb433 Mon Sep 17 00:00:00 2001
From: Rahul Sharma
Date: Thu, 21 Nov 2024 03:37:27 +0530
Subject: [PATCH] avoid resetting condition with severity error to severity
 warning during next reconcile (#573)
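
Previously RecordDecayingCondition marked the condition with severity
Warning before checking for staleness, so a condition that had already
decayed to severity Error was reset to Warning on the next reconcile
(which also refreshed LastTransitionTime and restarted the timeout).
The Warning mark now only happens when the condition is not yet stale,
and HasStaleCondition additionally treats an existing Error severity as
stale, so a condition never decays backwards.

Illustrative sketch of the intended lifecycle (not part of this patch;
it assumes the module path github.com/linode/cluster-api-provider-linode
and uses a core Cluster API Machine as the conditions.Setter):

	package main

	import (
		"fmt"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
		"sigs.k8s.io/cluster-api/util/conditions"

		// assumed module path for this repo's util/reconciler package
		rec "github.com/linode/cluster-api-provider-linode/util/reconciler"
	)

	func main() {
		machine := &clusterv1.Machine{} // any conditions.Setter works here
		timeout := 5 * time.Minute

		// First reconcile: the condition is fresh, so it is marked with
		// severity Warning and the caller keeps retrying.
		stale := rec.RecordDecayingCondition(machine, clusterv1.ReadyCondition, "Timeout", "time is up", timeout)
		fmt.Println(stale, conditions.Get(machine, clusterv1.ReadyCondition).Severity) // false Warning

		// Simulate the timeout elapsing by back-dating the transition time.
		machine.Status.Conditions[0].LastTransitionTime = metav1.NewTime(time.Now().Add(-2 * timeout))

		// Second reconcile: the condition is now stale, so it decays to Error.
		stale = rec.RecordDecayingCondition(machine, clusterv1.ReadyCondition, "Timeout", "time is up", timeout)
		fmt.Println(stale, conditions.Get(machine, clusterv1.ReadyCondition).Severity) // true Error

		// Third reconcile: before this change the unconditional MarkFalse
		// reset the condition to Warning; now it stays at Error.
		stale = rec.RecordDecayingCondition(machine, clusterv1.ReadyCondition, "Timeout", "time is up", timeout)
		fmt.Println(stale, conditions.Get(machine, clusterv1.ReadyCondition).Severity) // true Error
	}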
---
 .../linodecluster_controller_test.go          | 30 +++++++++----------
 .../linodefirewall_controller_test.go         |  3 ++
 .../linodemachine_controller_test.go          |  2 +-
 .../controller/linodevpc_controller_test.go   |  3 ++
 util/reconciler/conditions.go                 |  6 ++--
 5 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/internal/controller/linodecluster_controller_test.go b/internal/controller/linodecluster_controller_test.go
index 5d9e08529..e72c08de8 100644
--- a/internal/controller/linodecluster_controller_test.go
+++ b/internal/controller/linodecluster_controller_test.go
@@ -114,6 +114,21 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
 
 	ctlrSuite.Run(
 		OneOf(
+			Path(
+				Call("vpc present but not ready", func(ctx context.Context, mck Mock) {
+					Expect(k8sClient.Create(ctx, &linodeVPC)).To(Succeed())
+					linodeVPC.Status.Ready = false
+					k8sClient.Status().Update(ctx, &linodeVPC)
+				}),
+				OneOf(
+					Path(Result("", func(ctx context.Context, mck Mock) {
+						reconciler.Client = k8sClient
+						_, err := reconciler.reconcile(ctx, cScope, mck.Logger())
+						Expect(err).NotTo(HaveOccurred())
+						Expect(rec.ConditionTrue(&linodeCluster, ConditionPreflightLinodeVPCReady)).To(BeFalse())
+					})),
+				),
+			),
 			Path(
 				Call("firewall doesn't exist", func(ctx context.Context, mck Mock) {
 					cScope.LinodeCluster.Spec.NodeBalancerFirewallRef = &corev1.ObjectReference{Name: "firewalltest"}
@@ -151,21 +166,6 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
 				})),
 			),
 		),
-			Path(
-				Call("vpc present but not ready", func(ctx context.Context, mck Mock) {
-					Expect(k8sClient.Create(ctx, &linodeVPC)).To(Succeed())
-					linodeVPC.Status.Ready = false
-					k8sClient.Status().Update(ctx, &linodeVPC)
-				}),
-				OneOf(
-					Path(Result("", func(ctx context.Context, mck Mock) {
-						reconciler.Client = k8sClient
-						_, err := reconciler.reconcile(ctx, cScope, mck.Logger())
-						Expect(err).NotTo(HaveOccurred())
-						Expect(rec.ConditionTrue(&linodeCluster, ConditionPreflightLinodeVPCReady)).To(BeFalse())
-					})),
-				),
-			),
 			Path(
 				Call("cluster is not created because there was an error creating nb", func(ctx context.Context, mck Mock) {
 					cScope.LinodeClient = mck.LinodeClient
diff --git a/internal/controller/linodefirewall_controller_test.go b/internal/controller/linodefirewall_controller_test.go
index 1eddadd6c..c4e5e88c8 100644
--- a/internal/controller/linodefirewall_controller_test.go
+++ b/internal/controller/linodefirewall_controller_test.go
@@ -25,6 +25,8 @@ import (
 	"github.com/linode/linodego"
 	"go.uber.org/mock/gomock"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -169,6 +171,7 @@ var _ = Describe("lifecycle", Ordered, Label("firewalls", "lifecycle"), func() {
 				}),
 				OneOf(
 					Path(Result("update requeues for update rules error", func(ctx context.Context, mck Mock) {
+						conditions.MarkFalse(fwScope.LinodeFirewall, clusterv1.ReadyCondition, "test", clusterv1.ConditionSeverityWarning, "%s", "test")
 						mck.LinodeClient.EXPECT().UpdateFirewallRules(ctx, 1, gomock.Any()).Return(nil, &linodego.Error{Code: http.StatusInternalServerError})
 						res, err := reconciler.reconcile(ctx, mck.Logger(), &fwScope)
 						Expect(err).NotTo(HaveOccurred())
diff --git a/internal/controller/linodemachine_controller_test.go b/internal/controller/linodemachine_controller_test.go
index 9d240135b..fdc43a6bf 100644
--- a/internal/controller/linodemachine_controller_test.go
+++ b/internal/controller/linodemachine_controller_test.go
@@ -630,7 +630,7 @@ var _ = Describe("create", Label("machine", "create"), func() {
 
 			Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightMetadataSupportConfigured)).To(BeTrue())
 			Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightCreated)).To(BeFalse())
-			Expect(conditions.Get(&linodeMachine, ConditionPreflightCreated).Severity).To(Equal(clusterv1.ConditionSeverityError))
+			Expect(conditions.Get(&linodeMachine, ConditionPreflightCreated).Severity).To(Equal(clusterv1.ConditionSeverityWarning))
 			Expect(conditions.Get(&linodeMachine, ConditionPreflightCreated).Message).To(ContainSubstring("time is up"))
 		})
 	})
diff --git a/internal/controller/linodevpc_controller_test.go b/internal/controller/linodevpc_controller_test.go
index 8482a58d5..0bd13c187 100644
--- a/internal/controller/linodevpc_controller_test.go
+++ b/internal/controller/linodevpc_controller_test.go
@@ -26,6 +26,8 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -163,6 +165,7 @@ var _ = Describe("lifecycle", Ordered, Label("vpc", "lifecycle"), func() {
 				}),
 				OneOf(
 					Path(Result("update requeues", func(ctx context.Context, mck Mock) {
+						conditions.MarkFalse(vpcScope.LinodeVPC, clusterv1.ReadyCondition, "test", clusterv1.ConditionSeverityWarning, "%s", "test")
 						res, err := reconciler.reconcile(ctx, mck.Logger(), &vpcScope)
 						Expect(err).NotTo(HaveOccurred())
 						Expect(res.RequeueAfter).To(Equal(rec.DefaultVPCControllerReconcileDelay))
diff --git a/util/reconciler/conditions.go b/util/reconciler/conditions.go
index da63677a0..fc994e628 100644
--- a/util/reconciler/conditions.go
+++ b/util/reconciler/conditions.go
@@ -31,13 +31,12 @@ func HasConditionSeverity(from conditions.Getter, typ clusterv1.ConditionType, s
 }
 
 func RecordDecayingCondition(to conditions.Setter, typ clusterv1.ConditionType, reason, message string, timeout time.Duration) bool {
-	conditions.MarkFalse(to, typ, reason, clusterv1.ConditionSeverityWarning, "%s", message)
-
 	if HasStaleCondition(to, typ, timeout) {
 		conditions.MarkFalse(to, typ, reason, clusterv1.ConditionSeverityError, "%s", message)
 		return true
 	}
+	conditions.MarkFalse(to, typ, reason, clusterv1.ConditionSeverityWarning, "%s", message)
 	return false
 }
 
@@ -47,5 +46,6 @@ func HasStaleCondition(from conditions.Getter, typ clusterv1.ConditionType, time
 		return false
 	}
 
-	return time.Now().After(cond.LastTransitionTime.Add(timeout))
+	// if severity is already set to error, it was a stale condition
+	return cond.Severity == clusterv1.ConditionSeverityError || time.Now().After(cond.LastTransitionTime.Add(timeout))
 }