From c2d8ae2a2abfefd3dafa8006d449b695ff6135a7 Mon Sep 17 00:00:00 2001
From: Anand Rajagopal
Date: Thu, 8 Feb 2024 18:09:55 +0000
Subject: [PATCH] A small fix to avoid a deadlock that can happen as mentioned
 in issue #3682

---
 provider/mem/mem.go      |  6 ++---
 provider/mem/mem_test.go | 56 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 3 deletions(-)

diff --git a/provider/mem/mem.go b/provider/mem/mem.go
index 0e39ba1d5f..ceeff099d0 100644
--- a/provider/mem/mem.go
+++ b/provider/mem/mem.go
@@ -149,13 +149,13 @@ func max(a, b int) int {
 // resolved and successfully notified about.
 // They are not guaranteed to be in chronological order.
 func (a *Alerts) Subscribe() provider.AlertIterator {
+	alerts := a.alerts.List()
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
 
 	var (
-		done   = make(chan struct{})
-		alerts = a.alerts.List()
-		ch     = make(chan *types.Alert, max(len(alerts), alertChannelLength))
+		done = make(chan struct{})
+		ch   = make(chan *types.Alert, max(len(alerts), alertChannelLength))
 	)
 
 	for _, a := range alerts {
diff --git a/provider/mem/mem_test.go b/provider/mem/mem_test.go
index a45321844c..3b1bd31472 100644
--- a/provider/mem/mem_test.go
+++ b/provider/mem/mem_test.go
@@ -135,6 +135,62 @@ func TestAlertsSubscribePutStarvation(t *testing.T) {
 	}
 }
 
+func TestDeadLock(t *testing.T) {
+	t0 := time.Now()
+	t1 := t0.Add(5 * time.Second)
+
+	marker := types.NewMarker(prometheus.NewRegistry())
+	// Run GC every 5 milliseconds to increase the possibility of a deadlock with Subscribe().
+	alerts, err := NewAlerts(context.Background(), marker, 5*time.Millisecond, noopCallback{}, log.NewNopLogger(), nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	alertsToInsert := []*types.Alert{}
+	for i := 0; i < 200+1; i++ {
+		alertsToInsert = append(alertsToInsert, &types.Alert{
+			Alert: model.Alert{
+				// Make sure the fingerprints differ.
+				Labels:       model.LabelSet{"iteration": model.LabelValue(strconv.Itoa(i))},
+				Annotations:  model.LabelSet{"foo": "bar"},
+				StartsAt:     t0,
+				EndsAt:       t1,
+				GeneratorURL: "http://example.com/prometheus",
+			},
+			UpdatedAt: t0,
+			Timeout:   false,
+		})
+	}
+
+	if err := alerts.Put(alertsToInsert...); err != nil {
+		t.Fatal("Unable to add alerts")
+	}
+	done := make(chan bool)
+
+	// Call Subscribe() repeatedly in a goroutine to increase
+	// the possibility of a deadlock occurring.
+	go func() {
+		tick := time.NewTicker(10 * time.Millisecond)
+		defer tick.Stop()
+		stopAfter := time.After(1 * time.Second)
+		for {
+			select {
+			case <-tick.C:
+				alerts.Subscribe()
+			case <-stopAfter:
+				done <- true
+				// return (not break) so the ticker goroutine actually exits;
+				// break would only leave the select and keep the loop running.
+				return
+			}
+		}
+	}()
+
+	select {
+	case <-done:
+		// no deadlock
+	case <-time.After(10 * time.Second):
+		t.Error("Deadlock detected")
+	}
+}
+
 func TestAlertsPut(t *testing.T) {
 	marker := types.NewMarker(prometheus.NewRegistry())
 	alerts, err := NewAlerts(context.Background(), marker, 30*time.Minute, noopCallback{}, log.NewNopLogger(), nil)
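
Note on the fix (explanatory, not part of the patch): before this change, Subscribe() took a.mtx and then called a.alerts.List(), which acquires the alert store's internal lock; the store's periodic GC holds that internal lock while invoking a callback that also needs a.mtx, so a subscriber and the GC goroutine could each hold one lock while waiting for the other (issue #3682). Taking the snapshot before locking a.mtx means the two locks are never nested on this path. What follows is a minimal, self-contained sketch of the resulting lock ordering; it is not Alertmanager code, and the store/alerts types and their fields are hypothetical stand-ins for the real store and Alerts types.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // store stands in for the GC'd alert store; List takes its internal lock,
    // like a.alerts.List() in the patch.
    type store struct {
    	mtx   sync.Mutex
    	items []string
    }

    // List returns a copy of the items, taking and releasing store.mtx.
    func (s *store) List() []string {
    	s.mtx.Lock()
    	defer s.mtx.Unlock()
    	out := make([]string, len(s.items))
    	copy(out, s.items)
    	return out
    }

    // alerts stands in for the mem provider's Alerts type.
    type alerts struct {
    	mtx   sync.Mutex
    	store *store
    }

    // Subscribe mirrors the patched ordering: snapshot the store *before*
    // taking a.mtx, so store.mtx and a.mtx are never held at the same time
    // from this path.
    func (a *alerts) Subscribe() []string {
    	snapshot := a.store.List() // store.mtx taken and released here
    	a.mtx.Lock()               // only now take the subscriber mutex
    	defer a.mtx.Unlock()
    	return snapshot
    }

    func main() {
    	a := &alerts{store: &store{items: []string{"alert-1", "alert-2"}}}
    	fmt.Println(a.Subscribe())
    }

The property that matters is that Subscribe never holds both mutexes at once, so a goroutine that takes store.mtx first (as the GC does before calling back into code that needs a.mtx) can no longer form a wait cycle with a subscriber.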