1 From 51f6e15573004ce9a32edcb5f2b19fa3fba8ca8e Mon Sep 17 00:00:00 2001
2 From: Bob Peterson <rpeterso@redhat.com>
3 Date: Thu, 8 Nov 2018 14:04:50 -0500
4 Subject: dlm: Don't swamp the CPU with callbacks queued during recovery
6 [ Upstream commit 216f0efd19b9cc32207934fd1b87a45f2c4c593e ]
8 Before this patch, recovery would cause all callbacks to be delayed,
9 put on a queue, and afterward they were all queued to the callback
10 work queue. This patch does the same thing, but occasionally takes
11 a break after 25 of them so it won't swamp the CPU at the expense
12 of other RT processes like corosync.
14 Signed-off-by: Bob Peterson <rpeterso@redhat.com>
15 Signed-off-by: David Teigland <teigland@redhat.com>
16 Signed-off-by: Sasha Levin <sashal@kernel.org>
17 ---
18 fs/dlm/ast.c | 10 ++++++++++
19 1 file changed, 10 insertions(+)
21 diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
22 index 07fed838d8fd..15fa4239ae9f 100644
23 --- a/fs/dlm/ast.c
24 +++ b/fs/dlm/ast.c
25 @@ -290,6 +290,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
26 flush_workqueue(ls->ls_callback_wq);
27 }
28 }
29 +#define MAX_CB_QUEUE 25
30 +
31 void dlm_callback_resume(struct dlm_ls *ls)
32 {
33 struct dlm_lkb *lkb, *safe;
34 @@ -300,15 +302,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
35 if (!ls->ls_callback_wq)
36 return;
37
38 +more:
39 mutex_lock(&ls->ls_cb_mutex);
40 list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
41 list_del_init(&lkb->lkb_cb_list);
42 queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
43 count++;
44 + if (count == MAX_CB_QUEUE)
45 + break;
46 }
47 mutex_unlock(&ls->ls_cb_mutex);
48
49 if (count)
50 log_rinfo(ls, "dlm_callback_resume %d", count);
51 + if (count == MAX_CB_QUEUE) {