| author | Bob Peterson <rpeterso@redhat.com> | 2018-11-08 20:04:50 +0100 |
|---|---|---|
| committer | David Teigland <teigland@redhat.com> | 2018-11-08 20:17:00 +0100 |
| commit | 216f0efd19b9cc32207934fd1b87a45f2c4c593e | |
| tree | 78c64859be77492003b792df6363ab931398bfa8 /fs/dlm | |
| parent | dlm: don't leak kernel pointer to userspace | |
dlm: Don't swamp the CPU with callbacks queued during recovery
Before this patch, recovery would delay all callbacks, putting them
on a queue, and afterward requeue every one of them to the callback
work queue in a single burst. This patch does the same thing, but
takes a break after every 25 so that requeueing won't swamp the CPU
at the expense of other RT processes like corosync. (A user-space
sketch of this batching pattern follows the diff below.)
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Diffstat (limited to 'fs/dlm')
-rw-r--r-- | fs/dlm/ast.c | 10 |
1 file changed, 10 insertions, 0 deletions
```diff
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c3edff..47ee66d70109 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
 
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
```
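To make the batching pattern easier to experiment with outside the kernel, here is a minimal user-space C sketch of the same idea: drain a queue of pending items in chunks of 25, yielding the CPU between chunks. This is an illustration, not the patch itself: `sched_yield()` stands in for the kernel's `cond_resched()`, and `struct item`, `queue_work_stub()`, and `resume_callbacks()` are hypothetical names invented for the sketch.

```c
/*
 * Minimal user-space sketch of the batch-and-yield pattern above.
 * sched_yield() plays the role of cond_resched(); the `pending`
 * list plays the role of ls->ls_cb_delay. All names are illustrative.
 */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CB_QUEUE 25		/* same batch size the patch uses */

struct item {
	int id;
	struct item *next;
};

static struct item *pending;	/* queue of delayed "callbacks" */

/* Stand-in for queue_work(): handle one item and free it. */
static void queue_work_stub(struct item *it)
{
	printf("queued callback %d\n", it->id);
	free(it);
}

static void resume_callbacks(void)
{
	int count = 0;

more:
	while (pending) {
		struct item *it = pending;

		pending = it->next;
		queue_work_stub(it);
		if (++count == MAX_CB_QUEUE)
			break;		/* take a break after 25 */
	}

	if (count == MAX_CB_QUEUE) {
		count = 0;
		sched_yield();		/* let other tasks run */
		goto more;
	}
}

int main(void)
{
	/* Queue 60 fake delayed callbacks; drained as 25 + 25 + 10. */
	for (int i = 60; i > 0; i--) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			abort();
		it->id = i;
		it->next = pending;
		pending = it;
	}

	resume_callbacks();
	return 0;
}
```

Two details of the real patch are worth noting. First, ls->ls_cb_mutex is dropped and retaken around each batch, so the lock is never held across the yield. Second, cond_resched() typically only switches away when the scheduler has another task waiting to run, so an otherwise idle system still drains the delayed-callback list at full speed; and when the list length is an exact multiple of MAX_CB_QUEUE, the loop makes one final empty pass before returning, which is harmless.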