author     Alexander Aring <aahringo@redhat.com>      2022-10-27 22:45:18 +0200
committer  David Teigland <teigland@redhat.com>       2022-11-08 19:59:41 +0100
commit     a4c0352bb1094cbe242f4458e267de845790737a
tree       d245450da6ff15bbc51e1a7073c17bc95be09744    /fs/dlm
parent     fs: dlm: use list_first_entry marco
fs: dlm: convert ls_cb_mutex mutex to spinlock
This patch converts the ls_cb_mutex mutex to a spinlock; no code path
sleeps while this lock is held, so a spinlock is sufficient.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
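
For readers less familiar with the pattern, the conversion is mechanical: the
struct field changes type from struct mutex to spinlock_t, mutex_init() becomes
spin_lock_init(), and each mutex_lock()/mutex_unlock() pair becomes
spin_lock()/spin_unlock(). The sketch below is illustrative only; the cb_queue
type and its fields are hypothetical and not part of fs/dlm. It assumes, as the
commit message states for ls_cb_mutex, that nothing sleeps while the lock is held.

	/* Illustrative sketch only -- hypothetical cb_queue type, not fs/dlm code. */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct cb_queue {
		spinlock_t lock;            /* was: struct mutex lock; */
		struct list_head delayed;   /* entries parked while callbacks are suspended */
	};

	static void cb_queue_init(struct cb_queue *q)
	{
		spin_lock_init(&q->lock);   /* was: mutex_init(&q->lock); */
		INIT_LIST_HEAD(&q->delayed);
	}

	static void cb_queue_park(struct cb_queue *q, struct list_head *entry)
	{
		spin_lock(&q->lock);        /* was: mutex_lock(&q->lock); */
		list_add(entry, &q->delayed);
		spin_unlock(&q->lock);      /* was: mutex_unlock(&q->lock); */
	}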
Diffstat (limited to 'fs/dlm')
 fs/dlm/ast.c          | 12 ++++++------
 fs/dlm/dlm_internal.h |  2 +-
 fs/dlm/lockspace.c    |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 6e07c151ad28..daaa0dff6ef4 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -200,13 +200,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 	if (!prev_seq) {
 		kref_get(&lkb->lkb_ref);
 
-		mutex_lock(&ls->ls_cb_mutex);
+		spin_lock(&ls->ls_cb_lock);
 		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
 			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
 		} else {
 			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		}
-		mutex_unlock(&ls->ls_cb_mutex);
+		spin_unlock(&ls->ls_cb_lock);
 	}
  out:
 	mutex_unlock(&lkb->lkb_cb_mutex);
@@ -289,9 +289,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
 
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
 	if (ls->ls_callback_wq) {
-		mutex_lock(&ls->ls_cb_mutex);
+		spin_lock(&ls->ls_cb_lock);
 		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
-		mutex_unlock(&ls->ls_cb_mutex);
+		spin_unlock(&ls->ls_cb_lock);
 
 		flush_workqueue(ls->ls_callback_wq);
 	}
@@ -309,7 +309,7 @@ void dlm_callback_resume(struct dlm_ls *ls)
 		return;
 
 more:
-	mutex_lock(&ls->ls_cb_mutex);
+	spin_lock(&ls->ls_cb_lock);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
@@ -320,7 +320,7 @@ more:
 	empty = list_empty(&ls->ls_cb_delay);
 	if (empty)
 		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
-	mutex_unlock(&ls->ls_cb_mutex);
+	spin_unlock(&ls->ls_cb_lock);
 
 	sum += count;
 	if (!empty) {
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 94fadb619ba0..fc4be8c35703 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -631,7 +631,7 @@ struct dlm_ls {
 
 	/* recovery related */
 
-	struct mutex		ls_cb_mutex;
+	spinlock_t		ls_cb_lock;
 	struct list_head	ls_cb_delay; /* save for queue_work later */
 	struct timer_list	ls_timer;
 	struct task_struct	*ls_recoverd_task;
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 9479c8110979..4965737705b7 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -567,7 +567,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	init_completion(&ls->ls_recovery_done);
 	ls->ls_recovery_result = -1;
 
-	mutex_init(&ls->ls_cb_mutex);
+	spin_lock_init(&ls->ls_cb_lock);
 	INIT_LIST_HEAD(&ls->ls_cb_delay);
 
 	ls->ls_recoverd_task = NULL;