author | Alexander Aring <aahringo@redhat.com> | 2024-04-02 21:17:58 +0200
---|---|---
committer | David Teigland <teigland@redhat.com> | 2024-04-09 18:43:49 +0200
commit | 455597a55f402e52e1c577c921bf5fe3aa4d2281 (patch) |
tree | 73352d55a39dec79f8a71b8977f6b15ab9bdb313 /fs/dlm |
parent | dlm: remove allocation parameter in msg allocation (diff) |
download | linux-455597a55f402e52e1c577c921bf5fe3aa4d2281.tar.xz, linux-455597a55f402e52e1c577c921bf5fe3aa4d2281.zip |
dlm: switch to GFP_ATOMIC in dlm allocations
Replace GFP_NOFS with GFP_ATOMIC. Also stop using idr_preload which
uses a non-bh spin_lock. This is further preparation for softirq
message processing.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
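Background on the GFP flags involved: GFP_NOFS may sleep while waiting for memory reclaim (it only forbids recursing into filesystem code), whereas GFP_ATOMIC never sleeps and is therefore the flag to use under a spinlock or in softirq (bottom-half) context. A minimal sketch of that constraint, using hypothetical names (example_entry, example_lock, example_add) that are not from fs/dlm:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
	struct list_head list;
	int id;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

/* Runs in a context where sleeping is not allowed, e.g. softirq. */
static int example_add(int id)
{
	struct example_entry *e;
	int ret = 0;

	spin_lock(&example_lock);
	/*
	 * GFP_ATOMIC never sleeps, so it is safe here; GFP_NOFS may
	 * block on memory reclaim, which is not allowed while holding
	 * a spinlock or running in softirq context.
	 */
	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (e) {
		e->id = id;
		list_add_tail(&e->list, &example_list);
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&example_lock);

	return ret;
}
```

The trade-off is that GFP_ATOMIC draws on atomic reserves and fails rather than waits, so callers must tolerate NULL returns, as the requestqueue.c hunk below already does.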
Diffstat (limited to 'fs/dlm')
-rw-r--r-- | fs/dlm/lock.c | 2 |
-rw-r--r-- | fs/dlm/memory.c | 6 |
-rw-r--r-- | fs/dlm/recover.c | 2 |
-rw-r--r-- | fs/dlm/requestqueue.c | 2 |
4 files changed, 4 insertions, 8 deletions
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 2f94ffc3cf82..d87464614bc5 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1206,13 +1206,11 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
 
-	idr_preload(GFP_NOFS);
 	spin_lock(&ls->ls_lkbidr_spin);
 	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
 	if (rv >= 0)
 		lkb->lkb_id = rv;
 	spin_unlock(&ls->ls_lkbidr_spin);
-	idr_preload_end();
 
 	if (rv < 0) {
 		log_error(ls, "create_lkb idr error %d", rv);
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index ab663ca66aca..15a8b1cee433 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -84,7 +84,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls)
 {
 	char *p;
 
-	p = kzalloc(ls->ls_lvblen, GFP_NOFS);
+	p = kzalloc(ls->ls_lvblen, GFP_ATOMIC);
 	return p;
 }
 
@@ -97,7 +97,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r;
 
-	r = kmem_cache_zalloc(rsb_cache, GFP_NOFS);
+	r = kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
 	return r;
 }
 
@@ -112,7 +112,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
 
-	lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS);
+	lkb = kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
 	return lkb;
 }
 
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 53917c0aa3c0..ce6dc914cb86 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -310,7 +310,6 @@ static int recover_idr_add(struct dlm_rsb *r)
 	struct dlm_ls *ls = r->res_ls;
 	int rv;
 
-	idr_preload(GFP_NOFS);
 	spin_lock(&ls->ls_recover_idr_lock);
 	if (r->res_id) {
 		rv = -1;
@@ -326,7 +325,6 @@ static int recover_idr_add(struct dlm_rsb *r)
 	rv = 0;
 out_unlock:
 	spin_unlock(&ls->ls_recover_idr_lock);
-	idr_preload_end();
 	return rv;
 }
 
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 892d6ca21e74..c05940afd063 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -37,7 +37,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
 	int length = le16_to_cpu(ms->m_header.h_length) -
 		sizeof(struct dlm_message);
 
-	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
+	e = kmalloc(sizeof(struct rq_entry) + length, GFP_ATOMIC);
 	if (!e) {
 		log_print("dlm_add_requestqueue: out of memory len %d", length);
 		return;
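The lock.c and recover.c hunks share the same pattern: once idr_alloc() is called with GFP_NOWAIT, the surrounding idr_preload()/idr_preload_end() pair adds nothing, so the id allocation happens entirely under the existing spinlock. A sketch of the resulting pattern, with hypothetical names (my_idr, my_lock, my_assign_id) rather than the dlm ones:

```c
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

/* Assign obj a new id in [start, end); returns the id or a negative errno. */
static int my_assign_id(void *obj, int start, int end)
{
	int rv;

	spin_lock(&my_lock);
	/*
	 * No idr_preload()/idr_preload_end() pair around the lock:
	 * GFP_NOWAIT lets idr_alloc() allocate its internal nodes
	 * without sleeping, at the cost of possible -ENOMEM under
	 * memory pressure.
	 */
	rv = idr_alloc(&my_idr, obj, start, end, GFP_NOWAIT);
	spin_unlock(&my_lock);

	return rv;
}
```

Failure now surfaces as a negative return from idr_alloc() instead of a sleep, which the existing rv < 0 handling in _create_lkb() and recover_idr_add() already covers.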