author     Andreas Gruenbacher <agruenba@redhat.com>  2024-04-12 19:16:58 +0200
committer  Andreas Gruenbacher <agruenba@redhat.com>  2024-04-24 19:48:20 +0200
commit     1cd28e15864054f3c48baee9eecda1c0441c48ac (patch)
tree       53915029bc33af767064766ab8f324e2fcb076a3
parent     gfs2: Unlock fewer glocks on unmount (diff)
gfs2: finish_xmote cleanup
Currently, function finish_xmote() takes and releases the glock spinlock. However, all of its callers immediately take that spinlock again, so it makes more sense to take the spinlock before calling finish_xmote().

With that, thaw_glock() is the only place that sets the GLF_REPLY_PENDING flag outside of the glock spinlock, but it also takes that spinlock immediately thereafter. Change that to set the bit while the spinlock is already held. This allows switching from test_and_clear_bit() to test_bit() and clear_bit() in glock_work_func().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
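To make the locking pattern concrete, here is a minimal userspace C sketch, not GFS2 code: the names demo_glock, demo_finish, demo_work and REPLY_PENDING are made up for illustration, and a pthread mutex stands in for the glock spinlock. It shows the idea described above under those assumptions: the callee no longer locks internally, each caller takes the lock first, and the flag test and clear happen inside that same critical section.

/*
 * Illustrative sketch only (not kernel code). The callee expects the lock
 * to be held by its caller, mirroring how finish_xmote() is called after
 * this patch.
 */
#include <pthread.h>
#include <stdio.h>

#define REPLY_PENDING (1u << 0)

struct demo_glock {
	pthread_mutex_t lock;
	unsigned int flags;
	int reply;
};

/* Callee: caller must hold gl->lock. */
static void demo_finish(struct demo_glock *gl, int ret)
{
	/* State transition work would happen here, still under gl->lock. */
	printf("finish: reply=%d\n", ret);
}

/* Caller: take the lock once, test and clear the flag, call the callee. */
static void demo_work(struct demo_glock *gl)
{
	pthread_mutex_lock(&gl->lock);
	if (gl->flags & REPLY_PENDING) {	/* like test_bit() under the lock */
		gl->flags &= ~REPLY_PENDING;	/* like clear_bit() under the lock */
		demo_finish(gl, gl->reply);
	}
	pthread_mutex_unlock(&gl->lock);
}

int main(void)
{
	struct demo_glock gl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.flags = REPLY_PENDING,
		.reply = 42,
	};

	demo_work(&gl);
	return 0;
}

Because the lock already serializes access to the flags word, a plain test followed by a clear under the lock is equivalent to the former atomic test_and_clear_bit(), which is the simplification the patch applies to glock_work_func().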
-rw-r--r--	fs/gfs2/glock.c	21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4cf8971ce8ee..b1a2862d431d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -625,7 +625,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -673,7 +672,6 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -696,7 +694,6 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -843,15 +840,19 @@ skip_inval:
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
}
} else { /* lock_nolock */
+ spin_lock(&gl->gl_lockref.lock);
finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
out:
spin_lock(&gl->gl_lockref.lock);
@@ -1108,11 +1109,12 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2183,8 +2185,11 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+
+ spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**