author    Andreas Gruenbacher <agruenba@redhat.com>  2023-08-23 17:15:47 +0200
committer Andreas Gruenbacher <agruenba@redhat.com>  2023-09-05 15:58:17 +0200
commit    a475c5dd16e57c570113eccba51955b5df8bb052 (patch)
tree      8aaa3cbb8b76af25163bf38997a1d2f4a6fdb07f /fs/gfs2
parent    gfs2: Fix initial quota data refcount (diff)
gfs2: Free quota data objects synchronously
In gfs2_quota_cleanup(), wait for the quota data objects to be freed
before returning. Otherwise, there is no guarantee that the quota data
objects will be gone when their kmem cache is destroyed.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
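The shape of the fix: each quota data object accounts for one unit of
sd_quota_count, the deferred free path drops that unit and wakes
sd_kill_wait, and gfs2_quota_cleanup() sleeps until the counter drains.
The same drain-before-destroy idea can be sketched in user space; this
is a minimal illustration with hypothetical names (pending, drained,
obj_dealloc), not code from the patch:

/* drain.c: user-space analogue of the patch's drain-before-destroy
 * pattern; all names here are hypothetical stand-ins.
 * Build with: cc drain.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int pending;                     /* plays the role of sd_quota_count */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER; /* sd_kill_wait */

/* Deferred free path, like gfs2_qd_dealloc(): free the object first,
 * then drop the count and wake the waiter when the last one is gone. */
static void *obj_dealloc(void *obj)
{
        free(obj);
        pthread_mutex_lock(&lock);
        if (--pending == 0)
                pthread_cond_signal(&drained);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t tid[8];
        int i;

        pending = 8;
        for (i = 0; i < 8; i++)
                pthread_create(&tid[i], NULL, obj_dealloc, malloc(64));

        /* Teardown, like gfs2_quota_cleanup(): do not return until
         * every object has actually been freed. */
        pthread_mutex_lock(&lock);
        while (pending != 0)
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 8; i++)
                pthread_join(tid[i], NULL);
        printf("all objects freed; safe to destroy their cache\n");
        return 0;
}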
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/quota.c | 37 ++++++++++++++++++++++++++++++++++---
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 97fdf64148ba..75be0d8e243f 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -109,7 +109,11 @@ static inline void spin_unlock_bucket(unsigned int hash)
 static void gfs2_qd_dealloc(struct rcu_head *rcu)
 {
         struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+        struct gfs2_sbd *sdp = qd->qd_sbd;
+
         kmem_cache_free(gfs2_quotad_cachep, qd);
+        if (atomic_dec_and_test(&sdp->sd_quota_count))
+                wake_up(&sdp->sd_kill_wait);
 }
 
 static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
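The ordering inside the callback is deliberate: sdp is saved before
kmem_cache_free() because qd must not be touched once freed, and the
decrement comes after the free so the counter can only reach zero once
the object is really gone. The inverted ordering below, a hypothetical
counter-example and not code from the patch, shows the race the commit
message describes:

/* WRONG, for contrast: if the counter is dropped before the free,
 * the waiter in gfs2_quota_cleanup() can see zero, unmount can
 * finish, and gfs2_quotad_cachep can later be destroyed while this
 * kmem_cache_free() is still in flight. */
static void gfs2_qd_dealloc_buggy(struct rcu_head *rcu)
{
        struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
        struct gfs2_sbd *sdp = qd->qd_sbd;

        if (atomic_dec_and_test(&sdp->sd_quota_count))
                wake_up(&sdp->sd_kill_wait);
        kmem_cache_free(gfs2_quotad_cachep, qd);        /* too late */
}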
@@ -143,7 +147,6 @@ static void gfs2_qd_list_dispose(struct list_head *list)
                 list_del(&qd->qd_lru);
 
                 gfs2_qd_dispose(qd);
-                atomic_dec(&sdp->sd_quota_count);
         }
 }
 
@@ -317,13 +320,24 @@ static void qd_hold(struct gfs2_quota_data *qd)
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
+        struct gfs2_sbd *sdp;
+
         if (lockref_put_or_lock(&qd->qd_lockref))
                 return;
 
+        BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+        sdp = qd->qd_sbd;
+        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+                lockref_mark_dead(&qd->qd_lockref);
+                spin_unlock(&qd->qd_lockref.lock);
+
+                gfs2_qd_dispose(qd);
+                return;
+        }
+
         qd->qd_lockref.count = 0;
         list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
         spin_unlock(&qd->qd_lockref.lock);
-
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
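Marking the lockref dead under its spinlock is what makes the direct
dispose in qd_put() safe: lookup paths acquire references through
lockref_get_not_dead(), which fails once the mark is set, so no new
user can appear between the unlock and gfs2_qd_dispose(). A schematic
of the lookup side, where qd_lookup_sketch is a hypothetical stand-in
for the hash-bucket lookup but the lockref call is the real API:

/* A dead lockref refuses new references, so an object headed for
 * gfs2_qd_dispose() cannot be resurrected by a concurrent lookup. */
static struct gfs2_quota_data *qd_lookup_sketch(struct gfs2_quota_data *qd)
{
        if (!lockref_get_not_dead(&qd->qd_lockref))
                return NULL;    /* being torn down; treat as gone */
        return qd;              /* reference taken, safe to use */
}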
@@ -1465,16 +1479,33 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 {
         struct gfs2_quota_data *qd;
         LIST_HEAD(dispose);
+        int count;
+
+        BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
 
         spin_lock(&qd_lock);
         list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+                spin_lock(&qd->qd_lockref.lock);
+                if (qd->qd_lockref.count != 0) {
+                        spin_unlock(&qd->qd_lockref.lock);
+                        continue;
+                }
+                lockref_mark_dead(&qd->qd_lockref);
+                spin_unlock(&qd->qd_lockref.lock);
+
                 list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                 list_add(&qd->qd_lru, &dispose);
         }
         spin_unlock(&qd_lock);
 
         gfs2_qd_list_dispose(&dispose);
-        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
+
+        wait_event_timeout(sdp->sd_kill_wait,
+                           (count = atomic_read(&sdp->sd_quota_count)) == 0,
+                           HZ * 60);
+
+        if (count != 0)
+                fs_err(sdp, "%d left-over quota data objects\n", count);
 
         kvfree(sdp->sd_quota_bitmap);
         sdp->sd_quota_bitmap = NULL;
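One subtlety in the final hunk: wait_event_timeout() re-evaluates its
condition on every wakeup and returns 0 if the timeout elapses with the
condition still false, so placing the assignment to count inside the
condition leaves the latest counter value in hand on both the success
and the timeout path. A reduced sketch of that contract, with
hypothetical wq, nr_left, and done:

/* wait_event_timeout(wq, cond, timeout) returns 0 on timeout and the
 * remaining jiffies (>= 1) if cond became true in time; side effects
 * in cond, like this assignment, take effect on either path. */
long left = wait_event_timeout(wq, (done = atomic_read(&nr_left)) == 0,
                               HZ * 60);
if (left == 0)
        pr_warn("timed out with %d objects left\n", done);

The patch tests count directly instead of the macro's return value; the
two are equivalent here because the condition assigned count immediately
before the wait returned.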