path: root/fs/gfs2/trans.c
author     Andreas Gruenbacher <agruenba@redhat.com>  2020-12-17 16:14:30 +0100
committer  Andreas Gruenbacher <agruenba@redhat.com>  2021-02-22 21:16:23 +0100
commit     2129b4288852cf872c42870c7f6e813ce0611199 (patch)
tree       94a1bf3898930c5b7c62eaaa00ee27a4287b5133  /fs/gfs2/trans.c
parent     gfs2: Rework the log space allocation logic (diff)
download   linux-2129b4288852cf872c42870c7f6e813ce0611199.tar.xz
           linux-2129b4288852cf872c42870c7f6e813ce0611199.zip
gfs2: Per-revoke accounting in transactions
In the log, revokes are stored as a revoke descriptor (struct
gfs2_log_descriptor), followed by zero or more additional revoke blocks
(struct gfs2_meta_header). On filesystems with a blocksize of 4k, the
revoke descriptor contains up to 503 revokes, and the metadata blocks
contain up to 509 revokes each. We've so far been reserving space for
revokes in transactions in block granularity, so a lot more space than
necessary was being allocated and then released again.

This patch switches to assigning revokes to transactions individually
instead. Initially, space for the revoke descriptor is reserved and
handed out to transactions. When more revokes than that are reserved,
additional revoke blocks are added. When the log is flushed, the space
for the additional revoke blocks is released, but we keep the space for
the revoke descriptor block allocated. Transactions may still reserve
more revokes than they will actually need in the end, but now we won't
overshoot the target as much, and by only returning the space for
excess revokes at log flush time, we further reduce the amount of
contention between processes.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
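The 503 and 509 figures follow directly from the on-disk layout: each revoke entry is a 64-bit block number, the first block starts with a struct gfs2_log_descriptor, and each additional block starts with a struct gfs2_meta_header. Below is a minimal user-space sketch of that arithmetic for a 4k block size; the constant names and the standalone program are illustrative only, and in the kernel the sizes come from the structure definitions and the superblock's block size.

#include <stdio.h>

/* Illustrative constants for a 4k-block filesystem; in the kernel these
 * correspond to the block size, sizeof(struct gfs2_log_descriptor) and
 * sizeof(struct gfs2_meta_header). */
#define BLOCK_SIZE		4096
#define LOG_DESCRIPTOR_SIZE	72	/* struct gfs2_log_descriptor */
#define META_HEADER_SIZE	24	/* struct gfs2_meta_header */
#define REVOKE_SIZE		8	/* one 64-bit block number per revoke */

int main(void)
{
	unsigned int in_descriptor = (BLOCK_SIZE - LOG_DESCRIPTOR_SIZE) / REVOKE_SIZE;
	unsigned int per_extra_block = (BLOCK_SIZE - META_HEADER_SIZE) / REVOKE_SIZE;

	printf("revokes in the revoke descriptor block: %u\n", in_descriptor);   /* 503 */
	printf("revokes per additional revoke block:    %u\n", per_extra_block); /* 509 */
	return 0;
}

This also explains why the patch keeps the space for the revoke descriptor block allocated across log flushes and only releases the additional revoke blocks: the descriptor block alone already covers 503 revokes.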
Diffstat (limited to 'fs/gfs2/trans.c')
-rw-r--r--  fs/gfs2/trans.c | 34
1 file changed, 26 insertions, 8 deletions
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 231ca1a41a73..ab96cf0bf26b 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -31,16 +31,18 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
 	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
 		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
 		test_bit(TR_TOUCHED, &tr->tr_flags));
-	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
 		tr->tr_num_buf_new, tr->tr_num_buf_rm,
 		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
-		tr->tr_num_revoke, tr->tr_num_revoke_rm);
+		tr->tr_num_revoke);
 }
 
 int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
 		       unsigned int blocks, unsigned int revokes,
 		       unsigned long ip)
 {
+	unsigned int extra_revokes;
+
 	if (current->journal_info) {
 		gfs2_print_trans(sdp, current->journal_info);
 		BUG();
@@ -62,8 +64,6 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
 		 */
 		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
 	}
-	if (revokes)
-		tr->tr_reserved += gfs2_struct2blk(sdp, revokes) - 1;
 	INIT_LIST_HEAD(&tr->tr_databuf);
 	INIT_LIST_HEAD(&tr->tr_buf);
 	INIT_LIST_HEAD(&tr->tr_list);
@@ -75,10 +75,26 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
 
 	sb_start_intwrite(sdp->sd_vfs);
 
-	gfs2_log_reserve(sdp, tr->tr_reserved);
+	/*
+	 * Try the reservations under sd_log_flush_lock to prevent log flushes
+	 * from creating inconsistencies between the number of allocated and
+	 * reserved revokes. If that fails, do a full-block allocation outside
+	 * of the lock to avoid stalling log flushes. Then, allot the
+	 * appropriate number of blocks to revokes, use as many revokes locally
+	 * as needed, and "release" the surplus into the revokes pool.
+	 */
 
 	down_read(&sdp->sd_log_flush_lock);
+	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
+		goto reserved;
+	up_read(&sdp->sd_log_flush_lock);
+	gfs2_log_reserve(sdp, tr, &extra_revokes);
+	down_read(&sdp->sd_log_flush_lock);
+
+reserved:
+	gfs2_log_release_revokes(sdp, extra_revokes);
 	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+		gfs2_log_release_revokes(sdp, tr->tr_revokes);
 		up_read(&sdp->sd_log_flush_lock);
 		gfs2_log_release(sdp, tr->tr_reserved);
 		sb_end_intwrite(sdp->sd_vfs);
@@ -113,14 +129,17 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
 	current->journal_info = NULL;
 
 	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
-		gfs2_log_release(sdp, tr->tr_reserved);
+		gfs2_log_release_revokes(sdp, tr->tr_revokes);
 		up_read(&sdp->sd_log_flush_lock);
+		gfs2_log_release(sdp, tr->tr_reserved);
 		if (!test_bit(TR_ONSTACK, &tr->tr_flags))
 			gfs2_trans_free(sdp, tr);
 		sb_end_intwrite(sdp->sd_vfs);
 		return;
 	}
 
+	gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);
+
 	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
 	nbuf -= tr->tr_num_buf_rm;
 	nbuf -= tr->tr_num_databuf_rm;
@@ -278,7 +297,6 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 {
 	struct gfs2_bufdata *bd, *tmp;
-	struct gfs2_trans *tr = current->journal_info;
 	unsigned int n = len;
 
 	gfs2_log_lock(sdp);
@@ -290,7 +308,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 			if (bd->bd_gl)
 				gfs2_glock_remove_revoke(bd->bd_gl);
 			kmem_cache_free(gfs2_bufdata_cachep, bd);
-			tr->tr_num_revoke_rm++;
+			gfs2_log_release_revokes(sdp, 1);
 			if (--n == 0)
 				break;
 		}
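For readers tracing the new reservation flow in __gfs2_trans_begin above, the following is a simplified, self-contained sketch of the same try-under-lock / reserve-outside-the-lock pattern. The types and helpers (struct sbd, struct trans, log_try_reserve(), log_reserve(), log_release_revokes()) are stand-ins for the GFS2 internals, the revoke pool model is deliberately naive, and the code only illustrates the locking shape, not the kernel implementation.

#include <pthread.h>
#include <stdbool.h>

/* Toy stand-ins for the GFS2 objects; names and fields are illustrative. */
struct sbd {
	pthread_rwlock_t log_flush_lock;	/* models sd_log_flush_lock */
	pthread_mutex_t  pool_lock;
	unsigned int	 free_revokes;		/* revokes available in the shared pool */
};

struct trans {
	unsigned int revokes;			/* revokes requested for this transaction */
};

/* Non-blocking attempt: succeeds only if the current pool covers the request. */
static bool log_try_reserve(struct sbd *sdp, struct trans *tr, unsigned int *extra)
{
	bool ok = false;

	pthread_mutex_lock(&sdp->pool_lock);
	if (sdp->free_revokes >= tr->revokes) {
		sdp->free_revokes -= tr->revokes;
		*extra = 0;			/* nothing handed out beyond the request */
		ok = true;
	}
	pthread_mutex_unlock(&sdp->pool_lock);
	return ok;
}

/* Blocking fallback: grows the reservation in whole revoke blocks (509 entries
 * each on a 4k filesystem), so it can hand out more revokes than were asked
 * for; the surplus is reported back through *extra. */
static void log_reserve(struct sbd *sdp, struct trans *tr, unsigned int *extra)
{
	const unsigned int per_block = 509;
	unsigned int blocks = (tr->revokes + per_block - 1) / per_block;

	*extra = blocks * per_block - tr->revokes;
}

/* Return unused revoke entries to the shared pool. */
static void log_release_revokes(struct sbd *sdp, unsigned int revokes)
{
	pthread_mutex_lock(&sdp->pool_lock);
	sdp->free_revokes += revokes;
	pthread_mutex_unlock(&sdp->pool_lock);
}

/* The shape of the new __gfs2_trans_begin: try the reservation while holding
 * the flush lock shared; if that fails, drop the lock, do the possibly
 * blocking reservation outside of it, retake the lock, and finally return any
 * surplus revokes to the pool. */
static int trans_begin(struct sbd *sdp, struct trans *tr)
{
	unsigned int extra_revokes;

	pthread_rwlock_rdlock(&sdp->log_flush_lock);
	if (log_try_reserve(sdp, tr, &extra_revokes))
		goto reserved;
	pthread_rwlock_unlock(&sdp->log_flush_lock);
	log_reserve(sdp, tr, &extra_revokes);
	pthread_rwlock_rdlock(&sdp->log_flush_lock);

reserved:
	log_release_revokes(sdp, extra_revokes);
	/* ... the transaction now runs with log_flush_lock held shared ... */
	return 0;
}

int main(void)
{
	struct sbd sdp = {
		.log_flush_lock = PTHREAD_RWLOCK_INITIALIZER,
		.pool_lock = PTHREAD_MUTEX_INITIALIZER,
		.free_revokes = 503,	/* the revoke descriptor block's worth of entries */
	};
	struct trans tr = { .revokes = 10 };

	trans_begin(&sdp, &tr);
	pthread_rwlock_unlock(&sdp.log_flush_lock);	/* what transaction end would do */
	return 0;
}

As the comment in the patch explains, the blocking reservation is done outside of sd_log_flush_lock so that it cannot stall log flushes, while the initial attempt under the lock keeps the number of allocated and reserved revokes consistent with concurrent flushes.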