author | Darrick J. Wong <djwong@kernel.org> | 2021-09-17 02:28:07 +0200
committer | Darrick J. Wong <djwong@kernel.org> | 2021-10-14 18:19:31 +0200
commit | 512edfac85d243ed6a5a5f42f513ebb7c2d32863 (patch)
tree | 92ceb8ce47c29eeb9abd9255a623042aa2cac0e8 /fs/xfs/libxfs/xfs_defer.c
parent | xfs: formalize the process of holding onto resources across a defer roll (diff)
xfs: port the defer ops capture and continue to resource capture
When log recovery tries to recover a transaction that had log intent
items attached to it, it has to save certain parts of the transaction
state (reservation, dfops chain, inodes with no automatic unlock) so
that it can finish single-stepping the recovered transactions before
finishing the chains.
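For orientation, an intent-item recovery function ends up handing its dirty transaction (and any inode it still holds) to this capture machinery instead of committing and walking away. The sketch below is illustrative only: the function name and parameters are hypothetical, and the actual replay step is elided, but the ijoin/capture/release ordering follows the real callers in fs/xfs/xfs_*_item.c.

```c
/*
 * Illustrative sketch only: where the capture step sits in an intent-item
 * recovery function.  The name and parameters are hypothetical; real
 * callers build tp and ip themselves while replaying one step of the
 * intent item.
 */
static int
xfs_example_item_recover_commit(
	struct xfs_trans	*tp,		/* dirty recovery transaction */
	struct xfs_inode	*ip,		/* inode held ILOCK_EXCL */
	struct list_head	*capture_list)
{
	int			error;

	/*
	 * Join the inode with no unlock flags so the capture code can find
	 * it and keep it held across the commit.
	 */
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Commit tp and stash the unfinished dfops chain, the log
	 * reservation, and the held inode on capture_list.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);

	/* Drop our own lock and reference; the capture took its own. */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return error;
}
```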
This is done with the xfs_defer_ops_capture and xfs_defer_ops_continue
functions. Right now they open-code this functionality, so let's port
this to the formalized resource capture structure that we introduced in
the previous patch. This enables us to hold up to two inodes and two
buffers during log recovery, the same way we do for regular runtime.
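The dfc_held field that shows up throughout the diff below is the struct xfs_defer_resources introduced by the parent patch ("xfs: formalize the process of holding onto resources across a defer roll"). For reference, it looks roughly like this; the definition lives in fs/xfs/libxfs/xfs_defer.h and is not part of this diff:

```c
#define XFS_DEFER_OPS_NR_INODES	2	/* join up to two inodes */
#define XFS_DEFER_OPS_NR_BUFS	2	/* join up to two buffers */

/* Resources that must be held across a transaction roll. */
struct xfs_defer_resources {
	/* held buffers */
	struct xfs_buf		*dr_bp[XFS_DEFER_OPS_NR_BUFS];

	/* inodes with no unlock flags */
	struct xfs_inode	*dr_ip[XFS_DEFER_OPS_NR_INODES];

	/* number of held buffers */
	unsigned short		dr_bufs;

	/* bitmap of ordered buffers */
	unsigned short		dr_ordered;

	/* number of held inodes */
	unsigned short		dr_inos;
};
```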
With this patch applied, we'll be ready to support atomic extent swap, which holds two inodes, and logged xattrs, which hold one inode and one xattr leaf buffer.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Diffstat (limited to 'fs/xfs/libxfs/xfs_defer.c')
-rw-r--r-- | fs/xfs/libxfs/xfs_defer.c | 86
1 file changed, 65 insertions, 21 deletions
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 7c6490f3e537..136a367d7b16 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -650,10 +650,11 @@ xfs_defer_move(
  */
 static struct xfs_defer_capture *
 xfs_defer_ops_capture(
-	struct xfs_trans		*tp,
-	struct xfs_inode		*capture_ip)
+	struct xfs_trans		*tp)
 {
 	struct xfs_defer_capture	*dfc;
+	unsigned short			i;
+	int				error;
 
 	if (list_empty(&tp->t_dfops))
 		return NULL;
@@ -677,27 +678,48 @@ xfs_defer_ops_capture(
 	/* Preserve the log reservation size. */
 	dfc->dfc_logres = tp->t_log_res;
 
+	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
+	if (error) {
+		/*
+		 * Resource capture should never fail, but if it does, we
+		 * still have to shut down the log and release things
+		 * properly.
+		 */
+		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+	}
+
 	/*
-	 * Grab an extra reference to this inode and attach it to the capture
-	 * structure.
+	 * Grab extra references to the inodes and buffers because callers are
+	 * expected to release their held references after we commit the
+	 * transaction.
 	 */
-	if (capture_ip) {
-		ihold(VFS_I(capture_ip));
-		dfc->dfc_capture_ip = capture_ip;
+	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
+		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
+		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
 	}
 
+	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
+		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);
+
 	return dfc;
 }
 
 /* Release all resources that we used to capture deferred ops. */
 void
-xfs_defer_ops_release(
+xfs_defer_ops_capture_free(
 	struct xfs_mount		*mp,
 	struct xfs_defer_capture	*dfc)
 {
+	unsigned short			i;
+
 	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
-	if (dfc->dfc_capture_ip)
-		xfs_irele(dfc->dfc_capture_ip);
+
+	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
+		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);
+
+	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
+		xfs_irele(dfc->dfc_held.dr_ip[i]);
+
 	kmem_free(dfc);
 }
 
@@ -712,24 +734,21 @@ xfs_defer_ops_release(
 int
 xfs_defer_ops_capture_and_commit(
 	struct xfs_trans		*tp,
-	struct xfs_inode		*capture_ip,
 	struct list_head		*capture_list)
 {
 	struct xfs_mount		*mp = tp->t_mountp;
 	struct xfs_defer_capture	*dfc;
 	int				error;
 
-	ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));
-
 	/* If we don't capture anything, commit transaction and exit. */
-	dfc = xfs_defer_ops_capture(tp, capture_ip);
+	dfc = xfs_defer_ops_capture(tp);
 	if (!dfc)
 		return xfs_trans_commit(tp);
 
 	/* Commit the transaction and add the capture structure to the list. */
 	error = xfs_trans_commit(tp);
 	if (error) {
-		xfs_defer_ops_release(mp, dfc);
+		xfs_defer_ops_capture_free(mp, dfc);
 		return error;
 	}
 
@@ -747,17 +766,19 @@ void
 xfs_defer_ops_continue(
 	struct xfs_defer_capture	*dfc,
 	struct xfs_trans		*tp,
-	struct xfs_inode		**captured_ipp)
+	struct xfs_defer_resources	*dres)
 {
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
 
 	/* Lock and join the captured inode to the new transaction. */
-	if (dfc->dfc_capture_ip) {
-		xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
-		xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
-	}
-	*captured_ipp = dfc->dfc_capture_ip;
+	if (dfc->dfc_held.dr_inos == 2)
+		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
+				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
+	else if (dfc->dfc_held.dr_inos == 1)
+		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);
+	xfs_defer_restore_resources(tp, &dfc->dfc_held);
+	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
 
 	/* Move captured dfops chain and state to the transaction. */
 	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
@@ -765,3 +786,26 @@ xfs_defer_ops_continue(
 
 	kmem_free(dfc);
 }
+
+/* Release the resources captured and continued during recovery. */
+void
+xfs_defer_resources_rele(
+	struct xfs_defer_resources	*dres)
+{
+	unsigned short			i;
+
+	for (i = 0; i < dres->dr_inos; i++) {
+		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
+		xfs_irele(dres->dr_ip[i]);
+		dres->dr_ip[i] = NULL;
+	}
+
+	for (i = 0; i < dres->dr_bufs; i++) {
+		xfs_buf_relse(dres->dr_bp[i]);
+		dres->dr_bp[i] = NULL;
+	}
+
+	dres->dr_inos = 0;
+	dres->dr_bufs = 0;
+	dres->dr_ordered = 0;
+}
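For completeness, the recovery-side consumer of these captures follows the continue/commit/release pattern below. This is an approximate sketch modeled on xlog_finish_defer_ops() in fs/xfs/xfs_log_recover.c, which is updated by the same patch but filtered out of the diffstat above; the name is changed to mark it as a sketch and error handling is simplified.

```c
/*
 * Sketch of the recovery-side loop that finishes captured dfops chains.
 * Hypothetical name; based on xlog_finish_defer_ops().
 */
static int
xlog_finish_captured_defer_ops(
	struct xfs_mount	*mp,
	struct list_head	*capture_list)
{
	struct xfs_defer_capture *dfc, *next;
	struct xfs_defer_resources dres;
	struct xfs_trans	*tp;
	int			error;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		struct xfs_trans_res	resv;

		/*
		 * Rebuild a transaction reservation from the captured log
		 * reservation so the dfops chain can keep rolling.
		 */
		resv.tr_logres = dfc->dfc_logres;
		resv.tr_logcount = 1;
		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;

		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		/*
		 * Transfer the captured dfops chain and held resources into
		 * the new transaction; dres records what must be released
		 * once the chain has been committed.
		 */
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_continue(dfc, tp, &dres);

		error = xfs_trans_commit(tp);
		xfs_defer_resources_rele(&dres);
		if (error)
			return error;
	}

	ASSERT(list_empty(capture_list));
	return 0;
}
```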