author     Christoph Hellwig <hch@lst.de>              2024-04-29 08:15:25 +0200
committer  Chandan Babu R <chandanbabu@kernel.org>     2024-04-30 06:15:19 +0200
commit     2a9b99d45be0981536f6d3faf40ae3f58febdd49
tree       86c1573899cfd8090af465584ae0f44fe64c9cb6 /fs/xfs
parent     xfs: don't open code XFS_FILBLKS_MIN in xfs_bmapi_write
xfs: pass the actual offset and len to allocate to xfs_bmapi_allocate
xfs_bmapi_allocate currently overwrites offset and len when converting
delayed allocations, and duplicates the length cap done for non-delalloc
allocations.  Move all that logic into the callers to avoid duplication
and to make the calling conventions more obvious.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
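[Editor's note, not part of the commit: a minimal, self-contained C sketch of the caller-side convention this patch establishes. The types, the setup_allocation() helper, and the XFS_MAX_BMBT_EXTLEN value below are simplified stand-ins rather than the kernel's definitions; only the offset/length logic mirrors the hunks that follow.]

/*
 * Illustration only: simplified stand-in types, not the kernel's
 * struct xfs_bmalloca or struct xfs_bmbt_irec.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t xfs_fileoff_t;
typedef uint64_t xfs_filblks_t;

/* Illustrative cap; the real definition lives in fs/xfs/libxfs/xfs_format.h. */
#define XFS_MAX_BMBT_EXTLEN     ((xfs_filblks_t)((1 << 21) - 1))
#define XFS_FILBLKS_MIN(a, b)   ((a) < (b) ? (a) : (b))

struct irec {                           /* stand-in for struct xfs_bmbt_irec */
        xfs_fileoff_t   br_startoff;
        xfs_filblks_t   br_blockcount;
};

struct bmalloca {                       /* stand-in for struct xfs_bmalloca */
        xfs_fileoff_t   offset;         /* offset to allocate at */
        xfs_filblks_t   length;         /* length to allocate */
        struct irec     got;            /* extent found in the extent tree */
        bool            wasdel;         /* converting a delayed allocation */
        bool            eof;            /* no extent at or past the offset */
};

/*
 * With this patch, xfs_bmapi_allocate() only asserts
 * 0 < length <= XFS_MAX_BMBT_EXTLEN; the caller (modelled here on the
 * xfs_bmapi_write() hunk below) picks the actual offset and length.
 */
static void setup_allocation(struct bmalloca *bma, xfs_fileoff_t bno,
                xfs_filblks_t len)
{
        bma->offset = bno;
        bma->length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);

        if (bma->wasdel) {
                /* Convert the whole delayed extent, not just the asked range. */
                bma->offset = bma->got.br_startoff;
                bma->length = bma->got.br_blockcount;
        } else if (!bma->eof) {
                /* Don't allocate past the start of the next existing extent. */
                bma->length = XFS_FILBLKS_MIN(bma->length,
                                bma->got.br_startoff - bno);
        }
}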
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/libxfs/xfs_bmap.c   32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6507dcaac438..c4126f37dea3 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4185,21 +4185,11 @@ xfs_bmapi_allocate(
         int                     error;
 
         ASSERT(bma->length > 0);
+        ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
 
-        /*
-         * For the wasdelay case, we could also just allocate the stuff asked
-         * for in this bmap call but that wouldn't be as good.
-         */
         if (bma->wasdel) {
-                bma->length = (xfs_extlen_t)bma->got.br_blockcount;
-                bma->offset = bma->got.br_startoff;
                 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
                         bma->prev.br_startoff = NULLFILEOFF;
-        } else {
-                bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
-                if (!bma->eof)
-                        bma->length = XFS_FILBLKS_MIN(bma->length,
-                                        bma->got.br_startoff - bma->offset);
         }
 
         if (bma->flags & XFS_BMAPI_CONTIG)
@@ -4533,6 +4523,15 @@ xfs_bmapi_write(
                  */
                 bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
 
+                if (wasdelay) {
+                        bma.offset = bma.got.br_startoff;
+                        bma.length = bma.got.br_blockcount;
+                } else {
+                        if (!eof)
+                                bma.length = XFS_FILBLKS_MIN(bma.length,
+                                                bma.got.br_startoff - bno);
+                }
+
                 ASSERT(bma.length > 0);
                 error = xfs_bmapi_allocate(&bma);
                 if (error) {
@@ -4686,12 +4685,17 @@ xfs_bmapi_convert_one_delalloc(
         bma.tp = tp;
         bma.ip = ip;
         bma.wasdel = true;
-        bma.offset = bma.got.br_startoff;
-        bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
-                        XFS_MAX_BMBT_EXTLEN);
         bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
         /*
+         * Always allocate convert from the start of the delalloc extent even if
+         * that is outside the passed in range to create large contiguous
+         * extents on disk.
+         */
+        bma.offset = bma.got.br_startoff;
+        bma.length = bma.got.br_blockcount;
+
+        /*
          * When we're converting the delalloc reservations backing dirty pages
          * in the page cache, we must be careful about how we create the new
          * extents:
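[Editor's note, not part of the commit: a tiny worked example of the comment added to xfs_bmapi_convert_one_delalloc above, using made-up block numbers and stand-in fields rather than kernel types.]

/*
 * Illustration only: a delalloc extent covering file blocks 100..163 is
 * converted in full even though the caller only asked about block 130,
 * so the resulting on-disk extent stays one large contiguous piece.
 */
#include <stdio.h>

int main(void)
{
        struct {
                unsigned long long br_startoff;
                unsigned long long br_blockcount;
        } got = { .br_startoff = 100, .br_blockcount = 64 };

        unsigned long long requested_bno = 130;         /* offset passed in */
        unsigned long long offset = got.br_startoff;    /* 100, not 130 */
        unsigned long long length = got.br_blockcount;  /* all 64 blocks */

        printf("convert blocks %llu..%llu (requested %llu)\n",
               offset, offset + length - 1, requested_bno);
        return 0;
}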