author    Qu Wenruo <wqu@suse.com>            2020-12-02 07:48:08 +0100
committer David Sterba <dsterba@suse.com>     2020-12-09 19:16:11 +0100
commit    d0a7a9c050f3d0e11626ee5b3cebb0e4388ffce6
tree      3b200a4d5a6eb4be17d9aedcdebe0a62eb833ee1 /fs/btrfs/scrub.c
parent    btrfs: scrub: reduce width of extent_len/stripe_len from 64 to 32 bits
btrfs: scrub: always allocate one full page for one sector for RAID56
For scrub_pages() and scrub_pages_for_parity(), we currently allocate
one scrub_page structure per page.

This is fine if we only read/write one sector at a time. But for cases
like scrubbing RAID56, we need to read/write the full stripe, which is
64K in size for now.

For subpage, we would submit the read in just one page, which is
normally a good thing, but the RAID56 code expects to see only one
sector, not the full stripe, in its endio function.
This could lead to a wrong parity checksum for RAID56 on subpage.
To make the existing code work well for the subpage case, here we take
a shortcut by always allocating a full page for one sector.

This should provide the basis for making RAID56 work for the subpage
case.
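In loop form, the shortcut boils down to stepping through the range one
sector at a time and backing each sector with its own full page. Below
is a minimal userspace sketch of that shape; the names (sector_buf,
alloc_sector_bufs) are illustrative, not the kernel's, and the real
change is in the diff further down.

/* Userspace sketch of the per-sector loop this patch introduces.
 * Each sector gets a full page, even when page_size > sectorsize.
 * All names here are hypothetical; see the diff below for the
 * actual kernel code. */
#include <assert.h>
#include <stdlib.h>

struct sector_buf {
	void *page;			/* one full page per sector */
	unsigned long long logical;
	unsigned long long physical;
};

static int alloc_sector_bufs(unsigned int len, unsigned int sectorsize,
			     unsigned int page_size,
			     unsigned long long logical,
			     unsigned long long physical,
			     struct sector_buf *out)
{
	int index;

	/* Mirrors the ASSERT(IS_ALIGNED(len, sectorsize)) in the patch. */
	assert(len % sectorsize == 0);

	for (index = 0; len > 0; index++) {
		out[index].page = malloc(page_size);	/* full page, one sector */
		if (!out[index].page)
			return -1;
		out[index].logical = logical;
		out[index].physical = physical;

		/* Iterate over the range in sectorsize steps. */
		len -= sectorsize;
		logical += sectorsize;
		physical += sectorsize;
	}
	return index;	/* number of sectors (and pages) allocated */
}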
The cost is now pretty obvious: for one RAID56 stripe we always need 16
pages. For the subpage situation (64K page size, 4K sector size), this
means we need a full megabyte to scrub just one RAID56 stripe.

And for data scrub, each 4K sector will also need its own 64K page.
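To make the arithmetic concrete, here is a small worked example under
the stated assumptions (64K stripe, 64K pages, 4K sectors); the numbers
come from the commit message, the code itself is only illustrative:

/* Worked example of the memory cost described above. */
#include <stdio.h>

int main(void)
{
	const unsigned int stripe_len = 64 * 1024;	/* full RAID56 stripe */
	const unsigned int sectorsize = 4 * 1024;	/* subpage: 4K sectors */
	const unsigned int page_size  = 64 * 1024;	/* 64K pages */

	unsigned int nr_pages = stripe_len / sectorsize;	/* 16 pages */
	unsigned int total    = nr_pages * page_size;		/* 1 MiB */

	printf("%u pages, %u KiB per RAID56 stripe\n",
	       nr_pages, total / 1024);
	return 0;
}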
This is mostly just a workaround; the proper fix is a much larger
project: replacing scrub_page with scrub_block, and allowing
scrub_block to handle multiple pages, csums, and a csum_bitmap, to
avoid allocating one page for each sector.
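That future direction could look roughly like the structure below;
every field name and bound here is an assumption made for illustration,
not the eventual kernel implementation:

/* Hypothetical shape of a multi-page scrub_block as outlined above.
 * Names, types and bounds are assumptions, not actual kernel code. */
#include <stdint.h>

#define SCRUB_MAX_SECTORS	16	/* e.g. 64K stripe / 4K sectors */
#define CSUM_MAX_SIZE		32	/* large enough for any btrfs csum */

struct scrub_block_sketch {
	void *pages[SCRUB_MAX_SECTORS];	/* several sectors may share a page */
	uint8_t csums[SCRUB_MAX_SECTORS][CSUM_MAX_SIZE];
	unsigned long csum_bitmap;	/* which sectors have a csum */
	uint64_t logical;
	uint64_t physical;
	uint32_t len;
	uint32_t sectorsize;
};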
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/scrub.c')
 fs/btrfs/scrub.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 8026606f7510..a1bf87958f8e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2153,6 +2153,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
 		       u64 physical_for_dev_replace)
 {
 	struct scrub_block *sblock;
+	const u32 sectorsize = sctx->fs_info->sectorsize;
 	int index;
 
 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
@@ -2171,7 +2172,12 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
 
 	for (index = 0; len > 0; index++) {
 		struct scrub_page *spage;
-		u32 l = min_t(u32, len, PAGE_SIZE);
+		/*
+		 * Here we will allocate one page for one sector to scrub.
+		 * This is fine if PAGE_SIZE == sectorsize, but will cost
+		 * more memory for PAGE_SIZE > sectorsize case.
+		 */
+		u32 l = min(sectorsize, len);
 
 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
@@ -2483,8 +2489,11 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 {
 	struct scrub_ctx *sctx = sparity->sctx;
 	struct scrub_block *sblock;
+	const u32 sectorsize = sctx->fs_info->sectorsize;
 	int index;
 
+	ASSERT(IS_ALIGNED(len, sectorsize));
+
 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
@@ -2503,7 +2512,6 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 
 	for (index = 0; len > 0; index++) {
 		struct scrub_page *spage;
-		u32 l = min_t(u32, len, PAGE_SIZE);
 
 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
@@ -2538,9 +2546,12 @@ leave_nomem:
 		spage->page = alloc_page(GFP_KERNEL);
 		if (!spage->page)
 			goto leave_nomem;
-		len -= l;
-		logical += l;
-		physical += l;
+
+
+		/* Iterate over the stripe range in sectorsize steps */
+		len -= sectorsize;
+		logical += sectorsize;
+		physical += sectorsize;
 	}
 
 	WARN_ON(sblock->page_count == 0);