author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-11 21:14:08 +0200 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-11 21:14:08 +0200 |
commit | 360e694282fce69608e2775bf843b2aafd19e4b4 (patch) | |
tree | f46cbd76ded02b76d424abaf56e219fd13ca17c4 /drivers/block | |
parent | Merge tag 'io_uring-6.5-2023-08-11' of git://git.kernel.dk/linux (diff) | |
parent | nvme: core: don't hold rcu read lock in nvme_ns_chr_uring_cmd_iopoll (diff) | |
Merge tag 'block-6.5-2023-08-11' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
- NVMe pull request via Keith:
    - Fixes for request_queue state (Ming)
    - Another uuid quirk (August)
- RCU poll fix for NVMe (Ming)
- Fix for an IO stall with polled IO (me)
- Fix for blk-iocost stats enable/disable accounting (Chengming)
- Regression fix for large pages for zram (Christoph)
* tag 'block-6.5-2023-08-11' of git://git.kernel.dk/linux:
  nvme: core: don't hold rcu read lock in nvme_ns_chr_uring_cmd_iopoll
  blk-iocost: fix queue stats accounting
  block: don't make REQ_POLLED imply REQ_NOWAIT
  block: get rid of unused plug->nowait flag
  zram: take device and not only bvec offset into account
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Samsung PM9B1 256G and 512G
  nvme-rdma: fix potential unbalanced freeze & unfreeze
  nvme-tcp: fix potential unbalanced freeze & unfreeze
  nvme: fix possible hang when removing a controller during error recovery
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 32 |
1 file changed, 20 insertions, 12 deletions
```diff
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5676e6dd5b16..06673c6ca255 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 
 static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }
 
 static void zram_bio_write(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }
```
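The zram fix above replaces bio_for_each_segment() with a manual do/while over the bio's iterator: each pass recomputes the zram page index and intra-page offset from the current sector, then clamps the step with min_t(u32, bv.bv_len, PAGE_SIZE - offset) so a single call into zram never spans two pages, even when the bio carries segments larger than PAGE_SIZE. The standalone C sketch below models just that clamping arithmetic; the bio geometry (one 9 KiB segment starting at sector 5) and the scalar stand-ins for the kernel's bvec_iter fields are invented for illustration, assuming 4 KiB pages and 512-byte sectors.

```c
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT            9
#define PAGE_SHIFT              12  /* assumes 4 KiB pages */
#define PAGE_SIZE               (1u << PAGE_SHIFT)
#define SECTORS_PER_PAGE        (1u << (PAGE_SHIFT - SECTOR_SHIFT))
#define SECTORS_PER_PAGE_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical bio: a single 9 KiB segment starting at sector 5. */
	uint64_t bi_sector = 5;       /* stand-in for iter.bi_sector */
	uint32_t bi_size = 9 * 1024;  /* stand-in for iter.bi_size (bytes left) */

	do {
		/* Same arithmetic as the patched loop. */
		uint32_t index  = (uint32_t)(bi_sector >> SECTORS_PER_PAGE_SHIFT);
		uint32_t offset = (uint32_t)((bi_sector & (SECTORS_PER_PAGE - 1))
						<< SECTOR_SHIFT);

		/* The fix: clamp each step at the page boundary instead of
		 * trusting the bvec offset alone. */
		uint32_t len = min_u32(bi_size, PAGE_SIZE - offset);

		printf("zram page %u, offset %4u, len %4u\n", index, offset, len);

		/* Models bio_advance_iter_single(). */
		bi_sector += len >> SECTOR_SHIFT;
		bi_size   -= len;
	} while (bi_size);

	return 0;
}
```

Running the sketch splits the 9 KiB span into 1536-, 4096-, and 3584-byte steps, each confined to one page, which mirrors the per-page invariant that zram_bvec_read() and zram_bvec_write() expect.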